1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the TargetLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/TargetLowering.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/CodeGen/CallingConvLower.h"
16 #include "llvm/CodeGen/MachineFrameInfo.h"
17 #include "llvm/CodeGen/MachineFunction.h"
18 #include "llvm/CodeGen/MachineJumpTableInfo.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/SelectionDAG.h"
21 #include "llvm/CodeGen/TargetRegisterInfo.h"
22 #include "llvm/CodeGen/TargetSubtargetInfo.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/GlobalVariable.h"
26 #include "llvm/IR/LLVMContext.h"
27 #include "llvm/MC/MCAsmInfo.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MathExtras.h"
32 #include "llvm/Target/TargetLoweringObjectFile.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include <cctype>
35 using namespace llvm;
36 
/// Construct a TargetLowering for the given target machine. All lowering
/// state lives in the TargetLoweringBase subobject; this class only layers
/// behavior on top of it.
/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}
40 
/// Return a human-readable name for the given target-specific node opcode.
/// The base implementation knows no target nodes and returns nullptr;
/// targets override this so their custom SDNodes print meaningfully.
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}
44 
/// Return true when the current compilation is position independent, as
/// reported by the TargetMachine.
bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}
48 
/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
///
/// \param DAG   The DAG containing \p Node.
/// \param Node  The call node being considered for tail-call emission.
/// \param Chain [out] Set by isUsedByReturnOnly to the call's input chain
///              when the node's only use is a return.
/// \returns true if the call may be emitted as a tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}
77 
78 bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
79     const uint32_t *CallerPreservedMask,
80     const SmallVectorImpl<CCValAssign> &ArgLocs,
81     const SmallVectorImpl<SDValue> &OutVals) const {
82   for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
83     const CCValAssign &ArgLoc = ArgLocs[I];
84     if (!ArgLoc.isRegLoc())
85       continue;
86     Register Reg = ArgLoc.getLocReg();
87     // Only look at callee saved registers.
88     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
89       continue;
90     // Check that we pass the value used for the caller.
91     // (We look for a CopyFromReg reading a virtual register that is used
92     //  for the function live-in value of register Reg)
93     SDValue Value = OutVals[I];
94     if (Value->getOpcode() != ISD::CopyFromReg)
95       return false;
96     unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
97     if (MRI.getLiveInPhysReg(ArgReg) != Reg)
98       return false;
99   }
100   return true;
101 }
102 
103 /// Set CallLoweringInfo attribute flags based on a call instruction
104 /// and called function attributes.
105 void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
106                                                      unsigned ArgIdx) {
107   IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
108   IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
109   IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
110   IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
111   IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
112   IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
113   IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
114   IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
115   IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
116   IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
117   Alignment = Call->getParamAlignment(ArgIdx);
118   ByValType = nullptr;
119   if (Call->paramHasAttr(ArgIdx, Attribute::ByVal))
120     ByValType = Call->getParamByValType(ArgIdx);
121 }
122 
123 /// Generate a libcall taking the given operands as arguments and returning a
124 /// result of type RetVT.
125 std::pair<SDValue, SDValue>
126 TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
127                             ArrayRef<SDValue> Ops,
128                             MakeLibCallOptions CallOptions,
129                             const SDLoc &dl,
130                             SDValue InChain) const {
131   if (!InChain)
132     InChain = DAG.getEntryNode();
133 
134   TargetLowering::ArgListTy Args;
135   Args.reserve(Ops.size());
136 
137   TargetLowering::ArgListEntry Entry;
138   for (unsigned i = 0; i < Ops.size(); ++i) {
139     SDValue NewOp = Ops[i];
140     Entry.Node = NewOp;
141     Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
142     Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
143                                                  CallOptions.IsSExt);
144     Entry.IsZExt = !Entry.IsSExt;
145 
146     if (CallOptions.IsSoften &&
147         !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
148       Entry.IsSExt = Entry.IsZExt = false;
149     }
150     Args.push_back(Entry);
151   }
152 
153   if (LC == RTLIB::UNKNOWN_LIBCALL)
154     report_fatal_error("Unsupported library call operation!");
155   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
156                                          getPointerTy(DAG.getDataLayout()));
157 
158   Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
159   TargetLowering::CallLoweringInfo CLI(DAG);
160   bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
161   bool zeroExtend = !signExtend;
162 
163   if (CallOptions.IsSoften &&
164       !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
165     signExtend = zeroExtend = false;
166   }
167 
168   CLI.setDebugLoc(dl)
169       .setChain(InChain)
170       .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
171       .setNoReturn(CallOptions.DoesNotReturn)
172       .setDiscardResult(!CallOptions.IsReturnValueUsed)
173       .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
174       .setSExtResult(signExtend)
175       .setZExtResult(zeroExtend);
176   return LowerCallTo(CLI);
177 }
178 
/// Choose the sequence of value types to use when lowering the memory
/// operation described by \p Op, appending one EVT per emitted access to
/// \p MemOps. Returns false if the operation would need more than \p Limit
/// accesses or the source/destination alignments are incompatible.
bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
  if (!(Op.getSrcAlign() == 0 || Op.getSrcAlign() >= Op.getDstAlign()))
    return false;

  // Ask the target for its preferred access type first.
  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    while (Op.getDstAlign() && Op.getDstAlign() < VT.getSizeInBits() / 8 &&
           !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
      VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  // Greedily cover the remaining size, shrinking VT whenever it no longer
  // fits in what is left.
  unsigned NumMemOps = 0;
  auto Size = Op.size();
  while (Size != 0) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector load / store's for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        // Walk down the integer types until one is safe; i8 is the floor.
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      // Note: Fast is only read when allowsMisalignedMemoryAccesses returned
      // true, which is when it has been written.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign(),
                                         MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}
272 
273 /// Soften the operands of a comparison. This code is shared among BR_CC,
274 /// SELECT_CC, and SETCC handlers.
275 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
276                                          SDValue &NewLHS, SDValue &NewRHS,
277                                          ISD::CondCode &CCCode,
278                                          const SDLoc &dl, const SDValue OldLHS,
279                                          const SDValue OldRHS) const {
280   SDValue Chain;
281   return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
282                              OldRHS, Chain);
283 }
284 
/// Soften the operands of a floating-point comparison into one or two integer
/// soft-fp libcalls. On return, NewLHS/NewRHS/CCCode describe an equivalent
/// integer comparison (NewRHS is empty when the result is already a combined
/// setcc), and Chain is updated past the libcall(s) when one was supplied.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // NOTE(review): IsSignaling is not referenced anywhere in this body, so
  // signaling comparisons are currently lowered identically to quiet ones —
  // confirm this is intended.
  // FIXME: Currently we cannot really respect all IEEE predicates due to libgcc
  // not supporting it. We can update this code when libgcc provides such
  // functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
         && "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s). LC2 stays UNKNOWN unless the
  // predicate needs the AND/OR of two libcall results (SETONE / SETUEQ).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    // SETO = !SETUO: emit the unordered test and invert the result.
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparions lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  // Record the pre-softening operand types so the libcall machinery knows
  // which extension (if any) to apply.
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  // Compare the libcall result against zero using the libcall's condition.
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    // Two-libcall predicates: combine the two zero-comparisons with AND
    // (inverted case) or OR, and hand back the combined value in NewLHS with
    // an empty NewRHS.
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}
424 
425 /// Return the entry encoding for a jump table in the current function. The
426 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
427 unsigned TargetLowering::getJumpTableEncoding() const {
428   // In non-pic modes, just use the address of a block.
429   if (!isPositionIndependent())
430     return MachineJumpTableInfo::EK_BlockAddress;
431 
432   // In PIC mode, if the target supports a GPRel32 directive, use it.
433   if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
434     return MachineJumpTableInfo::EK_GPRel32BlockAddress;
435 
436   // Otherwise, use a label difference.
437   return MachineJumpTableInfo::EK_LabelDifference32;
438 }
439 
440 SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
441                                                  SelectionDAG &DAG) const {
442   // If our PIC model is GP relative, use the global offset table as the base.
443   unsigned JTEncoding = getJumpTableEncoding();
444 
445   if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
446       (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
447     return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));
448 
449   return Table;
450 }
451 
/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
///
/// \param MF  Function containing the jump table.
/// \param JTI Jump table index within \p MF.
/// \param Ctx MC context used to create the symbol reference.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,MCContext &Ctx) const{
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}
460 
461 bool
462 TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
463   const TargetMachine &TM = getTargetMachine();
464   const GlobalValue *GV = GA->getGlobal();
465 
466   // If the address is not even local to this DSO we will have to load it from
467   // a got and then add the offset.
468   if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
469     return false;
470 
471   // If the code is position independent we will have to add a base register.
472   if (isPositionIndependent())
473     return false;
474 
475   // Otherwise we can do it.
476   return true;
477 }
478 
479 //===----------------------------------------------------------------------===//
480 //  Optimization Methods
481 //===----------------------------------------------------------------------===//
482 
/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, Demanded, TLO))
    // Report success iff the hook actually queued a replacement node.
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    // Only handle the constant-on-RHS form.
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C)
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && Demanded.isSubsetOf(C))
      return false;

    // Clear any constant bits outside the demanded set and rebuild the node.
    if (!C.isSubsetOf(Demanded)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(Demanded & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}
524 
/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ANY_EXTEND for the widening cast — the bits above
/// the demanded set are not used, so an any-extend suffices — but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}
571 
572 bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
573                                           DAGCombinerInfo &DCI) const {
574   SelectionDAG &DAG = DCI.DAG;
575   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
576                         !DCI.isBeforeLegalizeOps());
577   KnownBits Known;
578 
579   bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
580   if (Simplified) {
581     DCI.AddToWorklist(Op.getNode());
582     DCI.CommitTargetLoweringOpt(TLO);
583   }
584   return Simplified;
585 }
586 
587 bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
588                                           KnownBits &Known,
589                                           TargetLoweringOpt &TLO,
590                                           unsigned Depth,
591                                           bool AssumeSingleUse) const {
592   EVT VT = Op.getValueType();
593   APInt DemandedElts = VT.isVector()
594                            ? APInt::getAllOnesValue(VT.getVectorNumElements())
595                            : APInt(1, 1);
596   return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
597                               AssumeSingleUse);
598 }
599 
// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
/// If Op's result, restricted to the demanded bits/elements, can be supplied
/// by an existing (usually simpler) value, return that value; otherwise
/// return an empty SDValue. This never mutates the DAG, so it is safe to use
/// on nodes with multiple uses.
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  unsigned NumElts = DemandedElts.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();

    // Same element width: the demanded masks carry over unchanged.
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    // Wide destination elements built from several narrow source elements:
    // translate each demanded sub-chunk onto the corresponding source elts.
    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // Narrow destination elements packed inside wider source elements:
    // each demanded dst element maps to a bit-range of one source element.
    // TODO - bigendian once we have test coverage.
    if ((NumSrcEltBits % NumDstEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == DemandedBits.getBitWidth() &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (DemandedBits.getActiveBits() <= ExVT.getScalarSizeInBits())
      return Op.getOperand(0);
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we don't demand the inserted subvector, return the base vector.
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    unsigned NumVecElts = Vec.getValueType().getVectorNumElements();
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (CIdx && CIdx->getAPIntValue().ule(NumVecElts - NumSubElts))
      if (DemandedElts.extractBits(NumSubElts, CIdx->getZExtValue()) == 0)
        return Vec;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      // NOTE(review): M is converted to unsigned for (M - NumElts); correct
      // for in-range mask values, but the (M == (int)(i + NumElts)) spelling
      // would avoid the mixed-signedness subtraction — confirm equivalence.
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    // Give targets a chance to simplify their own nodes.
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}
796 
/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, recording the
/// original and replacement nodes in TLO (via TLO.CombineTo). Otherwise,
/// analyze the expression and return a mask of Known bits for the expression
/// (used to simplify the caller). The Known bits may only be accurate for those
/// bits in the OriginalDemandedBits and OriginalDemandedElts.
804 bool TargetLowering::SimplifyDemandedBits(
805     SDValue Op, const APInt &OriginalDemandedBits,
806     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
807     unsigned Depth, bool AssumeSingleUse) const {
808   unsigned BitWidth = OriginalDemandedBits.getBitWidth();
809   assert(Op.getScalarValueSizeInBits() == BitWidth &&
810          "Mask size mismatches value type size!");
811 
812   unsigned NumElts = OriginalDemandedElts.getBitWidth();
813   assert((!Op.getValueType().isVector() ||
814           NumElts == Op.getValueType().getVectorNumElements()) &&
815          "Unexpected vector size");
816 
817   APInt DemandedBits = OriginalDemandedBits;
818   APInt DemandedElts = OriginalDemandedElts;
819   SDLoc dl(Op);
820   auto &DL = TLO.DAG.getDataLayout();
821 
822   // Don't know anything.
823   Known = KnownBits(BitWidth);
824 
825   // Undef operand.
826   if (Op.isUndef())
827     return false;
828 
829   if (Op.getOpcode() == ISD::Constant) {
830     // We know all of the bits for a constant!
831     Known.One = cast<ConstantSDNode>(Op)->getAPIntValue();
832     Known.Zero = ~Known.One;
833     return false;
834   }
835 
836   // Other users may use these bits.
837   EVT VT = Op.getValueType();
838   if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
839     if (Depth != 0) {
840       // If not at the root, Just compute the Known bits to
841       // simplify things downstream.
842       Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
843       return false;
844     }
845     // If this is the root being simplified, allow it to have multiple uses,
846     // just set the DemandedBits/Elts to all bits.
847     DemandedBits = APInt::getAllOnesValue(BitWidth);
848     DemandedElts = APInt::getAllOnesValue(NumElts);
849   } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
850     // Not demanding any bits/elts from Op.
851     return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
852   } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
853     // Limit search depth.
854     return false;
855   }
856 
857   KnownBits Known2, KnownOut;
858   switch (Op.getOpcode()) {
859   case ISD::TargetConstant:
860     llvm_unreachable("Can't simplify this node");
861   case ISD::SCALAR_TO_VECTOR: {
862     if (!DemandedElts[0])
863       return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
864 
865     KnownBits SrcKnown;
866     SDValue Src = Op.getOperand(0);
867     unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
868     APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
869     if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
870       return true;
871     Known = SrcKnown.zextOrTrunc(BitWidth, false);
872     break;
873   }
874   case ISD::BUILD_VECTOR:
875     // Collect the known bits that are shared by every demanded element.
876     // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
877     Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
878     return false; // Don't fall through, will infinitely loop.
879   case ISD::LOAD: {
880     LoadSDNode *LD = cast<LoadSDNode>(Op);
881     if (getTargetConstantFromLoad(LD)) {
882       Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
883       return false; // Don't fall through, will infinitely loop.
884     } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
885       // If this is a ZEXTLoad and we are looking at the loaded value.
886       EVT VT = LD->getMemoryVT();
887       unsigned MemBits = VT.getScalarSizeInBits();
888       Known.Zero.setBitsFrom(MemBits);
889       return false; // Don't fall through, will infinitely loop.
890     }
891     break;
892   }
893   case ISD::INSERT_VECTOR_ELT: {
894     SDValue Vec = Op.getOperand(0);
895     SDValue Scl = Op.getOperand(1);
896     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
897     EVT VecVT = Vec.getValueType();
898 
899     // If index isn't constant, assume we need all vector elements AND the
900     // inserted element.
901     APInt DemandedVecElts(DemandedElts);
902     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
903       unsigned Idx = CIdx->getZExtValue();
904       DemandedVecElts.clearBit(Idx);
905 
906       // Inserted element is not required.
907       if (!DemandedElts[Idx])
908         return TLO.CombineTo(Op, Vec);
909     }
910 
911     KnownBits KnownScl;
912     unsigned NumSclBits = Scl.getScalarValueSizeInBits();
913     APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
914     if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
915       return true;
916 
917     Known = KnownScl.zextOrTrunc(BitWidth, false);
918 
919     KnownBits KnownVec;
920     if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
921                              Depth + 1))
922       return true;
923 
924     if (!!DemandedVecElts) {
925       Known.One &= KnownVec.One;
926       Known.Zero &= KnownVec.Zero;
927     }
928 
929     return false;
930   }
931   case ISD::INSERT_SUBVECTOR: {
932     SDValue Base = Op.getOperand(0);
933     SDValue Sub = Op.getOperand(1);
934     EVT SubVT = Sub.getValueType();
935     unsigned NumSubElts = SubVT.getVectorNumElements();
936 
937     // If index isn't constant, assume we need the original demanded base
938     // elements and ALL the inserted subvector elements.
939     APInt BaseElts = DemandedElts;
940     APInt SubElts = APInt::getAllOnesValue(NumSubElts);
941     if (isa<ConstantSDNode>(Op.getOperand(2))) {
942       const APInt &Idx = Op.getConstantOperandAPInt(2);
943       if (Idx.ule(NumElts - NumSubElts)) {
944         unsigned SubIdx = Idx.getZExtValue();
945         SubElts = DemandedElts.extractBits(NumSubElts, SubIdx);
946         BaseElts.insertBits(APInt::getNullValue(NumSubElts), SubIdx);
947       }
948     }
949 
950     KnownBits KnownSub, KnownBase;
951     if (SimplifyDemandedBits(Sub, DemandedBits, SubElts, KnownSub, TLO,
952                              Depth + 1))
953       return true;
954     if (SimplifyDemandedBits(Base, DemandedBits, BaseElts, KnownBase, TLO,
955                              Depth + 1))
956       return true;
957 
958     Known.Zero.setAllBits();
959     Known.One.setAllBits();
960     if (!!SubElts) {
961         Known.One &= KnownSub.One;
962         Known.Zero &= KnownSub.Zero;
963     }
964     if (!!BaseElts) {
965         Known.One &= KnownBase.One;
966         Known.Zero &= KnownBase.Zero;
967     }
968 
969     // Attempt to avoid multi-use src if we don't need anything from it.
970     if (!DemandedBits.isAllOnesValue() || !SubElts.isAllOnesValue() ||
971         !BaseElts.isAllOnesValue()) {
972       SDValue NewSub = SimplifyMultipleUseDemandedBits(
973           Sub, DemandedBits, SubElts, TLO.DAG, Depth + 1);
974       SDValue NewBase = SimplifyMultipleUseDemandedBits(
975           Base, DemandedBits, BaseElts, TLO.DAG, Depth + 1);
976       if (NewSub || NewBase) {
977         NewSub = NewSub ? NewSub : Sub;
978         NewBase = NewBase ? NewBase : Base;
979         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewBase, NewSub,
980                                         Op.getOperand(2));
981         return TLO.CombineTo(Op, NewOp);
982       }
983     }
984     break;
985   }
986   case ISD::EXTRACT_SUBVECTOR: {
987     // If index isn't constant, assume we need all the source vector elements.
988     SDValue Src = Op.getOperand(0);
989     ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
990     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
991     APInt SrcElts = APInt::getAllOnesValue(NumSrcElts);
992     if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
993       // Offset the demanded elts by the subvector index.
994       uint64_t Idx = SubIdx->getZExtValue();
995       SrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
996     }
997     if (SimplifyDemandedBits(Src, DemandedBits, SrcElts, Known, TLO, Depth + 1))
998       return true;
999 
1000     // Attempt to avoid multi-use src if we don't need anything from it.
1001     if (!DemandedBits.isAllOnesValue() || !SrcElts.isAllOnesValue()) {
1002       SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1003           Src, DemandedBits, SrcElts, TLO.DAG, Depth + 1);
1004       if (DemandedSrc) {
1005         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
1006                                         Op.getOperand(1));
1007         return TLO.CombineTo(Op, NewOp);
1008       }
1009     }
1010     break;
1011   }
1012   case ISD::CONCAT_VECTORS: {
1013     Known.Zero.setAllBits();
1014     Known.One.setAllBits();
1015     EVT SubVT = Op.getOperand(0).getValueType();
1016     unsigned NumSubVecs = Op.getNumOperands();
1017     unsigned NumSubElts = SubVT.getVectorNumElements();
1018     for (unsigned i = 0; i != NumSubVecs; ++i) {
1019       APInt DemandedSubElts =
1020           DemandedElts.extractBits(NumSubElts, i * NumSubElts);
1021       if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
1022                                Known2, TLO, Depth + 1))
1023         return true;
1024       // Known bits are shared by every demanded subvector element.
1025       if (!!DemandedSubElts) {
1026         Known.One &= Known2.One;
1027         Known.Zero &= Known2.Zero;
1028       }
1029     }
1030     break;
1031   }
1032   case ISD::VECTOR_SHUFFLE: {
1033     ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();
1034 
1035     // Collect demanded elements from shuffle operands..
1036     APInt DemandedLHS(NumElts, 0);
1037     APInt DemandedRHS(NumElts, 0);
1038     for (unsigned i = 0; i != NumElts; ++i) {
1039       if (!DemandedElts[i])
1040         continue;
1041       int M = ShuffleMask[i];
1042       if (M < 0) {
1043         // For UNDEF elements, we don't know anything about the common state of
1044         // the shuffle result.
1045         DemandedLHS.clearAllBits();
1046         DemandedRHS.clearAllBits();
1047         break;
1048       }
1049       assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
1050       if (M < (int)NumElts)
1051         DemandedLHS.setBit(M);
1052       else
1053         DemandedRHS.setBit(M - NumElts);
1054     }
1055 
1056     if (!!DemandedLHS || !!DemandedRHS) {
1057       SDValue Op0 = Op.getOperand(0);
1058       SDValue Op1 = Op.getOperand(1);
1059 
1060       Known.Zero.setAllBits();
1061       Known.One.setAllBits();
1062       if (!!DemandedLHS) {
1063         if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
1064                                  Depth + 1))
1065           return true;
1066         Known.One &= Known2.One;
1067         Known.Zero &= Known2.Zero;
1068       }
1069       if (!!DemandedRHS) {
1070         if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
1071                                  Depth + 1))
1072           return true;
1073         Known.One &= Known2.One;
1074         Known.Zero &= Known2.Zero;
1075       }
1076 
1077       // Attempt to avoid multi-use ops if we don't need anything from them.
1078       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1079           Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
1080       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1081           Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
1082       if (DemandedOp0 || DemandedOp1) {
1083         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1084         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1085         SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
1086         return TLO.CombineTo(Op, NewOp);
1087       }
1088     }
1089     break;
1090   }
1091   case ISD::AND: {
1092     SDValue Op0 = Op.getOperand(0);
1093     SDValue Op1 = Op.getOperand(1);
1094 
1095     // If the RHS is a constant, check to see if the LHS would be zero without
1096     // using the bits from the RHS.  Below, we use knowledge about the RHS to
1097     // simplify the LHS, here we're using information from the LHS to simplify
1098     // the RHS.
1099     if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
1100       // Do not increment Depth here; that can cause an infinite loop.
1101       KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
1102       // If the LHS already has zeros where RHSC does, this 'and' is dead.
1103       if ((LHSKnown.Zero & DemandedBits) ==
1104           (~RHSC->getAPIntValue() & DemandedBits))
1105         return TLO.CombineTo(Op, Op0);
1106 
1107       // If any of the set bits in the RHS are known zero on the LHS, shrink
1108       // the constant.
1109       if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits, TLO))
1110         return true;
1111 
1112       // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
1113       // constant, but if this 'and' is only clearing bits that were just set by
1114       // the xor, then this 'and' can be eliminated by shrinking the mask of
1115       // the xor. For example, for a 32-bit X:
1116       // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
1117       if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
1118           LHSKnown.One == ~RHSC->getAPIntValue()) {
1119         SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
1120         return TLO.CombineTo(Op, Xor);
1121       }
1122     }
1123 
1124     if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
1125                              Depth + 1))
1126       return true;
1127     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1128     if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
1129                              Known2, TLO, Depth + 1))
1130       return true;
1131     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1132 
1133     // Attempt to avoid multi-use ops if we don't need anything from them.
1134     if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
1135       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1136           Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1137       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1138           Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1139       if (DemandedOp0 || DemandedOp1) {
1140         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1141         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1142         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
1143         return TLO.CombineTo(Op, NewOp);
1144       }
1145     }
1146 
1147     // If all of the demanded bits are known one on one side, return the other.
1148     // These bits cannot contribute to the result of the 'and'.
1149     if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
1150       return TLO.CombineTo(Op, Op0);
1151     if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
1152       return TLO.CombineTo(Op, Op1);
1153     // If all of the demanded bits in the inputs are known zeros, return zero.
1154     if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
1155       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
1156     // If the RHS is a constant, see if we can simplify it.
1157     if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, TLO))
1158       return true;
1159     // If the operation can be done in a smaller type, do so.
1160     if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
1161       return true;
1162 
1163     // Output known-1 bits are only known if set in both the LHS & RHS.
1164     Known.One &= Known2.One;
1165     // Output known-0 are known to be clear if zero in either the LHS | RHS.
1166     Known.Zero |= Known2.Zero;
1167     break;
1168   }
1169   case ISD::OR: {
1170     SDValue Op0 = Op.getOperand(0);
1171     SDValue Op1 = Op.getOperand(1);
1172 
1173     if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
1174                              Depth + 1))
1175       return true;
1176     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1177     if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
1178                              Known2, TLO, Depth + 1))
1179       return true;
1180     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1181 
1182     // Attempt to avoid multi-use ops if we don't need anything from them.
1183     if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
1184       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1185           Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1186       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1187           Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1188       if (DemandedOp0 || DemandedOp1) {
1189         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1190         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1191         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
1192         return TLO.CombineTo(Op, NewOp);
1193       }
1194     }
1195 
1196     // If all of the demanded bits are known zero on one side, return the other.
1197     // These bits cannot contribute to the result of the 'or'.
1198     if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
1199       return TLO.CombineTo(Op, Op0);
1200     if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
1201       return TLO.CombineTo(Op, Op1);
1202     // If the RHS is a constant, see if we can simplify it.
1203     if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
1204       return true;
1205     // If the operation can be done in a smaller type, do so.
1206     if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
1207       return true;
1208 
1209     // Output known-0 bits are only known if clear in both the LHS & RHS.
1210     Known.Zero &= Known2.Zero;
1211     // Output known-1 are known to be set if set in either the LHS | RHS.
1212     Known.One |= Known2.One;
1213     break;
1214   }
1215   case ISD::XOR: {
1216     SDValue Op0 = Op.getOperand(0);
1217     SDValue Op1 = Op.getOperand(1);
1218 
1219     if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
1220                              Depth + 1))
1221       return true;
1222     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1223     if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
1224                              Depth + 1))
1225       return true;
1226     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1227 
1228     // Attempt to avoid multi-use ops if we don't need anything from them.
1229     if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
1230       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1231           Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1232       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1233           Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1234       if (DemandedOp0 || DemandedOp1) {
1235         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1236         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1237         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
1238         return TLO.CombineTo(Op, NewOp);
1239       }
1240     }
1241 
1242     // If all of the demanded bits are known zero on one side, return the other.
1243     // These bits cannot contribute to the result of the 'xor'.
1244     if (DemandedBits.isSubsetOf(Known.Zero))
1245       return TLO.CombineTo(Op, Op0);
1246     if (DemandedBits.isSubsetOf(Known2.Zero))
1247       return TLO.CombineTo(Op, Op1);
1248     // If the operation can be done in a smaller type, do so.
1249     if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
1250       return true;
1251 
1252     // If all of the unknown bits are known to be zero on one side or the other
1253     // (but not both) turn this into an *inclusive* or.
1254     //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
1255     if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
1256       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));
1257 
1258     // Output known-0 bits are known if clear or set in both the LHS & RHS.
1259     KnownOut.Zero = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1260     // Output known-1 are known to be set if set in only one of the LHS, RHS.
1261     KnownOut.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1262 
1263     if (ConstantSDNode *C = isConstOrConstSplat(Op1)) {
1264       // If one side is a constant, and all of the known set bits on the other
1265       // side are also set in the constant, turn this into an AND, as we know
1266       // the bits will be cleared.
1267       //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
1268       // NB: it is okay if more bits are known than are requested
1269       if (C->getAPIntValue() == Known2.One) {
1270         SDValue ANDC =
1271             TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
1272         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
1273       }
1274 
1275       // If the RHS is a constant, see if we can change it. Don't alter a -1
1276       // constant because that's a 'not' op, and that is better for combining
1277       // and codegen.
1278       if (!C->isAllOnesValue()) {
1279         if (DemandedBits.isSubsetOf(C->getAPIntValue())) {
1280           // We're flipping all demanded bits. Flip the undemanded bits too.
1281           SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
1282           return TLO.CombineTo(Op, New);
1283         }
1284         // If we can't turn this into a 'not', try to shrink the constant.
1285         if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
1286           return true;
1287       }
1288     }
1289 
1290     Known = std::move(KnownOut);
1291     break;
1292   }
1293   case ISD::SELECT:
1294     if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
1295                              Depth + 1))
1296       return true;
1297     if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
1298                              Depth + 1))
1299       return true;
1300     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1301     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1302 
1303     // If the operands are constants, see if we can simplify them.
1304     if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
1305       return true;
1306 
1307     // Only known if known in both the LHS and RHS.
1308     Known.One &= Known2.One;
1309     Known.Zero &= Known2.Zero;
1310     break;
1311   case ISD::SELECT_CC:
1312     if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
1313                              Depth + 1))
1314       return true;
1315     if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
1316                              Depth + 1))
1317       return true;
1318     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1319     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1320 
1321     // If the operands are constants, see if we can simplify them.
1322     if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
1323       return true;
1324 
1325     // Only known if known in both the LHS and RHS.
1326     Known.One &= Known2.One;
1327     Known.Zero &= Known2.Zero;
1328     break;
1329   case ISD::SETCC: {
1330     SDValue Op0 = Op.getOperand(0);
1331     SDValue Op1 = Op.getOperand(1);
1332     ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1333     // If (1) we only need the sign-bit, (2) the setcc operands are the same
1334     // width as the setcc result, and (3) the result of a setcc conforms to 0 or
1335     // -1, we may be able to bypass the setcc.
1336     if (DemandedBits.isSignMask() &&
1337         Op0.getScalarValueSizeInBits() == BitWidth &&
1338         getBooleanContents(Op0.getValueType()) ==
1339             BooleanContent::ZeroOrNegativeOneBooleanContent) {
1340       // If we're testing X < 0, then this compare isn't needed - just use X!
1341       // FIXME: We're limiting to integer types here, but this should also work
1342       // if we don't care about FP signed-zero. The use of SETLT with FP means
1343       // that we don't care about NaNs.
1344       if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
1345           (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
1346         return TLO.CombineTo(Op, Op0);
1347 
1348       // TODO: Should we check for other forms of sign-bit comparisons?
1349       // Examples: X <= -1, X >= 0
1350     }
1351     if (getBooleanContents(Op0.getValueType()) ==
1352             TargetLowering::ZeroOrOneBooleanContent &&
1353         BitWidth > 1)
1354       Known.Zero.setBitsFrom(1);
1355     break;
1356   }
1357   case ISD::SHL: {
1358     SDValue Op0 = Op.getOperand(0);
1359     SDValue Op1 = Op.getOperand(1);
1360 
1361     if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
1362       // If the shift count is an invalid immediate, don't do anything.
1363       if (SA->getAPIntValue().uge(BitWidth))
1364         break;
1365 
1366       unsigned ShAmt = SA->getZExtValue();
1367       if (ShAmt == 0)
1368         return TLO.CombineTo(Op, Op0);
1369 
1370       // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
1371       // single shift.  We can do this if the bottom bits (which are shifted
1372       // out) are never demanded.
1373       // TODO - support non-uniform vector amounts.
1374       if (Op0.getOpcode() == ISD::SRL) {
1375         if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
1376           if (ConstantSDNode *SA2 =
1377                   isConstOrConstSplat(Op0.getOperand(1), DemandedElts)) {
1378             if (SA2->getAPIntValue().ult(BitWidth)) {
1379               unsigned C1 = SA2->getZExtValue();
1380               unsigned Opc = ISD::SHL;
1381               int Diff = ShAmt - C1;
1382               if (Diff < 0) {
1383                 Diff = -Diff;
1384                 Opc = ISD::SRL;
1385               }
1386 
1387               SDValue NewSA = TLO.DAG.getConstant(Diff, dl, Op1.getValueType());
1388               return TLO.CombineTo(
1389                   Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
1390             }
1391           }
1392         }
1393       }
1394 
1395       APInt InDemandedMask = DemandedBits.lshr(ShAmt);
1396       if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1397                                Depth + 1))
1398         return true;
1399 
1400       // Try shrinking the operation as long as the shift amount will still be
1401       // in range.
1402       if ((ShAmt < DemandedBits.getActiveBits()) &&
1403           ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
1404         return true;
1405 
1406       // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
1407       // are not demanded. This will likely allow the anyext to be folded away.
1408       if (Op0.getOpcode() == ISD::ANY_EXTEND) {
1409         SDValue InnerOp = Op0.getOperand(0);
1410         EVT InnerVT = InnerOp.getValueType();
1411         unsigned InnerBits = InnerVT.getScalarSizeInBits();
1412         if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
1413             isTypeDesirableForOp(ISD::SHL, InnerVT)) {
1414           EVT ShTy = getShiftAmountTy(InnerVT, DL);
1415           if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
1416             ShTy = InnerVT;
1417           SDValue NarrowShl =
1418               TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
1419                               TLO.DAG.getConstant(ShAmt, dl, ShTy));
1420           return TLO.CombineTo(
1421               Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
1422         }
1423         // Repeat the SHL optimization above in cases where an extension
1424         // intervenes: (shl (anyext (shr x, c1)), c2) to
1425         // (shl (anyext x), c2-c1).  This requires that the bottom c1 bits
1426         // aren't demanded (as above) and that the shifted upper c1 bits of
1427         // x aren't demanded.
1428         if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
1429             InnerOp.hasOneUse()) {
1430           if (ConstantSDNode *SA2 =
1431                   isConstOrConstSplat(InnerOp.getOperand(1))) {
1432             unsigned InnerShAmt = SA2->getLimitedValue(InnerBits);
1433             if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1434                 DemandedBits.getActiveBits() <=
1435                     (InnerBits - InnerShAmt + ShAmt) &&
1436                 DemandedBits.countTrailingZeros() >= ShAmt) {
1437               SDValue NewSA = TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
1438                                                   Op1.getValueType());
1439               SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
1440                                                InnerOp.getOperand(0));
1441               return TLO.CombineTo(
1442                   Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
1443             }
1444           }
1445         }
1446       }
1447 
1448       Known.Zero <<= ShAmt;
1449       Known.One <<= ShAmt;
1450       // low bits known zero.
1451       Known.Zero.setLowBits(ShAmt);
1452     }
1453     break;
1454   }
1455   case ISD::SRL: {
1456     SDValue Op0 = Op.getOperand(0);
1457     SDValue Op1 = Op.getOperand(1);
1458 
1459     if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
1460       // If the shift count is an invalid immediate, don't do anything.
1461       if (SA->getAPIntValue().uge(BitWidth))
1462         break;
1463 
1464       unsigned ShAmt = SA->getZExtValue();
1465       if (ShAmt == 0)
1466         return TLO.CombineTo(Op, Op0);
1467 
1468       EVT ShiftVT = Op1.getValueType();
1469       APInt InDemandedMask = (DemandedBits << ShAmt);
1470 
1471       // If the shift is exact, then it does demand the low bits (and knows that
1472       // they are zero).
1473       if (Op->getFlags().hasExact())
1474         InDemandedMask.setLowBits(ShAmt);
1475 
1476       // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
1477       // single shift.  We can do this if the top bits (which are shifted out)
1478       // are never demanded.
1479       // TODO - support non-uniform vector amounts.
1480       if (Op0.getOpcode() == ISD::SHL) {
1481         if (ConstantSDNode *SA2 =
1482                 isConstOrConstSplat(Op0.getOperand(1), DemandedElts)) {
1483           if (!DemandedBits.intersects(
1484                   APInt::getHighBitsSet(BitWidth, ShAmt))) {
1485             if (SA2->getAPIntValue().ult(BitWidth)) {
1486               unsigned C1 = SA2->getZExtValue();
1487               unsigned Opc = ISD::SRL;
1488               int Diff = ShAmt - C1;
1489               if (Diff < 0) {
1490                 Diff = -Diff;
1491                 Opc = ISD::SHL;
1492               }
1493 
1494               SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
1495               return TLO.CombineTo(
1496                   Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
1497             }
1498           }
1499         }
1500       }
1501 
1502       // Compute the new bits that are at the top now.
1503       if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1504                                Depth + 1))
1505         return true;
1506       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1507       Known.Zero.lshrInPlace(ShAmt);
1508       Known.One.lshrInPlace(ShAmt);
1509 
1510       Known.Zero.setHighBits(ShAmt); // High bits known zero.
1511     }
1512     break;
1513   }
1514   case ISD::SRA: {
1515     SDValue Op0 = Op.getOperand(0);
1516     SDValue Op1 = Op.getOperand(1);
1517 
1518     // If we only want bits that already match the signbit then we don't need
1519     // to shift.
1520     unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
1521     if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >=
1522         NumHiDemandedBits)
1523       return TLO.CombineTo(Op, Op0);
1524 
1525     // If this is an arithmetic shift right and only the low-bit is set, we can
1526     // always convert this into a logical shr, even if the shift amount is
1527     // variable.  The low bit of the shift cannot be an input sign bit unless
1528     // the shift amount is >= the size of the datatype, which is undefined.
1529     if (DemandedBits.isOneValue())
1530       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1));
1531 
1532     if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
1533       // If the shift count is an invalid immediate, don't do anything.
1534       if (SA->getAPIntValue().uge(BitWidth))
1535         break;
1536 
1537       unsigned ShAmt = SA->getZExtValue();
1538       if (ShAmt == 0)
1539         return TLO.CombineTo(Op, Op0);
1540 
1541       APInt InDemandedMask = (DemandedBits << ShAmt);
1542 
1543       // If the shift is exact, then it does demand the low bits (and knows that
1544       // they are zero).
1545       if (Op->getFlags().hasExact())
1546         InDemandedMask.setLowBits(ShAmt);
1547 
1548       // If any of the demanded bits are produced by the sign extension, we also
1549       // demand the input sign bit.
1550       if (DemandedBits.countLeadingZeros() < ShAmt)
1551         InDemandedMask.setSignBit();
1552 
1553       if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1554                                Depth + 1))
1555         return true;
1556       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1557       Known.Zero.lshrInPlace(ShAmt);
1558       Known.One.lshrInPlace(ShAmt);
1559 
1560       // If the input sign bit is known to be zero, or if none of the top bits
1561       // are demanded, turn this into an unsigned shift right.
1562       if (Known.Zero[BitWidth - ShAmt - 1] ||
1563           DemandedBits.countLeadingZeros() >= ShAmt) {
1564         SDNodeFlags Flags;
1565         Flags.setExact(Op->getFlags().hasExact());
1566         return TLO.CombineTo(
1567             Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags));
1568       }
1569 
1570       int Log2 = DemandedBits.exactLogBase2();
1571       if (Log2 >= 0) {
1572         // The bit must come from the sign.
1573         SDValue NewSA =
1574             TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, Op1.getValueType());
1575         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA));
1576       }
1577 
1578       if (Known.One[BitWidth - ShAmt - 1])
1579         // New bits are known one.
1580         Known.One.setHighBits(ShAmt);
1581 
1582       // Attempt to avoid multi-use ops if we don't need anything from them.
1583       if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
1584         SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1585             Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
1586         if (DemandedOp0) {
1587           SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1);
1588           return TLO.CombineTo(Op, NewOp);
1589         }
1590       }
1591     }
1592     break;
1593   }
1594   case ISD::FSHL:
1595   case ISD::FSHR: {
1596     SDValue Op0 = Op.getOperand(0);
1597     SDValue Op1 = Op.getOperand(1);
1598     SDValue Op2 = Op.getOperand(2);
1599     bool IsFSHL = (Op.getOpcode() == ISD::FSHL);
1600 
1601     if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) {
1602       unsigned Amt = SA->getAPIntValue().urem(BitWidth);
1603 
1604       // For fshl, 0-shift returns the 1st arg.
1605       // For fshr, 0-shift returns the 2nd arg.
1606       if (Amt == 0) {
1607         if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
1608                                  Known, TLO, Depth + 1))
1609           return true;
1610         break;
1611       }
1612 
1613       // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt))
1614       // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt)
1615       APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
1616       APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt);
1617       if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
1618                                Depth + 1))
1619         return true;
1620       if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
1621                                Depth + 1))
1622         return true;
1623 
1624       Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt));
1625       Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt));
1626       Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1627       Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1628       Known.One |= Known2.One;
1629       Known.Zero |= Known2.Zero;
1630     }
1631     break;
1632   }
1633   case ISD::ROTL:
1634   case ISD::ROTR: {
1635     SDValue Op0 = Op.getOperand(0);
1636 
    // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
1638     if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1))
1639       return TLO.CombineTo(Op, Op0);
1640     break;
1641   }
1642   case ISD::BITREVERSE: {
1643     SDValue Src = Op.getOperand(0);
1644     APInt DemandedSrcBits = DemandedBits.reverseBits();
1645     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
1646                              Depth + 1))
1647       return true;
1648     Known.One = Known2.One.reverseBits();
1649     Known.Zero = Known2.Zero.reverseBits();
1650     break;
1651   }
1652   case ISD::BSWAP: {
1653     SDValue Src = Op.getOperand(0);
1654     APInt DemandedSrcBits = DemandedBits.byteSwap();
1655     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
1656                              Depth + 1))
1657       return true;
1658     Known.One = Known2.One.byteSwap();
1659     Known.Zero = Known2.Zero.byteSwap();
1660     break;
1661   }
1662   case ISD::SIGN_EXTEND_INREG: {
1663     SDValue Op0 = Op.getOperand(0);
1664     EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1665     unsigned ExVTBits = ExVT.getScalarSizeInBits();
1666 
1667     // If we only care about the highest bit, don't bother shifting right.
1668     if (DemandedBits.isSignMask()) {
1669       unsigned NumSignBits =
1670           TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
1671       bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1;
1672       // However if the input is already sign extended we expect the sign
1673       // extension to be dropped altogether later and do not simplify.
1674       if (!AlreadySignExtended) {
1675         // Compute the correct shift amount type, which must be getShiftAmountTy
1676         // for scalar types after legalization.
1677         EVT ShiftAmtTy = VT;
1678         if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
1679           ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
1680 
1681         SDValue ShiftAmt =
1682             TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy);
1683         return TLO.CombineTo(Op,
1684                              TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt));
1685       }
1686     }
1687 
1688     // If none of the extended bits are demanded, eliminate the sextinreg.
1689     if (DemandedBits.getActiveBits() <= ExVTBits)
1690       return TLO.CombineTo(Op, Op0);
1691 
1692     APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits);
1693 
1694     // Since the sign extended bits are demanded, we know that the sign
1695     // bit is demanded.
1696     InputDemandedBits.setBit(ExVTBits - 1);
1697 
1698     if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1))
1699       return true;
1700     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1701 
1702     // If the sign bit of the input is known set or clear, then we know the
1703     // top bits of the result.
1704 
1705     // If the input sign bit is known zero, convert this into a zero extension.
1706     if (Known.Zero[ExVTBits - 1])
1707       return TLO.CombineTo(
1708           Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT.getScalarType()));
1709 
1710     APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits);
1711     if (Known.One[ExVTBits - 1]) { // Input sign bit known set
1712       Known.One.setBitsFrom(ExVTBits);
1713       Known.Zero &= Mask;
1714     } else { // Input sign bit unknown
1715       Known.Zero &= Mask;
1716       Known.One &= Mask;
1717     }
1718     break;
1719   }
1720   case ISD::BUILD_PAIR: {
1721     EVT HalfVT = Op.getOperand(0).getValueType();
1722     unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
1723 
1724     APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
1725     APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
1726 
1727     KnownBits KnownLo, KnownHi;
1728 
1729     if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1))
1730       return true;
1731 
1732     if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1))
1733       return true;
1734 
1735     Known.Zero = KnownLo.Zero.zext(BitWidth) |
1736                  KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth);
1737 
1738     Known.One = KnownLo.One.zext(BitWidth) |
1739                 KnownHi.One.zext(BitWidth).shl(HalfBitWidth);
1740     break;
1741   }
1742   case ISD::ZERO_EXTEND:
1743   case ISD::ZERO_EXTEND_VECTOR_INREG: {
1744     SDValue Src = Op.getOperand(0);
1745     EVT SrcVT = Src.getValueType();
1746     unsigned InBits = SrcVT.getScalarSizeInBits();
1747     unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1748     bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG;
1749 
1750     // If none of the top bits are demanded, convert this into an any_extend.
1751     if (DemandedBits.getActiveBits() <= InBits) {
1752       // If we only need the non-extended bits of the bottom element
1753       // then we can just bitcast to the result.
1754       if (IsVecInReg && DemandedElts == 1 &&
1755           VT.getSizeInBits() == SrcVT.getSizeInBits() &&
1756           TLO.DAG.getDataLayout().isLittleEndian())
1757         return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1758 
1759       unsigned Opc =
1760           IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
1761       if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1762         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1763     }
1764 
1765     APInt InDemandedBits = DemandedBits.trunc(InBits);
1766     APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1767     if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1768                              Depth + 1))
1769       return true;
1770     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1771     assert(Known.getBitWidth() == InBits && "Src width has changed?");
1772     Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
1773     break;
1774   }
1775   case ISD::SIGN_EXTEND:
1776   case ISD::SIGN_EXTEND_VECTOR_INREG: {
1777     SDValue Src = Op.getOperand(0);
1778     EVT SrcVT = Src.getValueType();
1779     unsigned InBits = SrcVT.getScalarSizeInBits();
1780     unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1781     bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG;
1782 
1783     // If none of the top bits are demanded, convert this into an any_extend.
1784     if (DemandedBits.getActiveBits() <= InBits) {
1785       // If we only need the non-extended bits of the bottom element
1786       // then we can just bitcast to the result.
1787       if (IsVecInReg && DemandedElts == 1 &&
1788           VT.getSizeInBits() == SrcVT.getSizeInBits() &&
1789           TLO.DAG.getDataLayout().isLittleEndian())
1790         return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1791 
1792       unsigned Opc =
1793           IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
1794       if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1795         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1796     }
1797 
1798     APInt InDemandedBits = DemandedBits.trunc(InBits);
1799     APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1800 
1801     // Since some of the sign extended bits are demanded, we know that the sign
1802     // bit is demanded.
1803     InDemandedBits.setBit(InBits - 1);
1804 
1805     if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1806                              Depth + 1))
1807       return true;
1808     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1809     assert(Known.getBitWidth() == InBits && "Src width has changed?");
1810 
1811     // If the sign bit is known one, the top bits match.
1812     Known = Known.sext(BitWidth);
1813 
1814     // If the sign bit is known zero, convert this to a zero extend.
1815     if (Known.isNonNegative()) {
1816       unsigned Opc =
1817           IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND;
1818       if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1819         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1820     }
1821     break;
1822   }
1823   case ISD::ANY_EXTEND:
1824   case ISD::ANY_EXTEND_VECTOR_INREG: {
1825     SDValue Src = Op.getOperand(0);
1826     EVT SrcVT = Src.getValueType();
1827     unsigned InBits = SrcVT.getScalarSizeInBits();
1828     unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1829     bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG;
1830 
1831     // If we only need the bottom element then we can just bitcast.
1832     // TODO: Handle ANY_EXTEND?
1833     if (IsVecInReg && DemandedElts == 1 &&
1834         VT.getSizeInBits() == SrcVT.getSizeInBits() &&
1835         TLO.DAG.getDataLayout().isLittleEndian())
1836       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1837 
1838     APInt InDemandedBits = DemandedBits.trunc(InBits);
1839     APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1840     if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1841                              Depth + 1))
1842       return true;
1843     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1844     assert(Known.getBitWidth() == InBits && "Src width has changed?");
1845     Known = Known.zext(BitWidth, false /* => any extend */);
1846 
1847     // Attempt to avoid multi-use ops if we don't need anything from them.
1848     if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1849             Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
1850       return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
1851     break;
1852   }
1853   case ISD::TRUNCATE: {
1854     SDValue Src = Op.getOperand(0);
1855 
1856     // Simplify the input, using demanded bit information, and compute the known
1857     // zero/one bits live out.
1858     unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
1859     APInt TruncMask = DemandedBits.zext(OperandBitWidth);
1860     if (SimplifyDemandedBits(Src, TruncMask, Known, TLO, Depth + 1))
1861       return true;
1862     Known = Known.trunc(BitWidth);
1863 
1864     // Attempt to avoid multi-use ops if we don't need anything from them.
1865     if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1866             Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))
1867       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc));
1868 
1869     // If the input is only used by this truncate, see if we can shrink it based
1870     // on the known demanded bits.
1871     if (Src.getNode()->hasOneUse()) {
1872       switch (Src.getOpcode()) {
1873       default:
1874         break;
1875       case ISD::SRL:
1876         // Shrink SRL by a constant if none of the high bits shifted in are
1877         // demanded.
1878         if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT))
1879           // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
1880           // undesirable.
1881           break;
1882 
1883         SDValue ShAmt = Src.getOperand(1);
1884         auto *ShAmtC = dyn_cast<ConstantSDNode>(ShAmt);
1885         if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth))
1886           break;
1887         uint64_t ShVal = ShAmtC->getZExtValue();
1888 
1889         APInt HighBits =
1890             APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth);
1891         HighBits.lshrInPlace(ShVal);
1892         HighBits = HighBits.trunc(BitWidth);
1893 
1894         if (!(HighBits & DemandedBits)) {
1895           // None of the shifted in bits are needed.  Add a truncate of the
1896           // shift input, then shift it.
1897           if (TLO.LegalTypes())
1898             ShAmt = TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(VT, DL));
1899           SDValue NewTrunc =
1900               TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0));
1901           return TLO.CombineTo(
1902               Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, ShAmt));
1903         }
1904         break;
1905       }
1906     }
1907 
1908     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1909     break;
1910   }
1911   case ISD::AssertZext: {
1912     // AssertZext demands all of the high bits, plus any of the low bits
1913     // demanded by its users.
1914     EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1915     APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits());
1916     if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,
1917                              TLO, Depth + 1))
1918       return true;
1919     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1920 
1921     Known.Zero |= ~InMask;
1922     break;
1923   }
1924   case ISD::EXTRACT_VECTOR_ELT: {
1925     SDValue Src = Op.getOperand(0);
1926     SDValue Idx = Op.getOperand(1);
1927     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1928     unsigned EltBitWidth = Src.getScalarValueSizeInBits();
1929 
1930     // Demand the bits from every vector element without a constant index.
1931     APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
1932     if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
1933       if (CIdx->getAPIntValue().ult(NumSrcElts))
1934         DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());
1935 
1936     // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
1937     // anything about the extended bits.
1938     APInt DemandedSrcBits = DemandedBits;
1939     if (BitWidth > EltBitWidth)
1940       DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);
1941 
1942     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
1943                              Depth + 1))
1944       return true;
1945 
1946     // Attempt to avoid multi-use ops if we don't need anything from them.
1947     if (!DemandedSrcBits.isAllOnesValue() ||
1948         !DemandedSrcElts.isAllOnesValue()) {
1949       if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1950               Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
1951         SDValue NewOp =
1952             TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx);
1953         return TLO.CombineTo(Op, NewOp);
1954       }
1955     }
1956 
1957     Known = Known2;
1958     if (BitWidth > EltBitWidth)
1959       Known = Known.zext(BitWidth, false /* => any extend */);
1960     break;
1961   }
1962   case ISD::BITCAST: {
1963     SDValue Src = Op.getOperand(0);
1964     EVT SrcVT = Src.getValueType();
1965     unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
1966 
1967     // If this is an FP->Int bitcast and if the sign bit is the only
1968     // thing demanded, turn this into a FGETSIGN.
1969     if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() &&
1970         DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) &&
1971         SrcVT.isFloatingPoint()) {
1972       bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT);
1973       bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
1974       if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 &&
1975           SrcVT != MVT::f128) {
1976         // Cannot eliminate/lower SHL for f128 yet.
1977         EVT Ty = OpVTLegal ? VT : MVT::i32;
1978         // Make a FGETSIGN + SHL to move the sign bit into the appropriate
1979         // place.  We expect the SHL to be eliminated by other optimizations.
1980         SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src);
1981         unsigned OpVTSizeInBits = Op.getValueSizeInBits();
1982         if (!OpVTLegal && OpVTSizeInBits > 32)
1983           Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign);
1984         unsigned ShVal = Op.getValueSizeInBits() - 1;
1985         SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT);
1986         return TLO.CombineTo(Op,
1987                              TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt));
1988       }
1989     }
1990 
1991     // Bitcast from a vector using SimplifyDemanded Bits/VectorElts.
1992     // Demand the elt/bit if any of the original elts/bits are demanded.
1993     // TODO - bigendian once we have test coverage.
1994     if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 &&
1995         TLO.DAG.getDataLayout().isLittleEndian()) {
1996       unsigned Scale = BitWidth / NumSrcEltBits;
1997       unsigned NumSrcElts = SrcVT.getVectorNumElements();
1998       APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
1999       APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
2000       for (unsigned i = 0; i != Scale; ++i) {
2001         unsigned Offset = i * NumSrcEltBits;
2002         APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
2003         if (!Sub.isNullValue()) {
2004           DemandedSrcBits |= Sub;
2005           for (unsigned j = 0; j != NumElts; ++j)
2006             if (DemandedElts[j])
2007               DemandedSrcElts.setBit((j * Scale) + i);
2008         }
2009       }
2010 
2011       APInt KnownSrcUndef, KnownSrcZero;
2012       if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2013                                      KnownSrcZero, TLO, Depth + 1))
2014         return true;
2015 
2016       KnownBits KnownSrcBits;
2017       if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2018                                KnownSrcBits, TLO, Depth + 1))
2019         return true;
2020     } else if ((NumSrcEltBits % BitWidth) == 0 &&
2021                TLO.DAG.getDataLayout().isLittleEndian()) {
2022       unsigned Scale = NumSrcEltBits / BitWidth;
2023       unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
2024       APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
2025       APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
2026       for (unsigned i = 0; i != NumElts; ++i)
2027         if (DemandedElts[i]) {
2028           unsigned Offset = (i % Scale) * BitWidth;
2029           DemandedSrcBits.insertBits(DemandedBits, Offset);
2030           DemandedSrcElts.setBit(i / Scale);
2031         }
2032 
2033       if (SrcVT.isVector()) {
2034         APInt KnownSrcUndef, KnownSrcZero;
2035         if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2036                                        KnownSrcZero, TLO, Depth + 1))
2037           return true;
2038       }
2039 
2040       KnownBits KnownSrcBits;
2041       if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2042                                KnownSrcBits, TLO, Depth + 1))
2043         return true;
2044     }
2045 
2046     // If this is a bitcast, let computeKnownBits handle it.  Only do this on a
2047     // recursive call where Known may be useful to the caller.
2048     if (Depth > 0) {
2049       Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
2050       return false;
2051     }
2052     break;
2053   }
2054   case ISD::ADD:
2055   case ISD::MUL:
2056   case ISD::SUB: {
2057     // Add, Sub, and Mul don't demand any bits in positions beyond that
2058     // of the highest bit demanded of them.
2059     SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
2060     SDNodeFlags Flags = Op.getNode()->getFlags();
2061     unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros();
2062     APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
2063     if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO,
2064                              Depth + 1) ||
2065         SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO,
2066                              Depth + 1) ||
2067         // See if the operation should be performed at a smaller bit width.
2068         ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) {
2069       if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
2070         // Disable the nsw and nuw flags. We can no longer guarantee that we
2071         // won't wrap after simplification.
2072         Flags.setNoSignedWrap(false);
2073         Flags.setNoUnsignedWrap(false);
2074         SDValue NewOp =
2075             TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2076         return TLO.CombineTo(Op, NewOp);
2077       }
2078       return true;
2079     }
2080 
2081     // Attempt to avoid multi-use ops if we don't need anything from them.
2082     if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
2083       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2084           Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
2085       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2086           Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
2087       if (DemandedOp0 || DemandedOp1) {
2088         Flags.setNoSignedWrap(false);
2089         Flags.setNoUnsignedWrap(false);
2090         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2091         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2092         SDValue NewOp =
2093             TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2094         return TLO.CombineTo(Op, NewOp);
2095       }
2096     }
2097 
2098     // If we have a constant operand, we may be able to turn it into -1 if we
2099     // do not demand the high bits. This can make the constant smaller to
2100     // encode, allow more general folding, or match specialized instruction
2101     // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that
2102     // is probably not useful (and could be detrimental).
2103     ConstantSDNode *C = isConstOrConstSplat(Op1);
2104     APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ);
2105     if (C && !C->isAllOnesValue() && !C->isOne() &&
2106         (C->getAPIntValue() | HighMask).isAllOnesValue()) {
2107       SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
2108       // Disable the nsw and nuw flags. We can no longer guarantee that we
2109       // won't wrap after simplification.
2110       Flags.setNoSignedWrap(false);
2111       Flags.setNoUnsignedWrap(false);
2112       SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags);
2113       return TLO.CombineTo(Op, NewOp);
2114     }
2115 
2116     LLVM_FALLTHROUGH;
2117   }
2118   default:
2119     if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
2120       if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,
2121                                             Known, TLO, Depth))
2122         return true;
2123       break;
2124     }
2125 
2126     // Just use computeKnownBits to compute output bits.
2127     Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
2128     break;
2129   }
2130 
2131   // If we know the value of all of the demanded bits, return this as a
2132   // constant.
2133   if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) {
2134     // Avoid folding to a constant if any OpaqueConstant is involved.
2135     const SDNode *N = Op.getNode();
2136     for (SDNodeIterator I = SDNodeIterator::begin(N),
2137                         E = SDNodeIterator::end(N);
2138          I != E; ++I) {
2139       SDNode *Op = *I;
2140       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
2141         if (C->isOpaque())
2142           return false;
2143     }
2144     // TODO: Handle float bits as well.
2145     if (VT.isInteger())
2146       return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT));
2147   }
2148 
2149   return false;
2150 }
2151 
2152 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op,
2153                                                 const APInt &DemandedElts,
2154                                                 APInt &KnownUndef,
2155                                                 APInt &KnownZero,
2156                                                 DAGCombinerInfo &DCI) const {
2157   SelectionDAG &DAG = DCI.DAG;
2158   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
2159                         !DCI.isBeforeLegalizeOps());
2160 
2161   bool Simplified =
2162       SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO);
2163   if (Simplified) {
2164     DCI.AddToWorklist(Op.getNode());
2165     DCI.CommitTargetLoweringOpt(TLO);
2166   }
2167 
2168   return Simplified;
2169 }
2170 
2171 /// Given a vector binary operation and known undefined elements for each input
2172 /// operand, compute whether each element of the output is undefined.
2173 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
2174                                          const APInt &UndefOp0,
2175                                          const APInt &UndefOp1) {
2176   EVT VT = BO.getValueType();
2177   assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() &&
2178          "Vector binop only");
2179 
2180   EVT EltVT = VT.getVectorElementType();
2181   unsigned NumElts = VT.getVectorNumElements();
2182   assert(UndefOp0.getBitWidth() == NumElts &&
2183          UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis");
2184 
2185   auto getUndefOrConstantElt = [&](SDValue V, unsigned Index,
2186                                    const APInt &UndefVals) {
2187     if (UndefVals[Index])
2188       return DAG.getUNDEF(EltVT);
2189 
2190     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2191       // Try hard to make sure that the getNode() call is not creating temporary
2192       // nodes. Ignore opaque integers because they do not constant fold.
2193       SDValue Elt = BV->getOperand(Index);
2194       auto *C = dyn_cast<ConstantSDNode>(Elt);
2195       if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))
2196         return Elt;
2197     }
2198 
2199     return SDValue();
2200   };
2201 
2202   APInt KnownUndef = APInt::getNullValue(NumElts);
2203   for (unsigned i = 0; i != NumElts; ++i) {
2204     // If both inputs for this element are either constant or undef and match
2205     // the element type, compute the constant/undef result for this element of
2206     // the vector.
2207     // TODO: Ideally we would use FoldConstantArithmetic() here, but that does
2208     // not handle FP constants. The code within getNode() should be refactored
2209     // to avoid the danger of creating a bogus temporary node here.
2210     SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0);
2211     SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1);
2212     if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT)
2213       if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef())
2214         KnownUndef.setBit(i);
2215   }
2216   return KnownUndef;
2217 }
2218 
/// Attempt to simplify Op based on which of its vector elements are actually
/// demanded. On success a replacement has been queued via TLO.CombineTo and
/// true is returned. Otherwise returns false with KnownUndef/KnownZero set to
/// the lanes proven to be undef/zero for the demanded elements.
bool TargetLowering::SimplifyDemandedVectorElts(
    SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef,
    APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth,
    bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = OriginalDemandedElts;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert(VT.isVector() && "Expected vector op");
  assert(VT.getVectorNumElements() == NumElts &&
         "Mask size mismatches value type element count!");

  KnownUndef = KnownZero = APInt::getNullValue(NumElts);

  // Undef operand.
  if (Op.isUndef()) {
    KnownUndef.setAllBits();
    return false;
  }

  // If Op has other users, assume that all elements are needed.
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse)
    DemandedElts.setAllBits();

  // Not demanding any elements from Op.
  if (DemandedElts == 0) {
    KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }

  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return false;

  SDLoc DL(Op);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();

  switch (Op.getOpcode()) {
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0]) {
      KnownUndef.setAllBits();
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
    }
    // Only lane 0 carries the scalar; all higher lanes are implicitly undef.
    KnownUndef.setHighBits(NumElts - 1);
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // We only handle vectors here.
    // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits?
    if (!SrcVT.isVector())
      break;

    // Fast handling of 'identity' bitcasts.
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    if (NumSrcElts == NumElts)
      return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
                                        KnownZero, TLO, Depth + 1);

    APInt SrcZero, SrcUndef;
    APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts);

    // Bitcast from 'large element' src vector to 'small element' vector, we
    // must demand a source element if any DemandedElt maps to it.
    if ((NumElts % NumSrcElts) == 0) {
      unsigned Scale = NumElts / NumSrcElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // Try calling SimplifyDemandedBits, converting demanded elts to the bits
      // of the large element.
      // TODO - bigendian once we have test coverage.
      if (TLO.DAG.getDataLayout().isLittleEndian()) {
        unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits();
        APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits);
        for (unsigned i = 0; i != NumElts; ++i)
          if (DemandedElts[i]) {
            unsigned Ofs = (i % Scale) * EltSizeInBits;
            SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);
          }

        KnownBits Known;
        if (SimplifyDemandedBits(Src, SrcDemandedBits, Known, TLO, Depth + 1))
          return true;
      }

      // If the src element is zero/undef then all the output elements will be -
      // only demanded elements are guaranteed to be correct.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        if (SrcDemandedElts[i]) {
          if (SrcZero[i])
            KnownZero.setBits(i * Scale, (i + 1) * Scale);
          if (SrcUndef[i])
            KnownUndef.setBits(i * Scale, (i + 1) * Scale);
        }
      }
    }

    // Bitcast from 'small element' src vector to 'large element' vector, we
    // demand all smaller source elements covered by the larger demanded element
    // of this vector.
    if ((NumSrcElts % NumElts) == 0) {
      unsigned Scale = NumSrcElts / NumElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // If all the src elements covering an output element are zero/undef, then
      // the output element will be as well, assuming it was demanded.
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i]) {
          if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownZero.setBit(i);
          if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownUndef.setBit(i);
        }
      }
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    // Check all elements and simplify any unused elements with UNDEF.
    if (!DemandedElts.isAllOnesValue()) {
      // Don't simplify BROADCASTS.
      if (llvm::any_of(Op->op_values(),
                       [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
        SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
        bool Updated = false;
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i] && !Ops[i].isUndef()) {
            Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
            KnownUndef.setBit(i);
            Updated = true;
          }
        }
        if (Updated)
          return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
      }
    }
    // Record which operands are already undef or (same-width) zero constants.
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue SrcOp = Op.getOperand(i);
      if (SrcOp.isUndef()) {
        KnownUndef.setBit(i);
      } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
                 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
        KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Simplify each source subvector against its slice of the demanded mask.
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      SDValue SubOp = Op.getOperand(i);
      APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      APInt SubUndef, SubZero;
      if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef.insertBits(SubUndef, i * NumSubElts);
      KnownZero.insertBits(SubZero, i * NumSubElts);
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // We can only analyze a constant, in-range insertion index.
    if (!isa<ConstantSDNode>(Op.getOperand(2)))
      break;
    SDValue Base = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    EVT SubVT = Sub.getValueType();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    const APInt &Idx = Op.getConstantOperandAPInt(2);
    if (Idx.ugt(NumElts - NumSubElts))
      break;
    unsigned SubIdx = Idx.getZExtValue();
    // Simplify the inserted subvector against the lanes it covers.
    APInt SubElts = DemandedElts.extractBits(NumSubElts, SubIdx);
    APInt SubUndef, SubZero;
    if (SimplifyDemandedVectorElts(Sub, SubElts, SubUndef, SubZero, TLO,
                                   Depth + 1))
      return true;
    // The base only supplies the lanes outside the inserted range.
    APInt BaseElts = DemandedElts;
    BaseElts.insertBits(APInt::getNullValue(NumSubElts), SubIdx);

    // If none of the base operand elements are demanded, replace it with undef.
    if (!BaseElts && !Base.isUndef())
      return TLO.CombineTo(Op,
                           TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                                           TLO.DAG.getUNDEF(VT),
                                           Op.getOperand(1),
                                           Op.getOperand(2)));

    if (SimplifyDemandedVectorElts(Base, BaseElts, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;
    KnownUndef.insertBits(SubUndef, SubIdx);
    KnownZero.insertBits(SubZero, SubIdx);
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Src = Op.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      APInt SrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
      APInt SrcUndef, SrcZero;
      if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef = SrcUndef.extractBits(NumElts, Idx);
      KnownZero = SrcZero.extractBits(NumElts, Idx);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

    // For a legal, constant insertion index, if we don't need this insertion
    // then strip it, else remove it from the demanded elts.
    if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
      unsigned Idx = CIdx->getZExtValue();
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);

      APInt DemandedVecElts(DemandedElts);
      DemandedVecElts.clearBit(Idx);
      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                     KnownZero, TLO, Depth + 1))
        return true;

      // The inserted lane's undef/zero state comes from the scalar, not Vec.
      KnownUndef.clearBit(Idx);
      if (Scl.isUndef())
        KnownUndef.setBit(Idx);

      KnownZero.clearBit(Idx);
      if (isNullConstant(Scl) || isNullFPConstant(Scl))
        KnownZero.setBit(Idx);
      break;
    }

    APInt VecUndef, VecZero;
    if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
                                   Depth + 1))
      return true;
    // Without knowing the insertion index we can't set KnownUndef/KnownZero.
    break;
  }
  case ISD::VSELECT: {
    // Try to transform the select condition based on the current demanded
    // elements.
    // TODO: If a condition element is undef, we can choose from one arm of the
    //       select (and if one arm is undef, then we can propagate that to the
    //       result).
    // TODO - add support for constant vselect masks (see IR version of this).
    APInt UnusedUndef, UnusedZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
                                   UnusedZero, TLO, Depth + 1))
      return true;

    // See if we can simplify either vselect operand.
    APInt DemandedLHS(DemandedElts);
    APInt DemandedRHS(DemandedElts);
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    // A result lane is undef/zero only if both select arms agree.
    KnownUndef = UndefLHS & UndefRHS;
    KnownZero = ZeroLHS & ZeroRHS;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands..
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    // See if we can simplify either shuffle operand.
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    // Simplify mask using undef elements from LHS/RHS.
    bool Updated = false;
    bool IdentityLHS = true, IdentityRHS = true;
    SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
    for (unsigned i = 0; i != NumElts; ++i) {
      int &M = NewMask[i];
      if (M < 0)
        continue;
      // Turn lanes that are not demanded, or that read an undef source lane,
      // into "don't care" (-1) mask entries.
      if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
          (M >= (int)NumElts && UndefRHS[M - NumElts])) {
        Updated = true;
        M = -1;
      }
      IdentityLHS &= (M < 0) || (M == (int)i);
      IdentityRHS &= (M < 0) || ((M - NumElts) == i);
    }

    // Update legal shuffle masks based on demanded elements if it won't reduce
    // to Identity which can cause premature removal of the shuffle mask.
    if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {
      SDValue LegalShuffle =
          buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),
                                  NewMask, TLO.DAG);
      if (LegalShuffle)
        return TLO.CombineTo(Op, LegalShuffle);
    }

    // Propagate undef/zero elements from LHS/RHS.
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0) {
        KnownUndef.setBit(i);
      } else if (M < (int)NumElts) {
        if (UndefLHS[M])
          KnownUndef.setBit(i);
        if (ZeroLHS[M])
          KnownZero.setBit(i);
      } else {
        if (UndefRHS[M - NumElts])
          KnownUndef.setBit(i);
        if (ZeroRHS[M - NumElts])
          KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    APInt SrcUndef, SrcZero;
    SDValue Src = Op.getOperand(0);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    // The result uses the low NumElts lanes of the (wider-count) source.
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
    if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    KnownZero = SrcZero.zextOrTrunc(NumElts);
    KnownUndef = SrcUndef.zextOrTrunc(NumElts);

    if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG &&
        Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
        DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) {
      // aext - if we just need the bottom element then we can bitcast.
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
    }

    if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
      // zext(undef) upper bits are guaranteed to be zero.
      if (DemandedElts.isSubsetOf(KnownUndef))
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
      KnownUndef.clearAllBits();
    }
    break;
  }

  // TODO: There are more binop opcodes that could be handled here - MUL, MIN,
  // MAX, saturated math, etc.
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM: {
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;
    APInt UndefLHS, ZeroLHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;

    KnownZero = ZeroLHS & ZeroRHS;
    KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS);
    break;
  }
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::ROTL:
  case ISD::ROTR: {
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;
    APInt UndefLHS, ZeroLHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;

    // Only a zero value (not a zero amount) guarantees a zero result lane.
    KnownZero = ZeroLHS;
    KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop?
    break;
  }
  case ISD::MUL:
  case ISD::AND: {
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, SrcUndef,
                                   SrcZero, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                                   KnownZero, TLO, Depth + 1))
      return true;

    // If either side has a zero element, then the result element is zero, even
    // if the other is an UNDEF.
    // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros
    // and then handle 'and' nodes with the rest of the binop opcodes.
    KnownZero |= SrcZero;
    KnownUndef &= SrcUndef;
    KnownUndef &= ~KnownZero;
    break;
  }
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    // Lane-wise conversions: demanded/undef/zero lanes propagate directly.
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                                   KnownZero, TLO, Depth + 1))
      return true;

    if (Op.getOpcode() == ISD::ZERO_EXTEND) {
      // zext(undef) upper bits are guaranteed to be zero.
      if (DemandedElts.isSubsetOf(KnownUndef))
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
      KnownUndef.clearAllBits();
    }
    break;
  default: {
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
      // Give the target a chance to handle its own nodes.
      if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
                                                  KnownZero, TLO, Depth))
        return true;
    } else {
      // Fall back to demanding all bits of the demanded elements.
      KnownBits Known;
      APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits);
      if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known,
                               TLO, Depth, AssumeSingleUse))
        return true;
    }
    break;
  }
  }
  assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero");

  // Constant fold all undef cases.
  // TODO: Handle zero cases as well.
  if (DemandedElts.isSubsetOf(KnownUndef))
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

  return false;
}
2710 
2711 /// Determine which of the bits specified in Mask are known to be either zero or
2712 /// one and return them in the Known.
2713 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
2714                                                    KnownBits &Known,
2715                                                    const APInt &DemandedElts,
2716                                                    const SelectionDAG &DAG,
2717                                                    unsigned Depth) const {
2718   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2719           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2720           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2721           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2722          "Should use MaskedValueIsZero if you don't know whether Op"
2723          " is a target node!");
2724   Known.resetAll();
2725 }
2726 
// GISel counterpart of computeKnownBitsForTargetNode: compute known bits for
// a target-specific instruction defining register R. The default
// implementation claims no known bits; targets override it.
void TargetLowering::computeKnownBitsForTargetInstr(
    GISelKnownBits &Analysis, Register R, KnownBits &Known,
    const APInt &DemandedElts, const MachineRegisterInfo &MRI,
    unsigned Depth) const {
  Known.resetAll();
}
2733 
2734 void TargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
2735                                                    KnownBits &Known,
2736                                                    const APInt &DemandedElts,
2737                                                    const SelectionDAG &DAG,
2738                                                    unsigned Depth) const {
2739   assert(isa<FrameIndexSDNode>(Op) && "expected FrameIndex");
2740 
2741   if (unsigned Align = DAG.InferPtrAlignment(Op)) {
2742     // The low bits are known zero if the pointer is aligned.
2743     Known.Zero.setLowBits(Log2_32(Align));
2744   }
2745 }
2746 
2747 /// This method can be implemented by targets that want to expose additional
2748 /// information about sign bits to the DAG Combiner.
2749 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
2750                                                          const APInt &,
2751                                                          const SelectionDAG &,
2752                                                          unsigned Depth) const {
2753   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2754           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2755           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2756           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2757          "Should use ComputeNumSignBits if you don't know whether Op"
2758          " is a target node!");
2759   return 1;
2760 }
2761 
2762 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
2763     SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
2764     TargetLoweringOpt &TLO, unsigned Depth) const {
2765   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2766           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2767           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2768           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2769          "Should use SimplifyDemandedVectorElts if you don't know whether Op"
2770          " is a target node!");
2771   return false;
2772 }
2773 
2774 bool TargetLowering::SimplifyDemandedBitsForTargetNode(
2775     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
2776     KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
2777   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2778           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2779           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2780           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2781          "Should use SimplifyDemandedBits if you don't know whether Op"
2782          " is a target node!");
2783   computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
2784   return false;
2785 }
2786 
2787 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
2788     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
2789     SelectionDAG &DAG, unsigned Depth) const {
2790   assert(
2791       (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2792        Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2793        Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2794        Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2795       "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
2796       " is a target node!");
2797   return SDValue();
2798 }
2799 
2800 SDValue
2801 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
2802                                         SDValue N1, MutableArrayRef<int> Mask,
2803                                         SelectionDAG &DAG) const {
2804   bool LegalMask = isShuffleMaskLegal(Mask, VT);
2805   if (!LegalMask) {
2806     std::swap(N0, N1);
2807     ShuffleVectorSDNode::commuteMask(Mask);
2808     LegalMask = isShuffleMaskLegal(Mask, VT);
2809   }
2810 
2811   if (!LegalMask)
2812     return SDValue();
2813 
2814   return DAG.getVectorShuffle(VT, DL, N0, N1, Mask);
2815 }
2816 
// Return the constant this load reads, if the target can identify one.
// The default implementation recognizes none; targets override it.
const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const {
  return nullptr;
}
2820 
2821 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
2822                                                   const SelectionDAG &DAG,
2823                                                   bool SNaN,
2824                                                   unsigned Depth) const {
2825   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2826           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2827           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2828           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2829          "Should use isKnownNeverNaN if you don't know whether Op"
2830          " is a target node!");
2831   return false;
2832 }
2833 
2834 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must
2835 // work with truncating build vectors and vectors with elements of less than
2836 // 8 bits.
2837 bool TargetLowering::isConstTrueVal(const SDNode *N) const {
2838   if (!N)
2839     return false;
2840 
2841   APInt CVal;
2842   if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
2843     CVal = CN->getAPIntValue();
2844   } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
2845     auto *CN = BV->getConstantSplatNode();
2846     if (!CN)
2847       return false;
2848 
2849     // If this is a truncating build vector, truncate the splat value.
2850     // Otherwise, we may fail to match the expected values below.
2851     unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits();
2852     CVal = CN->getAPIntValue();
2853     if (BVEltWidth < CVal.getBitWidth())
2854       CVal = CVal.trunc(BVEltWidth);
2855   } else {
2856     return false;
2857   }
2858 
2859   switch (getBooleanContents(N->getValueType(0))) {
2860   case UndefinedBooleanContent:
2861     return CVal[0];
2862   case ZeroOrOneBooleanContent:
2863     return CVal.isOneValue();
2864   case ZeroOrNegativeOneBooleanContent:
2865     return CVal.isAllOnesValue();
2866   }
2867 
2868   llvm_unreachable("Invalid boolean contents");
2869 }
2870 
2871 bool TargetLowering::isConstFalseVal(const SDNode *N) const {
2872   if (!N)
2873     return false;
2874 
2875   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
2876   if (!CN) {
2877     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
2878     if (!BV)
2879       return false;
2880 
2881     // Only interested in constant splats, we don't care about undef
2882     // elements in identifying boolean constants and getConstantSplatNode
2883     // returns NULL if all ops are undef;
2884     CN = BV->getConstantSplatNode();
2885     if (!CN)
2886       return false;
2887   }
2888 
2889   if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
2890     return !CN->getAPIntValue()[0];
2891 
2892   return CN->isNullValue();
2893 }
2894 
2895 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
2896                                        bool SExt) const {
2897   if (VT == MVT::i1)
2898     return N->isOne();
2899 
2900   TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
2901   switch (Cnt) {
2902   case TargetLowering::ZeroOrOneBooleanContent:
2903     // An extended value of 1 is always true, unless its original type is i1,
2904     // in which case it will be sign extended to -1.
2905     return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
2906   case TargetLowering::UndefinedBooleanContent:
2907   case TargetLowering::ZeroOrNegativeOneBooleanContent:
2908     return N->isAllOnesValue() && SExt;
2909   }
2910   llvm_unreachable("Unexpected enumeration.");
2911 }
2912 
2913 /// This helper function of SimplifySetCC tries to optimize the comparison when
2914 /// either operand of the SetCC node is a bitwise-and instruction.
2915 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
2916                                          ISD::CondCode Cond, const SDLoc &DL,
2917                                          DAGCombinerInfo &DCI) const {
2918   // Match these patterns in any of their permutations:
2919   // (X & Y) == Y
2920   // (X & Y) != Y
2921   if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
2922     std::swap(N0, N1);
2923 
2924   EVT OpVT = N0.getValueType();
2925   if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
2926       (Cond != ISD::SETEQ && Cond != ISD::SETNE))
2927     return SDValue();
2928 
2929   SDValue X, Y;
2930   if (N0.getOperand(0) == N1) {
2931     X = N0.getOperand(1);
2932     Y = N0.getOperand(0);
2933   } else if (N0.getOperand(1) == N1) {
2934     X = N0.getOperand(0);
2935     Y = N0.getOperand(1);
2936   } else {
2937     return SDValue();
2938   }
2939 
2940   SelectionDAG &DAG = DCI.DAG;
2941   SDValue Zero = DAG.getConstant(0, DL, OpVT);
2942   if (DAG.isKnownToBeAPowerOfTwo(Y)) {
2943     // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
2944     // Note that where Y is variable and is known to have at most one bit set
2945     // (for example, if it is Z & 1) we cannot do this; the expressions are not
2946     // equivalent when Y == 0.
2947     assert(OpVT.isInteger());
2948     Cond = ISD::getSetCCInverse(Cond, OpVT);
2949     if (DCI.isBeforeLegalizeOps() ||
2950         isCondCodeLegal(Cond, N0.getSimpleValueType()))
2951       return DAG.getSetCC(DL, VT, N0, Zero, Cond);
2952   } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
2953     // If the target supports an 'and-not' or 'and-complement' logic operation,
2954     // try to use that to make a comparison operation more efficient.
2955     // But don't do this transform if the mask is a single bit because there are
2956     // more efficient ways to deal with that case (for example, 'bt' on x86 or
2957     // 'rlwinm' on PPC).
2958 
2959     // Bail out if the compare operand that we want to turn into a zero is
2960     // already a zero (otherwise, infinite loop).
2961     auto *YConst = dyn_cast<ConstantSDNode>(Y);
2962     if (YConst && YConst->isNullValue())
2963       return SDValue();
2964 
2965     // Transform this into: ~X & Y == 0.
2966     SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
2967     SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
2968     return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
2969   }
2970 
2971   return SDValue();
2972 }
2973 
2974 /// There are multiple IR patterns that could be checking whether certain
2975 /// truncation of a signed number would be lossy or not. The pattern which is
2976 /// best at IR level, may not lower optimally. Thus, we want to unfold it.
2977 /// We are looking for the following pattern: (KeptBits is a constant)
2978 ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
2979 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false.
2980 /// KeptBits also can't be 1, that would have been folded to  %x dstcond 0
2981 /// We will unfold it into the natural trunc+sext pattern:
2982 ///   ((%x << C) a>> C) dstcond %x
2983 /// Where  C = bitwidth(x) - KeptBits  and  C u< bitwidth(x)
SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
    EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI,
    const SDLoc &DL) const {
  // We must be comparing with a constant.
  ConstantSDNode *C1;
  if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
    return SDValue();

  // N0 should be:  add %x, (1 << (KeptBits-1))
  if (N0->getOpcode() != ISD::ADD)
    return SDValue();

  // And we must be 'add'ing a constant.
  ConstantSDNode *C01;
  if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))
    return SDValue();

  SDValue X = N0->getOperand(0);
  EVT XVT = X.getValueType();

  // Validate constants ...

  APInt I1 = C1->getAPIntValue();

  // Normalize the unsigned predicate into an [in]equality check against an
  // exclusive power-of-two bound: u< and u<= become SETEQ, u> and u>= become
  // SETNE. The inclusive forms (u<=, u>) compare against 2^KeptBits - 1, so
  // bump the constant by one to reach the power of two itself.
  ISD::CondCode NewCond;
  if (Cond == ISD::CondCode::SETULT) {
    NewCond = ISD::CondCode::SETEQ;
  } else if (Cond == ISD::CondCode::SETULE) {
    NewCond = ISD::CondCode::SETEQ;
    // But need to 'canonicalize' the constant.
    I1 += 1;
  } else if (Cond == ISD::CondCode::SETUGT) {
    NewCond = ISD::CondCode::SETNE;
    // But need to 'canonicalize' the constant.
    I1 += 1;
  } else if (Cond == ISD::CondCode::SETUGE) {
    NewCond = ISD::CondCode::SETNE;
  } else
    return SDValue();

  APInt I01 = C01->getAPIntValue();

  auto checkConstants = [&I1, &I01]() -> bool {
    // Both of them must be power-of-two, and the constant from setcc is bigger.
    return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2();
  };

  if (checkConstants()) {
    // Great, e.g. got  icmp ult i16 (add i16 %x, 128), 256
  } else {
    // What if we invert constants? (and the target predicate)
    // The pattern may have been written with negated constants, e.g.
    //   icmp uge i16 (add i16 %x, -128), -256
    // Negating both constants (and inverting the already-rewritten
    // predicate) recovers the canonical power-of-two form checked above.
    I1.negate();
    I01.negate();
    assert(XVT.isInteger());
    NewCond = getSetCCInverse(NewCond, XVT);
    if (!checkConstants())
      return SDValue();
    // Great, e.g. got  icmp uge i16 (add i16 %x, -128), -256
  }

  // They are power-of-two, so which bit is set?
  const unsigned KeptBits = I1.logBase2();
  const unsigned KeptBitsMinusOne = I01.logBase2();

  // Magic!
  // The two constants must be adjacent powers of two:
  //   I1 == 1 << KeptBits  and  I01 == 1 << (KeptBits - 1).
  if (KeptBits != (KeptBitsMinusOne + 1))
    return SDValue();
  assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable");

  // We don't want to do this in every single case.
  SelectionDAG &DAG = DCI.DAG;
  if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck(
          XVT, KeptBits))
    return SDValue();

  const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits;
  assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable");

  // Unfold into:  ((%x << C) a>> C) cond %x
  // Where 'cond' will be either 'eq' or 'ne'.
  SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT);
  SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt);
  SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt);
  SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond);

  return T2;
}
3071 
// (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
// The logical shift is moved off the constant and onto X with the opposite
// shift direction; whether the rewritten form is preferable is delegated to
// the target hook shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd.
SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
    EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
    DAGCombinerInfo &DCI, const SDLoc &DL) const {
  assert(isConstOrConstSplat(N1C) &&
         isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() &&
         "Should be a comparison with 0.");
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Valid only for [in]equality comparisons.");

  unsigned NewShiftOpcode;
  SDValue X, C, Y;

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Look for '(C l>>/<< Y)'.
  // NOTE: X is captured by reference and assigned below, before Match is
  // invoked, so the target hook also gets to inspect the other 'and'
  // operand.
  auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) {
    // The shift should be one-use.
    if (!V.hasOneUse())
      return false;
    unsigned OldShiftOpcode = V.getOpcode();
    // Record the opposite shift direction for the rewritten form.
    switch (OldShiftOpcode) {
    case ISD::SHL:
      NewShiftOpcode = ISD::SRL;
      break;
    case ISD::SRL:
      NewShiftOpcode = ISD::SHL;
      break;
    default:
      return false; // must be a logical shift.
    }
    // We should be shifting a constant.
    // FIXME: best to use isConstantOrConstantVector().
    C = V.getOperand(0);
    ConstantSDNode *CC =
        isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
    if (!CC)
      return false;
    Y = V.getOperand(1);

    // Let the target decide whether hoisting the constant is profitable.
    ConstantSDNode *XC =
        isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
    return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
  };

  // LHS of comparison should be an one-use 'and'.
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();

  X = N0.getOperand(0);
  SDValue Mask = N0.getOperand(1);

  // 'and' is commutative!
  if (!Match(Mask)) {
    std::swap(X, Mask);
    if (!Match(Mask))
      return SDValue();
  }

  EVT VT = X.getValueType();

  // Produce:
  // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
  SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
  SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
  SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
  return T2;
}
3142 
3143 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as
3144 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to
3145 /// handle the commuted versions of these patterns.
3146 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
3147                                            ISD::CondCode Cond, const SDLoc &DL,
3148                                            DAGCombinerInfo &DCI) const {
3149   unsigned BOpcode = N0.getOpcode();
3150   assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) &&
3151          "Unexpected binop");
3152   assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode");
3153 
3154   // (X + Y) == X --> Y == 0
3155   // (X - Y) == X --> Y == 0
3156   // (X ^ Y) == X --> Y == 0
3157   SelectionDAG &DAG = DCI.DAG;
3158   EVT OpVT = N0.getValueType();
3159   SDValue X = N0.getOperand(0);
3160   SDValue Y = N0.getOperand(1);
3161   if (X == N1)
3162     return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond);
3163 
3164   if (Y != N1)
3165     return SDValue();
3166 
3167   // (X + Y) == Y --> X == 0
3168   // (X ^ Y) == Y --> X == 0
3169   if (BOpcode == ISD::ADD || BOpcode == ISD::XOR)
3170     return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond);
3171 
3172   // The shift would not be valid if the operands are boolean (i1).
3173   if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1)
3174     return SDValue();
3175 
3176   // (X - Y) == Y --> X == Y << 1
3177   EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(),
3178                                  !DCI.isBeforeLegalize());
3179   SDValue One = DAG.getConstant(1, DL, ShiftVT);
3180   SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One);
3181   if (!DCI.isCalledByLegalizer())
3182     DCI.AddToWorklist(YShl1.getNode());
3183   return DAG.getSetCC(DL, VT, X, YShl1, Cond);
3184 }
3185 
3186 /// Try to simplify a setcc built with the specified operands and cc. If it is
3187 /// unable to simplify it, return a null SDValue.
3188 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
3189                                       ISD::CondCode Cond, bool foldBooleans,
3190                                       DAGCombinerInfo &DCI,
3191                                       const SDLoc &dl) const {
3192   SelectionDAG &DAG = DCI.DAG;
3193   const DataLayout &Layout = DAG.getDataLayout();
3194   EVT OpVT = N0.getValueType();
3195 
3196   // Constant fold or commute setcc.
3197   if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl))
3198     return Fold;
3199 
3200   // Ensure that the constant occurs on the RHS and fold constant comparisons.
3201   // TODO: Handle non-splat vector constants. All undef causes trouble.
3202   ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
3203   if (isConstOrConstSplat(N0) &&
3204       (DCI.isBeforeLegalizeOps() ||
3205        isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
3206     return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
3207 
3208   // If we have a subtract with the same 2 non-constant operands as this setcc
3209   // -- but in reverse order -- then try to commute the operands of this setcc
3210   // to match. A matching pair of setcc (cmp) and sub may be combined into 1
3211   // instruction on some targets.
3212   if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) &&
3213       (DCI.isBeforeLegalizeOps() ||
3214        isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) &&
3215       DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N1, N0 } ) &&
3216       !DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N0, N1 } ))
3217     return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
3218 
3219   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
3220     const APInt &C1 = N1C->getAPIntValue();
3221 
3222     // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
3223     // equality comparison, then we're just comparing whether X itself is
3224     // zero.
3225     if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) &&
3226         N0.getOperand(0).getOpcode() == ISD::CTLZ &&
3227         N0.getOperand(1).getOpcode() == ISD::Constant) {
3228       const APInt &ShAmt = N0.getConstantOperandAPInt(1);
3229       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3230           ShAmt == Log2_32(N0.getValueSizeInBits())) {
3231         if ((C1 == 0) == (Cond == ISD::SETEQ)) {
3232           // (srl (ctlz x), 5) == 0  -> X != 0
3233           // (srl (ctlz x), 5) != 1  -> X != 0
3234           Cond = ISD::SETNE;
3235         } else {
3236           // (srl (ctlz x), 5) != 0  -> X == 0
3237           // (srl (ctlz x), 5) == 1  -> X == 0
3238           Cond = ISD::SETEQ;
3239         }
3240         SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
3241         return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
3242                             Zero, Cond);
3243       }
3244     }
3245 
3246     SDValue CTPOP = N0;
3247     // Look through truncs that don't change the value of a ctpop.
3248     if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
3249       CTPOP = N0.getOperand(0);
3250 
3251     if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
3252         (N0 == CTPOP ||
3253          N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) {
3254       EVT CTVT = CTPOP.getValueType();
3255       SDValue CTOp = CTPOP.getOperand(0);
3256 
3257       // (ctpop x) u< 2 -> (x & x-1) == 0
3258       // (ctpop x) u> 1 -> (x & x-1) != 0
3259       if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
3260         SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
3261         SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
3262         SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
3263         ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
3264         return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
3265       }
3266 
3267       // If ctpop is not supported, expand a power-of-2 comparison based on it.
3268       if (C1 == 1 && !isOperationLegalOrCustom(ISD::CTPOP, CTVT) &&
3269           (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3270         // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0)
3271         // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0)
3272         SDValue Zero = DAG.getConstant(0, dl, CTVT);
3273         SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
3274         assert(CTVT.isInteger());
3275         ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT);
3276         SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
3277         SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
3278         SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond);
3279         SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond);
3280         unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR;
3281         return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS);
3282       }
3283     }
3284 
3285     // (zext x) == C --> x == (trunc C)
3286     // (sext x) == C --> x == (trunc C)
3287     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3288         DCI.isBeforeLegalize() && N0->hasOneUse()) {
3289       unsigned MinBits = N0.getValueSizeInBits();
3290       SDValue PreExt;
3291       bool Signed = false;
3292       if (N0->getOpcode() == ISD::ZERO_EXTEND) {
3293         // ZExt
3294         MinBits = N0->getOperand(0).getValueSizeInBits();
3295         PreExt = N0->getOperand(0);
3296       } else if (N0->getOpcode() == ISD::AND) {
3297         // DAGCombine turns costly ZExts into ANDs
3298         if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
3299           if ((C->getAPIntValue()+1).isPowerOf2()) {
3300             MinBits = C->getAPIntValue().countTrailingOnes();
3301             PreExt = N0->getOperand(0);
3302           }
3303       } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
3304         // SExt
3305         MinBits = N0->getOperand(0).getValueSizeInBits();
3306         PreExt = N0->getOperand(0);
3307         Signed = true;
3308       } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
3309         // ZEXTLOAD / SEXTLOAD
3310         if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
3311           MinBits = LN0->getMemoryVT().getSizeInBits();
3312           PreExt = N0;
3313         } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
3314           Signed = true;
3315           MinBits = LN0->getMemoryVT().getSizeInBits();
3316           PreExt = N0;
3317         }
3318       }
3319 
3320       // Figure out how many bits we need to preserve this constant.
3321       unsigned ReqdBits = Signed ?
3322         C1.getBitWidth() - C1.getNumSignBits() + 1 :
3323         C1.getActiveBits();
3324 
3325       // Make sure we're not losing bits from the constant.
3326       if (MinBits > 0 &&
3327           MinBits < C1.getBitWidth() &&
3328           MinBits >= ReqdBits) {
3329         EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
3330         if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
3331           // Will get folded away.
3332           SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
3333           if (MinBits == 1 && C1 == 1)
3334             // Invert the condition.
3335             return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1),
3336                                 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3337           SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
3338           return DAG.getSetCC(dl, VT, Trunc, C, Cond);
3339         }
3340 
3341         // If truncating the setcc operands is not desirable, we can still
3342         // simplify the expression in some cases:
3343         // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc)
3344         // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc))
3345         // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc))
3346         // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc)
3347         // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc))
3348         // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc)
3349         SDValue TopSetCC = N0->getOperand(0);
3350         unsigned N0Opc = N0->getOpcode();
3351         bool SExt = (N0Opc == ISD::SIGN_EXTEND);
3352         if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
3353             TopSetCC.getOpcode() == ISD::SETCC &&
3354             (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
3355             (isConstFalseVal(N1C) ||
3356              isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
3357 
3358           bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
3359                          (!N1C->isNullValue() && Cond == ISD::SETNE);
3360 
3361           if (!Inverse)
3362             return TopSetCC;
3363 
3364           ISD::CondCode InvCond = ISD::getSetCCInverse(
3365               cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
3366               TopSetCC.getOperand(0).getValueType());
3367           return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
3368                                       TopSetCC.getOperand(1),
3369                                       InvCond);
3370         }
3371       }
3372     }
3373 
3374     // If the LHS is '(and load, const)', the RHS is 0, the test is for
3375     // equality or unsigned, and all 1 bits of the const are in the same
3376     // partial word, see if we can shorten the load.
3377     if (DCI.isBeforeLegalize() &&
3378         !ISD::isSignedIntSetCC(Cond) &&
3379         N0.getOpcode() == ISD::AND && C1 == 0 &&
3380         N0.getNode()->hasOneUse() &&
3381         isa<LoadSDNode>(N0.getOperand(0)) &&
3382         N0.getOperand(0).getNode()->hasOneUse() &&
3383         isa<ConstantSDNode>(N0.getOperand(1))) {
3384       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
3385       APInt bestMask;
3386       unsigned bestWidth = 0, bestOffset = 0;
3387       if (Lod->isSimple() && Lod->isUnindexed()) {
3388         unsigned origWidth = N0.getValueSizeInBits();
3389         unsigned maskWidth = origWidth;
3390         // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
3391         // 8 bits, but have to be careful...
3392         if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
3393           origWidth = Lod->getMemoryVT().getSizeInBits();
3394         const APInt &Mask = N0.getConstantOperandAPInt(1);
3395         for (unsigned width = origWidth / 2; width>=8; width /= 2) {
3396           APInt newMask = APInt::getLowBitsSet(maskWidth, width);
3397           for (unsigned offset=0; offset<origWidth/width; offset++) {
3398             if (Mask.isSubsetOf(newMask)) {
3399               if (Layout.isLittleEndian())
3400                 bestOffset = (uint64_t)offset * (width/8);
3401               else
3402                 bestOffset = (origWidth/width - offset - 1) * (width/8);
3403               bestMask = Mask.lshr(offset * (width/8) * 8);
3404               bestWidth = width;
3405               break;
3406             }
3407             newMask <<= width;
3408           }
3409         }
3410       }
3411       if (bestWidth) {
3412         EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
3413         if (newVT.isRound() &&
3414             shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) {
3415           SDValue Ptr = Lod->getBasePtr();
3416           if (bestOffset != 0)
3417             Ptr = DAG.getMemBasePlusOffset(Ptr, bestOffset, dl);
3418           unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
3419           SDValue NewLoad = DAG.getLoad(
3420               newVT, dl, Lod->getChain(), Ptr,
3421               Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign);
3422           return DAG.getSetCC(dl, VT,
3423                               DAG.getNode(ISD::AND, dl, newVT, NewLoad,
3424                                       DAG.getConstant(bestMask.trunc(bestWidth),
3425                                                       dl, newVT)),
3426                               DAG.getConstant(0LL, dl, newVT), Cond);
3427         }
3428       }
3429     }
3430 
3431     // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
3432     if (N0.getOpcode() == ISD::ZERO_EXTEND) {
3433       unsigned InSize = N0.getOperand(0).getValueSizeInBits();
3434 
3435       // If the comparison constant has bits in the upper part, the
3436       // zero-extended value could never match.
3437       if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
3438                                               C1.getBitWidth() - InSize))) {
3439         switch (Cond) {
3440         case ISD::SETUGT:
3441         case ISD::SETUGE:
3442         case ISD::SETEQ:
3443           return DAG.getConstant(0, dl, VT);
3444         case ISD::SETULT:
3445         case ISD::SETULE:
3446         case ISD::SETNE:
3447           return DAG.getConstant(1, dl, VT);
3448         case ISD::SETGT:
3449         case ISD::SETGE:
3450           // True if the sign bit of C1 is set.
3451           return DAG.getConstant(C1.isNegative(), dl, VT);
3452         case ISD::SETLT:
3453         case ISD::SETLE:
3454           // True if the sign bit of C1 isn't set.
3455           return DAG.getConstant(C1.isNonNegative(), dl, VT);
3456         default:
3457           break;
3458         }
3459       }
3460 
3461       // Otherwise, we can perform the comparison with the low bits.
3462       switch (Cond) {
3463       case ISD::SETEQ:
3464       case ISD::SETNE:
3465       case ISD::SETUGT:
3466       case ISD::SETUGE:
3467       case ISD::SETULT:
3468       case ISD::SETULE: {
3469         EVT newVT = N0.getOperand(0).getValueType();
3470         if (DCI.isBeforeLegalizeOps() ||
3471             (isOperationLegal(ISD::SETCC, newVT) &&
3472              isCondCodeLegal(Cond, newVT.getSimpleVT()))) {
3473           EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT);
3474           SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
3475 
3476           SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
3477                                           NewConst, Cond);
3478           return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
3479         }
3480         break;
3481       }
3482       default:
3483         break; // todo, be more careful with signed comparisons
3484       }
3485     } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
3486                (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3487       EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
3488       unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
3489       EVT ExtDstTy = N0.getValueType();
3490       unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
3491 
3492       // If the constant doesn't fit into the number of bits for the source of
3493       // the sign extension, it is impossible for both sides to be equal.
3494       if (C1.getMinSignedBits() > ExtSrcTyBits)
3495         return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
3496 
3497       SDValue ZextOp;
3498       EVT Op0Ty = N0.getOperand(0).getValueType();
3499       if (Op0Ty == ExtSrcTy) {
3500         ZextOp = N0.getOperand(0);
3501       } else {
3502         APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
3503         ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
3504                              DAG.getConstant(Imm, dl, Op0Ty));
3505       }
3506       if (!DCI.isCalledByLegalizer())
3507         DCI.AddToWorklist(ZextOp.getNode());
3508       // Otherwise, make this a use of a zext.
3509       return DAG.getSetCC(dl, VT, ZextOp,
3510                           DAG.getConstant(C1 & APInt::getLowBitsSet(
3511                                                               ExtDstTyBits,
3512                                                               ExtSrcTyBits),
3513                                           dl, ExtDstTy),
3514                           Cond);
3515     } else if ((N1C->isNullValue() || N1C->isOne()) &&
3516                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3517       // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
3518       if (N0.getOpcode() == ISD::SETCC &&
3519           isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) &&
3520           (N0.getValueType() == MVT::i1 ||
3521            getBooleanContents(N0.getOperand(0).getValueType()) ==
3522                        ZeroOrOneBooleanContent)) {
3523         bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne());
3524         if (TrueWhenTrue)
3525           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
3526         // Invert the condition.
3527         ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
3528         CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType());
3529         if (DCI.isBeforeLegalizeOps() ||
3530             isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
3531           return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
3532       }
3533 
3534       if ((N0.getOpcode() == ISD::XOR ||
3535            (N0.getOpcode() == ISD::AND &&
3536             N0.getOperand(0).getOpcode() == ISD::XOR &&
3537             N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
3538           isa<ConstantSDNode>(N0.getOperand(1)) &&
3539           cast<ConstantSDNode>(N0.getOperand(1))->isOne()) {
3540         // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
3541         // can only do this if the top bits are known zero.
3542         unsigned BitWidth = N0.getValueSizeInBits();
3543         if (DAG.MaskedValueIsZero(N0,
3544                                   APInt::getHighBitsSet(BitWidth,
3545                                                         BitWidth-1))) {
3546           // Okay, get the un-inverted input value.
3547           SDValue Val;
3548           if (N0.getOpcode() == ISD::XOR) {
3549             Val = N0.getOperand(0);
3550           } else {
3551             assert(N0.getOpcode() == ISD::AND &&
3552                     N0.getOperand(0).getOpcode() == ISD::XOR);
3553             // ((X^1)&1)^1 -> X & 1
3554             Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
3555                               N0.getOperand(0).getOperand(0),
3556                               N0.getOperand(1));
3557           }
3558 
3559           return DAG.getSetCC(dl, VT, Val, N1,
3560                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3561         }
3562       } else if (N1C->isOne()) {
3563         SDValue Op0 = N0;
3564         if (Op0.getOpcode() == ISD::TRUNCATE)
3565           Op0 = Op0.getOperand(0);
3566 
3567         if ((Op0.getOpcode() == ISD::XOR) &&
3568             Op0.getOperand(0).getOpcode() == ISD::SETCC &&
3569             Op0.getOperand(1).getOpcode() == ISD::SETCC) {
3570           SDValue XorLHS = Op0.getOperand(0);
3571           SDValue XorRHS = Op0.getOperand(1);
3572           // Ensure that the input setccs return an i1 type or 0/1 value.
3573           if (Op0.getValueType() == MVT::i1 ||
3574               (getBooleanContents(XorLHS.getOperand(0).getValueType()) ==
3575                       ZeroOrOneBooleanContent &&
3576                getBooleanContents(XorRHS.getOperand(0).getValueType()) ==
3577                         ZeroOrOneBooleanContent)) {
3578             // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
3579             Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
3580             return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond);
3581           }
3582         }
3583         if (Op0.getOpcode() == ISD::AND &&
3584             isa<ConstantSDNode>(Op0.getOperand(1)) &&
3585             cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) {
3586           // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
3587           if (Op0.getValueType().bitsGT(VT))
3588             Op0 = DAG.getNode(ISD::AND, dl, VT,
3589                           DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
3590                           DAG.getConstant(1, dl, VT));
3591           else if (Op0.getValueType().bitsLT(VT))
3592             Op0 = DAG.getNode(ISD::AND, dl, VT,
3593                         DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
3594                         DAG.getConstant(1, dl, VT));
3595 
3596           return DAG.getSetCC(dl, VT, Op0,
3597                               DAG.getConstant(0, dl, Op0.getValueType()),
3598                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3599         }
3600         if (Op0.getOpcode() == ISD::AssertZext &&
3601             cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
3602           return DAG.getSetCC(dl, VT, Op0,
3603                               DAG.getConstant(0, dl, Op0.getValueType()),
3604                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3605       }
3606     }
3607 
3608     // Given:
3609     //   icmp eq/ne (urem %x, %y), 0
3610     // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
3611     //   icmp eq/ne %x, 0
3612     if (N0.getOpcode() == ISD::UREM && N1C->isNullValue() &&
3613         (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3614       KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0));
3615       KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1));
3616       if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
3617         return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond);
3618     }
3619 
3620     if (SDValue V =
3621             optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl))
3622       return V;
3623   }
3624 
3625   // These simplifications apply to splat vectors as well.
3626   // TODO: Handle more splat vector cases.
3627   if (auto *N1C = isConstOrConstSplat(N1)) {
3628     const APInt &C1 = N1C->getAPIntValue();
3629 
3630     APInt MinVal, MaxVal;
3631     unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits();
3632     if (ISD::isSignedIntSetCC(Cond)) {
3633       MinVal = APInt::getSignedMinValue(OperandBitSize);
3634       MaxVal = APInt::getSignedMaxValue(OperandBitSize);
3635     } else {
3636       MinVal = APInt::getMinValue(OperandBitSize);
3637       MaxVal = APInt::getMaxValue(OperandBitSize);
3638     }
3639 
3640     // Canonicalize GE/LE comparisons to use GT/LT comparisons.
3641     if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
3642       // X >= MIN --> true
3643       if (C1 == MinVal)
3644         return DAG.getBoolConstant(true, dl, VT, OpVT);
3645 
3646       if (!VT.isVector()) { // TODO: Support this for vectors.
3647         // X >= C0 --> X > (C0 - 1)
3648         APInt C = C1 - 1;
3649         ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
3650         if ((DCI.isBeforeLegalizeOps() ||
3651              isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
3652             (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
3653                                   isLegalICmpImmediate(C.getSExtValue())))) {
3654           return DAG.getSetCC(dl, VT, N0,
3655                               DAG.getConstant(C, dl, N1.getValueType()),
3656                               NewCC);
3657         }
3658       }
3659     }
3660 
3661     if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
3662       // X <= MAX --> true
3663       if (C1 == MaxVal)
3664         return DAG.getBoolConstant(true, dl, VT, OpVT);
3665 
3666       // X <= C0 --> X < (C0 + 1)
3667       if (!VT.isVector()) { // TODO: Support this for vectors.
3668         APInt C = C1 + 1;
3669         ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
3670         if ((DCI.isBeforeLegalizeOps() ||
3671              isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
3672             (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
3673                                   isLegalICmpImmediate(C.getSExtValue())))) {
3674           return DAG.getSetCC(dl, VT, N0,
3675                               DAG.getConstant(C, dl, N1.getValueType()),
3676                               NewCC);
3677         }
3678       }
3679     }
3680 
3681     if (Cond == ISD::SETLT || Cond == ISD::SETULT) {
3682       if (C1 == MinVal)
3683         return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false
3684 
3685       // TODO: Support this for vectors after legalize ops.
3686       if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3687         // Canonicalize setlt X, Max --> setne X, Max
3688         if (C1 == MaxVal)
3689           return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
3690 
3691         // If we have setult X, 1, turn it into seteq X, 0
3692         if (C1 == MinVal+1)
3693           return DAG.getSetCC(dl, VT, N0,
3694                               DAG.getConstant(MinVal, dl, N0.getValueType()),
3695                               ISD::SETEQ);
3696       }
3697     }
3698 
3699     if (Cond == ISD::SETGT || Cond == ISD::SETUGT) {
3700       if (C1 == MaxVal)
3701         return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false
3702 
3703       // TODO: Support this for vectors after legalize ops.
3704       if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3705         // Canonicalize setgt X, Min --> setne X, Min
3706         if (C1 == MinVal)
3707           return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
3708 
3709         // If we have setugt X, Max-1, turn it into seteq X, Max
3710         if (C1 == MaxVal-1)
3711           return DAG.getSetCC(dl, VT, N0,
3712                               DAG.getConstant(MaxVal, dl, N0.getValueType()),
3713                               ISD::SETEQ);
3714       }
3715     }
3716 
3717     if (Cond == ISD::SETEQ || Cond == ISD::SETNE) {
3718       // (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
3719       if (C1.isNullValue())
3720         if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
3721                 VT, N0, N1, Cond, DCI, dl))
3722           return CC;
3723     }
3724 
3725     // If we have "setcc X, C0", check to see if we can shrink the immediate
3726     // by changing cc.
3727     // TODO: Support this for vectors after legalize ops.
3728     if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3729       // SETUGT X, SINTMAX  -> SETLT X, 0
3730       if (Cond == ISD::SETUGT &&
3731           C1 == APInt::getSignedMaxValue(OperandBitSize))
3732         return DAG.getSetCC(dl, VT, N0,
3733                             DAG.getConstant(0, dl, N1.getValueType()),
3734                             ISD::SETLT);
3735 
3736       // SETULT X, SINTMIN  -> SETGT X, -1
3737       if (Cond == ISD::SETULT &&
3738           C1 == APInt::getSignedMinValue(OperandBitSize)) {
3739         SDValue ConstMinusOne =
3740             DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
3741                             N1.getValueType());
3742         return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
3743       }
3744     }
3745   }
3746 
3747   // Back to non-vector simplifications.
3748   // TODO: Can we do these for vector splats?
3749   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
3750     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3751     const APInt &C1 = N1C->getAPIntValue();
3752     EVT ShValTy = N0.getValueType();
3753 
3754     // Fold bit comparisons when we can.
3755     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3756         (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) &&
3757         N0.getOpcode() == ISD::AND) {
3758       if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3759         EVT ShiftTy =
3760             getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
3761         if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
3762           // Perform the xform if the AND RHS is a single bit.
3763           unsigned ShCt = AndRHS->getAPIntValue().logBase2();
3764           if (AndRHS->getAPIntValue().isPowerOf2() &&
3765               !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
3766             return DAG.getNode(ISD::TRUNCATE, dl, VT,
3767                                DAG.getNode(ISD::SRL, dl, ShValTy, N0,
3768                                            DAG.getConstant(ShCt, dl, ShiftTy)));
3769           }
3770         } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
3771           // (X & 8) == 8  -->  (X & 8) >> 3
3772           // Perform the xform if C1 is a single bit.
3773           unsigned ShCt = C1.logBase2();
3774           if (C1.isPowerOf2() &&
3775               !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
3776             return DAG.getNode(ISD::TRUNCATE, dl, VT,
3777                                DAG.getNode(ISD::SRL, dl, ShValTy, N0,
3778                                            DAG.getConstant(ShCt, dl, ShiftTy)));
3779           }
3780         }
3781       }
3782     }
3783 
3784     if (C1.getMinSignedBits() <= 64 &&
3785         !isLegalICmpImmediate(C1.getSExtValue())) {
3786       EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
3787       // (X & -256) == 256 -> (X >> 8) == 1
3788       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3789           N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
3790         if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3791           const APInt &AndRHSC = AndRHS->getAPIntValue();
3792           if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
3793             unsigned ShiftBits = AndRHSC.countTrailingZeros();
3794             if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
3795               SDValue Shift =
3796                 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0),
3797                             DAG.getConstant(ShiftBits, dl, ShiftTy));
3798               SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy);
3799               return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
3800             }
3801           }
3802         }
3803       } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
3804                  Cond == ISD::SETULE || Cond == ISD::SETUGT) {
3805         bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
3806         // X <  0x100000000 -> (X >> 32) <  1
3807         // X >= 0x100000000 -> (X >> 32) >= 1
3808         // X <= 0x0ffffffff -> (X >> 32) <  1
3809         // X >  0x0ffffffff -> (X >> 32) >= 1
3810         unsigned ShiftBits;
3811         APInt NewC = C1;
3812         ISD::CondCode NewCond = Cond;
3813         if (AdjOne) {
3814           ShiftBits = C1.countTrailingOnes();
3815           NewC = NewC + 1;
3816           NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3817         } else {
3818           ShiftBits = C1.countTrailingZeros();
3819         }
3820         NewC.lshrInPlace(ShiftBits);
3821         if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
3822             isLegalICmpImmediate(NewC.getSExtValue()) &&
3823             !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
3824           SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0,
3825                                       DAG.getConstant(ShiftBits, dl, ShiftTy));
3826           SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy);
3827           return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
3828         }
3829       }
3830     }
3831   }
3832 
3833   if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
3834     auto *CFP = cast<ConstantFPSDNode>(N1);
3835     assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value");
3836 
3837     // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
3838     // constant if knowing that the operand is non-nan is enough.  We prefer to
3839     // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
3840     // materialize 0.0.
3841     if (Cond == ISD::SETO || Cond == ISD::SETUO)
3842       return DAG.getSetCC(dl, VT, N0, N0, Cond);
3843 
3844     // setcc (fneg x), C -> setcc swap(pred) x, -C
3845     if (N0.getOpcode() == ISD::FNEG) {
3846       ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond);
3847       if (DCI.isBeforeLegalizeOps() ||
3848           isCondCodeLegal(SwapCond, N0.getSimpleValueType())) {
3849         SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1);
3850         return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond);
3851       }
3852     }
3853 
3854     // If the condition is not legal, see if we can find an equivalent one
3855     // which is legal.
3856     if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
3857       // If the comparison was an awkward floating-point == or != and one of
3858       // the comparison operands is infinity or negative infinity, convert the
3859       // condition to a less-awkward <= or >=.
3860       if (CFP->getValueAPF().isInfinity()) {
3861         if (CFP->getValueAPF().isNegative()) {
3862           if (Cond == ISD::SETOEQ &&
3863               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
3864             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
3865           if (Cond == ISD::SETUEQ &&
3866               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
3867             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
3868           if (Cond == ISD::SETUNE &&
3869               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
3870             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
3871           if (Cond == ISD::SETONE &&
3872               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
3873             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
3874         } else {
3875           if (Cond == ISD::SETOEQ &&
3876               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
3877             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
3878           if (Cond == ISD::SETUEQ &&
3879               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
3880             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
3881           if (Cond == ISD::SETUNE &&
3882               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
3883             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
3884           if (Cond == ISD::SETONE &&
3885               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
3886             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
3887         }
3888       }
3889     }
3890   }
3891 
3892   if (N0 == N1) {
3893     // The sext(setcc()) => setcc() optimization relies on the appropriate
3894     // constant being emitted.
3895     assert(!N0.getValueType().isInteger() &&
3896            "Integer types should be handled by FoldSetCC");
3897 
3898     bool EqTrue = ISD::isTrueWhenEqual(Cond);
3899     unsigned UOF = ISD::getUnorderedFlavor(Cond);
3900     if (UOF == 2) // FP operators that are undefined on NaNs.
3901       return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
3902     if (UOF == unsigned(EqTrue))
3903       return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
3904     // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
3905     // if it is not already.
3906     ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
3907     if (NewCond != Cond &&
3908         (DCI.isBeforeLegalizeOps() ||
3909                             isCondCodeLegal(NewCond, N0.getSimpleValueType())))
3910       return DAG.getSetCC(dl, VT, N0, N1, NewCond);
3911   }
3912 
3913   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3914       N0.getValueType().isInteger()) {
3915     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
3916         N0.getOpcode() == ISD::XOR) {
3917       // Simplify (X+Y) == (X+Z) -->  Y == Z
3918       if (N0.getOpcode() == N1.getOpcode()) {
3919         if (N0.getOperand(0) == N1.getOperand(0))
3920           return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
3921         if (N0.getOperand(1) == N1.getOperand(1))
3922           return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
3923         if (isCommutativeBinOp(N0.getOpcode())) {
3924           // If X op Y == Y op X, try other combinations.
3925           if (N0.getOperand(0) == N1.getOperand(1))
3926             return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
3927                                 Cond);
3928           if (N0.getOperand(1) == N1.getOperand(0))
3929             return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
3930                                 Cond);
3931         }
3932       }
3933 
3934       // If RHS is a legal immediate value for a compare instruction, we need
3935       // to be careful about increasing register pressure needlessly.
3936       bool LegalRHSImm = false;
3937 
3938       if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
3939         if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3940           // Turn (X+C1) == C2 --> X == C2-C1
3941           if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
3942             return DAG.getSetCC(dl, VT, N0.getOperand(0),
3943                                 DAG.getConstant(RHSC->getAPIntValue()-
3944                                                 LHSR->getAPIntValue(),
3945                                 dl, N0.getValueType()), Cond);
3946           }
3947 
3948           // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
3949           if (N0.getOpcode() == ISD::XOR)
3950             // If we know that all of the inverted bits are zero, don't bother
3951             // performing the inversion.
3952             if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
3953               return
3954                 DAG.getSetCC(dl, VT, N0.getOperand(0),
3955                              DAG.getConstant(LHSR->getAPIntValue() ^
3956                                                RHSC->getAPIntValue(),
3957                                              dl, N0.getValueType()),
3958                              Cond);
3959         }
3960 
3961         // Turn (C1-X) == C2 --> X == C1-C2
3962         if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
3963           if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
3964             return
3965               DAG.getSetCC(dl, VT, N0.getOperand(1),
3966                            DAG.getConstant(SUBC->getAPIntValue() -
3967                                              RHSC->getAPIntValue(),
3968                                            dl, N0.getValueType()),
3969                            Cond);
3970           }
3971         }
3972 
3973         // Could RHSC fold directly into a compare?
3974         if (RHSC->getValueType(0).getSizeInBits() <= 64)
3975           LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
3976       }
3977 
3978       // (X+Y) == X --> Y == 0 and similar folds.
3979       // Don't do this if X is an immediate that can fold into a cmp
3980       // instruction and X+Y has other uses. It could be an induction variable
3981       // chain, and the transform would increase register pressure.
3982       if (!LegalRHSImm || N0.hasOneUse())
3983         if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI))
3984           return V;
3985     }
3986 
3987     if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
3988         N1.getOpcode() == ISD::XOR)
3989       if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI))
3990         return V;
3991 
3992     if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI))
3993       return V;
3994   }
3995 
3996   // Fold remainder of division by a constant.
3997   if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) &&
3998       N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3999     AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4000 
4001     // When division is cheap or optimizing for minimum size,
4002     // fall through to DIVREM creation by skipping this fold.
4003     if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) {
4004       if (N0.getOpcode() == ISD::UREM) {
4005         if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl))
4006           return Folded;
4007       } else if (N0.getOpcode() == ISD::SREM) {
4008         if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl))
4009           return Folded;
4010       }
4011     }
4012   }
4013 
4014   // Fold away ALL boolean setcc's.
4015   if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) {
4016     SDValue Temp;
4017     switch (Cond) {
4018     default: llvm_unreachable("Unknown integer setcc!");
4019     case ISD::SETEQ:  // X == Y  -> ~(X^Y)
4020       Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
4021       N0 = DAG.getNOT(dl, Temp, OpVT);
4022       if (!DCI.isCalledByLegalizer())
4023         DCI.AddToWorklist(Temp.getNode());
4024       break;
4025     case ISD::SETNE:  // X != Y   -->  (X^Y)
4026       N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
4027       break;
4028     case ISD::SETGT:  // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
4029     case ISD::SETULT: // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
4030       Temp = DAG.getNOT(dl, N0, OpVT);
4031       N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp);
4032       if (!DCI.isCalledByLegalizer())
4033         DCI.AddToWorklist(Temp.getNode());
4034       break;
4035     case ISD::SETLT:  // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
4036     case ISD::SETUGT: // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
4037       Temp = DAG.getNOT(dl, N1, OpVT);
4038       N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp);
4039       if (!DCI.isCalledByLegalizer())
4040         DCI.AddToWorklist(Temp.getNode());
4041       break;
4042     case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
4043     case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
4044       Temp = DAG.getNOT(dl, N0, OpVT);
4045       N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp);
4046       if (!DCI.isCalledByLegalizer())
4047         DCI.AddToWorklist(Temp.getNode());
4048       break;
4049     case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
4050     case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
4051       Temp = DAG.getNOT(dl, N1, OpVT);
4052       N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp);
4053       break;
4054     }
4055     if (VT.getScalarType() != MVT::i1) {
4056       if (!DCI.isCalledByLegalizer())
4057         DCI.AddToWorklist(N0.getNode());
4058       // FIXME: If running after legalize, we probably can't do this.
4059       ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT));
4060       N0 = DAG.getNode(ExtendCode, dl, VT, N0);
4061     }
4062     return N0;
4063   }
4064 
4065   // Could not fold it.
4066   return SDValue();
4067 }
4068 
4069 /// Returns true (and the GlobalValue and the offset) if the node is a
4070 /// GlobalAddress + offset.
4071 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA,
4072                                     int64_t &Offset) const {
4073 
4074   SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode();
4075 
4076   if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
4077     GA = GASD->getGlobal();
4078     Offset += GASD->getOffset();
4079     return true;
4080   }
4081 
4082   if (N->getOpcode() == ISD::ADD) {
4083     SDValue N1 = N->getOperand(0);
4084     SDValue N2 = N->getOperand(1);
4085     if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
4086       if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
4087         Offset += V->getSExtValue();
4088         return true;
4089       }
4090     } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
4091       if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
4092         Offset += V->getSExtValue();
4093         return true;
4094       }
4095     }
4096   }
4097 
4098   return false;
4099 }
4100 
4101 SDValue TargetLowering::PerformDAGCombine(SDNode *N,
4102                                           DAGCombinerInfo &DCI) const {
4103   // Default implementation: no optimization.
4104   return SDValue();
4105 }
4106 
4107 //===----------------------------------------------------------------------===//
4108 //  Inline Assembler Implementation Methods
4109 //===----------------------------------------------------------------------===//
4110 
4111 TargetLowering::ConstraintType
4112 TargetLowering::getConstraintType(StringRef Constraint) const {
4113   unsigned S = Constraint.size();
4114 
4115   if (S == 1) {
4116     switch (Constraint[0]) {
4117     default: break;
4118     case 'r':
4119       return C_RegisterClass;
4120     case 'm': // memory
4121     case 'o': // offsetable
4122     case 'V': // not offsetable
4123       return C_Memory;
4124     case 'n': // Simple Integer
4125     case 'E': // Floating Point Constant
4126     case 'F': // Floating Point Constant
4127       return C_Immediate;
4128     case 'i': // Simple Integer or Relocatable Constant
4129     case 's': // Relocatable Constant
4130     case 'p': // Address.
4131     case 'X': // Allow ANY value.
4132     case 'I': // Target registers.
4133     case 'J':
4134     case 'K':
4135     case 'L':
4136     case 'M':
4137     case 'N':
4138     case 'O':
4139     case 'P':
4140     case '<':
4141     case '>':
4142       return C_Other;
4143     }
4144   }
4145 
4146   if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') {
4147     if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
4148       return C_Memory;
4149     return C_Register;
4150   }
4151   return C_Unknown;
4152 }
4153 
4154 /// Try to replace an X constraint, which matches anything, with another that
4155 /// has more specific requirements based on the type of the corresponding
4156 /// operand.
4157 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
4158   if (ConstraintVT.isInteger())
4159     return "r";
4160   if (ConstraintVT.isFloatingPoint())
4161     return "f"; // works for many targets
4162   return nullptr;
4163 }
4164 
4165 SDValue TargetLowering::LowerAsmOutputForConstraint(
4166     SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
4167     SelectionDAG &DAG) const {
4168   return SDValue();
4169 }
4170 
/// Lower the specified operand into the Ops vector.
/// If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                  std::string &Constraint,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const {

  // Only single-letter generic constraints are handled here; multi-letter
  // constraints are left for target overrides.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'X':     // Allows any operand; labels (basic block) use this.
    if (Op.getOpcode() == ISD::BasicBlock ||
        Op.getOpcode() == ISD::TargetBlockAddress) {
      Ops.push_back(Op);
      return;
    }
    LLVM_FALLTHROUGH;
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's': {  // Relocatable Constant

    GlobalAddressSDNode *GA;
    ConstantSDNode *C;
    BlockAddressSDNode *BA;
    // Running displacement accumulated while peeling ADD/SUB-of-constant
    // layers off Op below.
    uint64_t Offset = 0;

    // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C),
    // etc., since getelementpointer is variadic. We can't use
    // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible
    // while in this case the GA may be furthest from the root node which is
    // likely an ISD::ADD.
    while (1) {
      // A global address satisfies 'i', 's', and 'X' (but not 'n', which
      // requires a plain integer).
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                                 GA->getValueType(0),
                                                 Offset + GA->getOffset()));
        return;
      } else if ((C = dyn_cast<ConstantSDNode>(Op)) &&
                 ConstraintLetter != 's') {
        // gcc prints these as sign extended.  Sign extend value to 64 bits
        // now; without this it would get ZExt'd later in
        // ScheduleDAGSDNodes::EmitNode, which is very generic.
        // i1 booleans follow the target's boolean-contents convention
        // instead of being unconditionally sign extended.
        bool IsBool = C->getConstantIntValue()->getBitWidth() == 1;
        BooleanContent BCont = getBooleanContents(MVT::i64);
        ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
                                      : ISD::SIGN_EXTEND;
        int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue()
                                                    : C->getSExtValue();
        Ops.push_back(DAG.getTargetConstant(Offset + ExtVal,
                                            SDLoc(C), MVT::i64));
        return;
      } else if ((BA = dyn_cast<BlockAddressSDNode>(Op)) &&
                 ConstraintLetter != 'n') {
        // A block address is relocatable, so it satisfies everything but 'n'.
        Ops.push_back(DAG.getTargetBlockAddress(
            BA->getBlockAddress(), BA->getValueType(0),
            Offset + BA->getOffset(), BA->getTargetFlags()));
        return;
      } else {
        const unsigned OpCode = Op.getOpcode();
        if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
          // Peel one constant operand into Offset and keep walking down the
          // other operand.
          if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0))))
            Op = Op.getOperand(1);
          // Subtraction is not commutative.
          else if (OpCode == ISD::ADD &&
                   (C = dyn_cast<ConstantSDNode>(Op.getOperand(1))))
            Op = Op.getOperand(0);
          else
            return;
          // SUB's constant contributes negatively to the displacement.
          Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue();
          continue;
        }
      }
      // Neither a symbol, a constant, nor a peelable ADD/SUB: give up
      // without adding anything to Ops.
      return;
    }
    break;
  }
  }
}
4251 
4252 std::pair<unsigned, const TargetRegisterClass *>
4253 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
4254                                              StringRef Constraint,
4255                                              MVT VT) const {
4256   if (Constraint.empty() || Constraint[0] != '{')
4257     return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr));
4258   assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?");
4259 
4260   // Remove the braces from around the name.
4261   StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
4262 
4263   std::pair<unsigned, const TargetRegisterClass *> R =
4264       std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
4265 
4266   // Figure out which register class contains this reg.
4267   for (const TargetRegisterClass *RC : RI->regclasses()) {
4268     // If none of the value types for this register class are valid, we
4269     // can't use it.  For example, 64-bit reg classes on 32-bit targets.
4270     if (!isLegalRC(*RI, *RC))
4271       continue;
4272 
4273     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
4274          I != E; ++I) {
4275       if (RegName.equals_lower(RI->getRegAsmName(*I))) {
4276         std::pair<unsigned, const TargetRegisterClass *> S =
4277             std::make_pair(*I, RC);
4278 
4279         // If this register class has the requested value type, return it,
4280         // otherwise keep searching and return the first class found
4281         // if no other is found which explicitly has the requested type.
4282         if (RI->isTypeLegalForClass(*RC, VT))
4283           return S;
4284         if (!R.second)
4285           R = S;
4286       }
4287     }
4288   }
4289 
4290   return R;
4291 }
4292 
4293 //===----------------------------------------------------------------------===//
4294 // Constraint Selection.
4295 
4296 /// Return true of this is an input operand that is a matching constraint like
4297 /// "4".
4298 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
4299   assert(!ConstraintCode.empty() && "No known constraint!");
4300   return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
4301 }
4302 
4303 /// If this is an input matching constraint, this method returns the output
4304 /// operand it matches.
4305 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
4306   assert(!ConstraintCode.empty() && "No known constraint!");
4307   return atoi(ConstraintCode.c_str());
4308 }
4309 
4310 /// Split up the constraint string from the inline assembly value into the
4311 /// specific constraints and their prefixes, and also tie in the associated
4312 /// operand values.
4313 /// If this returns an empty vector, and if the constraint string itself
4314 /// isn't empty, there was an error parsing.
TargetLowering::AsmOperandInfoVector
TargetLowering::ParseConstraints(const DataLayout &DL,
                                 const TargetRegisterInfo *TRI,
                                 ImmutableCallSite CS) const {
  /// Information about all of the constraints.
  AsmOperandInfoVector ConstraintOperands;
  const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
  unsigned maCount = 0; // Largest number of multiple alternative constraints.

  // Do a prepass over the constraints, canonicalizing them, and building up the
  // ConstraintOperands list.
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.

  for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    ConstraintOperands.emplace_back(std::move(CI));
    AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Update multiple alternative constraint count.
    if (OpInfo.multipleAlternatives.size() > maCount)
      maCount = OpInfo.multipleAlternatives.size();

    OpInfo.ConstraintVT = MVT::Other;

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      // Indirect outputs just consume an argument.
      if (OpInfo.isIndirect) {
        OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
        break;
      }

      // The return value of the call is this value.  As such, there is no
      // corresponding argument.
      assert(!CS.getType()->isVoidTy() &&
             "Bad inline asm!");
      // Multiple direct outputs come back as a struct; this output's type is
      // the struct element for its result number.
      if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
        OpInfo.ConstraintVT =
            getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
      }
      ++ResNo;
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // If the constraint has an operand value, derive a simple value type for
    // it so that later passes can compare constraint types and sizes.
    if (OpInfo.CallOperandVal) {
      llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
      if (OpInfo.isIndirect) {
        // An indirect operand must be a pointer; we care about the pointee.
        llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
        if (!PtrTy)
          report_fatal_error("Indirect operand for inline asm not a pointer!");
        OpTy = PtrTy->getElementType();
      }

      // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
      if (StructType *STy = dyn_cast<StructType>(OpTy))
        if (STy->getNumElements() == 1)
          OpTy = STy->getElementType(0);

      // If OpTy is not a single value, it may be a struct/union that we
      // can tile with integers.
      if (!OpTy->isSingleValueType() && OpTy->isSized()) {
        unsigned BitSize = DL.getTypeSizeInBits(OpTy);
        switch (BitSize) {
        default: break;
        case 1:
        case 8:
        case 16:
        case 32:
        case 64:
        case 128:
          OpInfo.ConstraintVT =
              MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
          break;
        }
      } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
        // Model pointers as integers of pointer width for their address space.
        unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
        OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
      } else {
        OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
      }
    }
  }

  // If we have multiple alternative constraints, select the best alternative.
  if (!ConstraintOperands.empty()) {
    if (maCount) {
      unsigned bestMAIndex = 0;
      int bestWeight = -1;
      // weight:  -1 = invalid match, and 0 = so-so match to 5 = good match.
      int weight = -1;
      unsigned maIndex;
      // Compute the sums of the weights for each alternative, keeping track
      // of the best (highest weight) one so far.
      for (maIndex = 0; maIndex < maCount; ++maIndex) {
        int weightSum = 0;
        for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
             cIndex != eIndex; ++cIndex) {
          AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];
          if (OpInfo.Type == InlineAsm::isClobber)
            continue;

          // If this is an output operand with a matching input operand,
          // look up the matching input. If their types mismatch, e.g. one
          // is an integer, the other is floating point, or their sizes are
          // different, flag it as an maCantMatch.
          if (OpInfo.hasMatchingInput()) {
            AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
            if (OpInfo.ConstraintVT != Input.ConstraintVT) {
              if ((OpInfo.ConstraintVT.isInteger() !=
                   Input.ConstraintVT.isInteger()) ||
                  (OpInfo.ConstraintVT.getSizeInBits() !=
                   Input.ConstraintVT.getSizeInBits())) {
                weightSum = -1; // Can't match.
                break;
              }
            }
          }
          weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
          if (weight == -1) {
            // One operand cannot use this alternative at all; discard it.
            weightSum = -1;
            break;
          }
          weightSum += weight;
        }
        // Update best.
        if (weightSum > bestWeight) {
          bestWeight = weightSum;
          bestMAIndex = maIndex;
        }
      }

      // Now select chosen alternative in each constraint.
      for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
           cIndex != eIndex; ++cIndex) {
        AsmOperandInfo &cInfo = ConstraintOperands[cIndex];
        if (cInfo.Type == InlineAsm::isClobber)
          continue;
        cInfo.selectAlternative(bestMAIndex);
      }
    }
  }

  // Check and hook up tied operands, choose constraint code to use.
  for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
       cIndex != eIndex; ++cIndex) {
    AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];

    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];

      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
        // Ask the target what register class each constraint would use; tied
        // operands must resolve to the same class to be compatible.
        std::pair<unsigned, const TargetRegisterClass *> MatchRC =
            getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                         OpInfo.ConstraintVT);
        std::pair<unsigned, const TargetRegisterClass *> InputRC =
            getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
                                         Input.ConstraintVT);
        if ((OpInfo.ConstraintVT.isInteger() !=
             Input.ConstraintVT.isInteger()) ||
            (MatchRC.second != InputRC.second)) {
          report_fatal_error("Unsupported asm: input constraint"
                             " with a matching output constraint of"
                             " incompatible type!");
        }
      }
    }
  }

  return ConstraintOperands;
}
4499 
4500 /// Return an integer indicating how general CT is.
4501 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
4502   switch (CT) {
4503   case TargetLowering::C_Immediate:
4504   case TargetLowering::C_Other:
4505   case TargetLowering::C_Unknown:
4506     return 0;
4507   case TargetLowering::C_Register:
4508     return 1;
4509   case TargetLowering::C_RegisterClass:
4510     return 2;
4511   case TargetLowering::C_Memory:
4512     return 3;
4513   }
4514   llvm_unreachable("Invalid constraint type");
4515 }
4516 
4517 /// Examine constraint type and operand type and determine a weight value.
4518 /// This object must already have been set up with the operand type
4519 /// and the current alternative constraint selected.
4520 TargetLowering::ConstraintWeight
4521   TargetLowering::getMultipleConstraintMatchWeight(
4522     AsmOperandInfo &info, int maIndex) const {
4523   InlineAsm::ConstraintCodeVector *rCodes;
4524   if (maIndex >= (int)info.multipleAlternatives.size())
4525     rCodes = &info.Codes;
4526   else
4527     rCodes = &info.multipleAlternatives[maIndex].Codes;
4528   ConstraintWeight BestWeight = CW_Invalid;
4529 
4530   // Loop over the options, keeping track of the most general one.
4531   for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
4532     ConstraintWeight weight =
4533       getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
4534     if (weight > BestWeight)
4535       BestWeight = weight;
4536   }
4537 
4538   return BestWeight;
4539 }
4540 
4541 /// Examine constraint type and operand type and determine a weight value.
4542 /// This object must already have been set up with the operand type
4543 /// and the current alternative constraint selected.
4544 TargetLowering::ConstraintWeight
4545   TargetLowering::getSingleConstraintMatchWeight(
4546     AsmOperandInfo &info, const char *constraint) const {
4547   ConstraintWeight weight = CW_Invalid;
4548   Value *CallOperandVal = info.CallOperandVal;
4549     // If we don't have a value, we can't do a match,
4550     // but allow it at the lowest weight.
4551   if (!CallOperandVal)
4552     return CW_Default;
4553   // Look at the constraint type.
4554   switch (*constraint) {
4555     case 'i': // immediate integer.
4556     case 'n': // immediate integer with a known value.
4557       if (isa<ConstantInt>(CallOperandVal))
4558         weight = CW_Constant;
4559       break;
4560     case 's': // non-explicit intregal immediate.
4561       if (isa<GlobalValue>(CallOperandVal))
4562         weight = CW_Constant;
4563       break;
4564     case 'E': // immediate float if host format.
4565     case 'F': // immediate float.
4566       if (isa<ConstantFP>(CallOperandVal))
4567         weight = CW_Constant;
4568       break;
4569     case '<': // memory operand with autodecrement.
4570     case '>': // memory operand with autoincrement.
4571     case 'm': // memory operand.
4572     case 'o': // offsettable memory operand
4573     case 'V': // non-offsettable memory operand
4574       weight = CW_Memory;
4575       break;
4576     case 'r': // general register.
4577     case 'g': // general register, memory operand or immediate integer.
4578               // note: Clang converts "g" to "imr".
4579       if (CallOperandVal->getType()->isIntegerTy())
4580         weight = CW_Register;
4581       break;
4582     case 'X': // any operand.
4583   default:
4584     weight = CW_Default;
4585     break;
4586   }
4587   return weight;
4588 }
4589 
4590 /// If there are multiple different constraints that we could pick for this
4591 /// operand (e.g. "imr") try to pick the 'best' one.
4592 /// This is somewhat tricky: constraints fall into four classes:
4593 ///    Other         -> immediates and magic values
4594 ///    Register      -> one specific register
4595 ///    RegisterClass -> a group of regs
4596 ///    Memory        -> memory
4597 /// Ideally, we would pick the most specific constraint possible: if we have
4598 /// something that fits into a register, we would pick it.  The problem here
4599 /// is that if we have something that could either be in a register or in
4600 /// memory that use of the register could cause selection of *other*
4601 /// operands to fail: they might only succeed if we pick memory.  Because of
4602 /// this the heuristic we use is:
4603 ///
4604 ///  1) If there is an 'other' constraint, and if the operand is valid for
4605 ///     that constraint, use it.  This makes us take advantage of 'i'
4606 ///     constraints when available.
4607 ///  2) Otherwise, pick the most general constraint present.  This prefers
4608 ///     'm' over 'r', for example.
4609 ///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering &TLI,
                             SDValue Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  // Best candidate so far; BestIdx == 0 means Codes[0] is used if nothing
  // else wins.
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
      TLI.getConstraintType(OpInfo.Codes[i]);

    // Indirect 'other' or 'immediate' constraints are not allowed.
    if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
                               CType == TargetLowering::C_Register ||
                               CType == TargetLowering::C_RegisterClass))
      continue;

    // If this is an 'other' or 'immediate' constraint, see if the operand is
    // valid for it. For example, on X86 we might have an 'rI' constraint. If
    // the operand is an integer in the range [0..31] we want to use I (saving a
    // load of a register), otherwise we must use 'r'.
    if ((CType == TargetLowering::C_Other ||
         CType == TargetLowering::C_Immediate) && Op.getNode()) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      std::vector<SDValue> ResultOps;
      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
                                       ResultOps, *DAG);
      // A non-empty result means the operand can actually be encoded under
      // this constraint; take it immediately (heuristic rule 1).
      if (!ResultOps.empty()) {
        BestType = CType;
        BestIdx = i;
        break;
      }
    }

    // Things with matching constraints can only be registers, per gcc
    // documentation.  This mainly affects "g" constraints.
    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
      continue;

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  // Commit the winning option (heuristic rule 2: most general wins).
  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}
4664 
4665 /// Determines the constraint code and constraint type to use for the specific
4666 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
4667 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
4668                                             SDValue Op,
4669                                             SelectionDAG *DAG) const {
4670   assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
4671 
4672   // Single-letter constraints ('r') are very common.
4673   if (OpInfo.Codes.size() == 1) {
4674     OpInfo.ConstraintCode = OpInfo.Codes[0];
4675     OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
4676   } else {
4677     ChooseConstraint(OpInfo, *this, Op, DAG);
4678   }
4679 
4680   // 'X' matches anything.
4681   if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
4682     // Labels and constants are handled elsewhere ('X' is the only thing
4683     // that matches labels).  For Functions, the type here is the type of
4684     // the result, which is not what we want to look at; leave them alone.
4685     Value *v = OpInfo.CallOperandVal;
4686     if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
4687       OpInfo.CallOperandVal = v;
4688       return;
4689     }
4690 
4691     if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress)
4692       return;
4693 
4694     // Otherwise, try to resolve it to something we know about by looking at
4695     // the actual operand type.
4696     if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
4697       OpInfo.ConstraintCode = Repl;
4698       OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
4699     }
4700   }
4701 }
4702 
4703 /// Given an exact SDIV by a constant, create a multiplication
4704 /// with the multiplicative inverse of the constant.
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
                              const SDLoc &dl, SelectionDAG &DAG,
                              SmallVectorImpl<SDNode *> &Created) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
  EVT ShSVT = ShVT.getScalarType();

  bool UseSRA = false;
  SmallVector<SDValue, 16> Shifts, Factors;

  // For each divisor element, split it into a power-of-two part (handled by
  // an exact arithmetic shift) and an odd part (handled by multiplying with
  // its modular multiplicative inverse).
  auto BuildSDIVPattern = [&](ConstantSDNode *C) {
    if (C->isNullValue())
      return false; // Division by zero: give up on the whole fold.
    APInt Divisor = C->getAPIntValue();
    unsigned Shift = Divisor.countTrailingZeros();
    if (Shift) {
      // Strip the trailing zeros; after this the divisor is odd, which is
      // required for the inverse below to exist.
      Divisor.ashrInPlace(Shift);
      UseSRA = true;
    }
    // Calculate the multiplicative inverse, using Newton's method.
    APInt t;
    APInt Factor = Divisor;
    while ((t = Divisor * Factor) != 1)
      Factor *= APInt(Divisor.getBitWidth(), 2) - t;
    Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
    Factors.push_back(DAG.getConstant(Factor, dl, SVT));
    return true;
  };

  // Collect all magic values from the build vector.
  if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern))
    return SDValue();

  // Materialize the collected per-element constants, as splats/vectors for
  // vector types or as plain scalars otherwise.
  SDValue Shift, Factor;
  if (VT.isVector()) {
    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
    Factor = DAG.getBuildVector(VT, dl, Factors);
  } else {
    Shift = Shifts[0];
    Factor = Factors[0];
  }

  SDValue Res = Op0;

  // Shift the value upfront if it is even, so the LSB is one.
  if (UseSRA) {
    // TODO: For UDIV use SRL instead of SRA.
    SDNodeFlags Flags;
    Flags.setExact(true);
    Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags);
    Created.push_back(Res.getNode());
  }

  // Final multiply by the inverse of the odd part completes the division.
  return DAG.getNode(ISD::MUL, dl, VT, Res, Factor);
}
4763 
4764 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
4765                               SelectionDAG &DAG,
4766                               SmallVectorImpl<SDNode *> &Created) const {
4767   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4768   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4769   if (TLI.isIntDivCheap(N->getValueType(0), Attr))
4770     return SDValue(N, 0); // Lower SDIV as SDIV
4771   return SDValue();
4772 }
4773 
4774 /// Given an ISD::SDIV node expressing a divide by constant,
4775 /// return a DAG expression to select that will generate the same value by
4776 /// multiplying by a magic number.
4777 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                  bool IsAfterLegalization,
                                  SmallVectorImpl<SDNode *> &Created) const {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
  EVT ShSVT = ShVT.getScalarType();
  unsigned EltBits = VT.getScalarSizeInBits();

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  // If the sdiv has an 'exact' bit we can use a simpler lowering.
  if (N->getFlags().hasExact())
    return BuildExactSDIV(*this, N, dl, DAG, Created);

  SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks;

  // For each divisor element, compute the magic multiplier, an optional
  // numerator add/sub factor, the post-multiply shift amount, and a mask
  // controlling the final sign-bit fixup.
  auto BuildSDIVPattern = [&](ConstantSDNode *C) {
    if (C->isNullValue())
      return false; // Division by zero: give up on the whole fold.

    const APInt &Divisor = C->getAPIntValue();
    APInt::ms magics = Divisor.magic();
    int NumeratorFactor = 0;
    int ShiftMask = -1;

    if (Divisor.isOneValue() || Divisor.isAllOnesValue()) {
      // If d is +1/-1, we just multiply the numerator by +1/-1.
      NumeratorFactor = Divisor.getSExtValue();
      magics.m = 0;
      magics.s = 0;
      ShiftMask = 0; // Mask of 0 disables the sign-bit fixup below.
    } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
      // If d > 0 and m < 0, add the numerator.
      NumeratorFactor = 1;
    } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
      // If d < 0 and m > 0, subtract the numerator.
      NumeratorFactor = -1;
    }

    MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT));
    Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT));
    Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT));
    ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT));
    return true;
  };

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Collect the shifts / magic values from each element.
  if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern))
    return SDValue();

  // Materialize the per-element constants: build vectors for vector types,
  // plain scalars otherwise.
  SDValue MagicFactor, Factor, Shift, ShiftMask;
  if (VT.isVector()) {
    MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
    Factor = DAG.getBuildVector(VT, dl, Factors);
    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
    ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks);
  } else {
    MagicFactor = MagicFactors[0];
    Factor = Factors[0];
    Shift = Shifts[0];
    ShiftMask = ShiftMasks[0];
  }

  // Multiply the numerator (operand 0) by the magic value.
  // FIXME: We should support doing a MUL in a wider type.
  SDValue Q;
  if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT)
                          : isOperationLegalOrCustom(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor);
  else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT)
                               : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) {
    // No MULHS: take the high half of a SMUL_LOHI instead.
    SDValue LoHi =
        DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor);
    Q = SDValue(LoHi.getNode(), 1);
  } else
    return SDValue(); // No mulhs or equivalent.
  Created.push_back(Q.getNode());

  // (Optionally) Add/subtract the numerator using Factor.
  Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor);
  Created.push_back(Factor.getNode());
  Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor);
  Created.push_back(Q.getNode());

  // Shift right algebraic by shift value.
  Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift);
  Created.push_back(Q.getNode());

  // Extract the sign bit, mask it and add it to the quotient.
  SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT);
  SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift);
  Created.push_back(T.getNode());
  T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask);
  Created.push_back(T.getNode());
  return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}
4882 
4883 /// Given an ISD::UDIV node expressing a divide by constant,
4884 /// return a DAG expression to select that will generate the same value by
4885 /// multiplying by a magic number.
4886 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
4887 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
4888                                   bool IsAfterLegalization,
4889                                   SmallVectorImpl<SDNode *> &Created) const {
4890   SDLoc dl(N);
4891   EVT VT = N->getValueType(0);
4892   EVT SVT = VT.getScalarType();
4893   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
4894   EVT ShSVT = ShVT.getScalarType();
4895   unsigned EltBits = VT.getScalarSizeInBits();
4896 
4897   // Check to see if we can do this.
4898   // FIXME: We should be more aggressive here.
4899   if (!isTypeLegal(VT))
4900     return SDValue();
4901 
4902   bool UseNPQ = false;
4903   SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
4904 
4905   auto BuildUDIVPattern = [&](ConstantSDNode *C) {
4906     if (C->isNullValue())
4907       return false;
4908     // FIXME: We should use a narrower constant when the upper
4909     // bits are known to be zero.
4910     APInt Divisor = C->getAPIntValue();
4911     APInt::mu magics = Divisor.magicu();
4912     unsigned PreShift = 0, PostShift = 0;
4913 
4914     // If the divisor is even, we can avoid using the expensive fixup by
4915     // shifting the divided value upfront.
4916     if (magics.a != 0 && !Divisor[0]) {
4917       PreShift = Divisor.countTrailingZeros();
4918       // Get magic number for the shifted divisor.
4919       magics = Divisor.lshr(PreShift).magicu(PreShift);
4920       assert(magics.a == 0 && "Should use cheap fixup now");
4921     }
4922 
4923     APInt Magic = magics.m;
4924 
4925     unsigned SelNPQ;
4926     if (magics.a == 0 || Divisor.isOneValue()) {
4927       assert(magics.s < Divisor.getBitWidth() &&
4928              "We shouldn't generate an undefined shift!");
4929       PostShift = magics.s;
4930       SelNPQ = false;
4931     } else {
4932       PostShift = magics.s - 1;
4933       SelNPQ = true;
4934     }
4935 
4936     PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT));
4937     MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT));
4938     NPQFactors.push_back(
4939         DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
4940                                : APInt::getNullValue(EltBits),
4941                         dl, SVT));
4942     PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT));
4943     UseNPQ |= SelNPQ;
4944     return true;
4945   };
4946 
4947   SDValue N0 = N->getOperand(0);
4948   SDValue N1 = N->getOperand(1);
4949 
4950   // Collect the shifts/magic values from each element.
4951   if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern))
4952     return SDValue();
4953 
4954   SDValue PreShift, PostShift, MagicFactor, NPQFactor;
4955   if (VT.isVector()) {
4956     PreShift = DAG.getBuildVector(ShVT, dl, PreShifts);
4957     MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
4958     NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors);
4959     PostShift = DAG.getBuildVector(ShVT, dl, PostShifts);
4960   } else {
4961     PreShift = PreShifts[0];
4962     MagicFactor = MagicFactors[0];
4963     PostShift = PostShifts[0];
4964   }
4965 
4966   SDValue Q = N0;
4967   Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift);
4968   Created.push_back(Q.getNode());
4969 
4970   // FIXME: We should support doing a MUL in a wider type.
4971   auto GetMULHU = [&](SDValue X, SDValue Y) {
4972     if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT)
4973                             : isOperationLegalOrCustom(ISD::MULHU, VT))
4974       return DAG.getNode(ISD::MULHU, dl, VT, X, Y);
4975     if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT)
4976                             : isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) {
4977       SDValue LoHi =
4978           DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y);
4979       return SDValue(LoHi.getNode(), 1);
4980     }
4981     return SDValue(); // No mulhu or equivalent
4982   };
4983 
4984   // Multiply the numerator (operand 0) by the magic value.
4985   Q = GetMULHU(Q, MagicFactor);
4986   if (!Q)
4987     return SDValue();
4988 
4989   Created.push_back(Q.getNode());
4990 
4991   if (UseNPQ) {
4992     SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q);
4993     Created.push_back(NPQ.getNode());
4994 
4995     // For vectors we might have a mix of non-NPQ/NPQ paths, so use
4996     // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero.
4997     if (VT.isVector())
4998       NPQ = GetMULHU(NPQ, NPQFactor);
4999     else
5000       NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT));
5001 
5002     Created.push_back(NPQ.getNode());
5003 
5004     Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
5005     Created.push_back(Q.getNode());
5006   }
5007 
5008   Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift);
5009   Created.push_back(Q.getNode());
5010 
5011   SDValue One = DAG.getConstant(1, dl, VT);
5012   SDValue IsOne = DAG.getSetCC(dl, VT, N1, One, ISD::SETEQ);
5013   return DAG.getSelect(dl, VT, IsOne, N0, Q);
5014 }
5015 
5016 /// If all values in Values that *don't* match the predicate are same 'splat'
5017 /// value, then replace all values with that splat value.
5018 /// Else, if AlternativeReplacement was provided, then replace all values that
5019 /// do match predicate with AlternativeReplacement value.
5020 static void
5021 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values,
5022                           std::function<bool(SDValue)> Predicate,
5023                           SDValue AlternativeReplacement = SDValue()) {
5024   SDValue Replacement;
5025   // Is there a value for which the Predicate does *NOT* match? What is it?
5026   auto SplatValue = llvm::find_if_not(Values, Predicate);
5027   if (SplatValue != Values.end()) {
5028     // Does Values consist only of SplatValue's and values matching Predicate?
5029     if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) {
5030           return Value == *SplatValue || Predicate(Value);
5031         })) // Then we shall replace values matching predicate with SplatValue.
5032       Replacement = *SplatValue;
5033   }
5034   if (!Replacement) {
5035     // Oops, we did not find the "baseline" splat value.
5036     if (!AlternativeReplacement)
5037       return; // Nothing to do.
5038     // Let's replace with provided value then.
5039     Replacement = AlternativeReplacement;
5040   }
5041   std::replace_if(Values.begin(), Values.end(), Predicate, Replacement);
5042 }
5043 
5044 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE
5045 /// where the divisor is constant and the comparison target is zero,
5046 /// return a DAG expression that will generate the same comparison result
5047 /// using only multiplications, additions and shifts/rotations.
5048 /// Ref: "Hacker's Delight" 10-17.
5049 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
5050                                         SDValue CompTargetNode,
5051                                         ISD::CondCode Cond,
5052                                         DAGCombinerInfo &DCI,
5053                                         const SDLoc &DL) const {
5054   SmallVector<SDNode *, 5> Built;
5055   if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
5056                                          DCI, DL, Built)) {
5057     for (SDNode *N : Built)
5058       DCI.AddToWorklist(N);
5059     return Folded;
5060   }
5061 
5062   return SDValue();
5063 }
5064 
/// Worker for buildUREMEqFold: build the DAG for the fold, recording every
/// newly-created node in \p Created so the caller can add them to the
/// combiner worklist. Returns an empty SDValue if the fold is not profitable
/// or a required operation is unavailable on the target.
SDValue
TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                                  SDValue CompTargetNode, ISD::CondCode Cond,
                                  DAGCombinerInfo &DCI, const SDLoc &DL,
                                  SmallVectorImpl<SDNode *> &Created) const {
  // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q)
  // - D must be constant, with D = D0 * 2^K where D0 is odd
  // - P is the multiplicative inverse of D0 modulo 2^W
  // - Q = floor(((2^W) - 1) / D)
  // where W is the width of the common type of N and D.
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Only applicable for (in)equality comparisons.");

  SelectionDAG &DAG = DCI.DAG;

  EVT VT = REMNode.getValueType();
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
  EVT ShSVT = ShVT.getScalarType();

  // If MUL is unavailable, we cannot proceed in any case.
  if (!isOperationLegalOrCustom(ISD::MUL, VT))
    return SDValue();

  // Per-lane analysis results, accumulated by BuildUREMPattern below.
  bool ComparingWithAllZeros = true;
  bool AllComparisonsWithNonZerosAreTautological = true;
  bool HadTautologicalLanes = false;
  bool AllLanesAreTautological = true;
  bool HadEvenDivisor = false;
  bool AllDivisorsArePowerOfTwo = true;
  bool HadTautologicalInvertedLanes = false;
  SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts;

  // Analyze one (divisor, comparison-constant) lane pair and append the
  // per-lane P/K/Q constants. Returns false to abort the whole fold.
  auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
    // Division by 0 is UB. Leave it to be constant-folded elsewhere.
    if (CDiv->isNullValue())
      return false;

    const APInt &D = CDiv->getAPIntValue();
    const APInt &Cmp = CCmp->getAPIntValue();

    ComparingWithAllZeros &= Cmp.isNullValue();

    // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
    // if C2 is not less than C1, the comparison is always false.
    // But we will only be able to produce the comparison that will give the
    // opposite tautological answer. So this lane would need to be fixed up.
    bool TautologicalInvertedLane = D.ule(Cmp);
    HadTautologicalInvertedLanes |= TautologicalInvertedLane;

    // If all lanes are tautological (either all divisors are ones, or divisor
    // is not greater than the constant we are comparing with),
    // we will prefer to avoid the fold.
    bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
    HadTautologicalLanes |= TautologicalLane;
    AllLanesAreTautological &= TautologicalLane;

    // If we are comparing with non-zero, we'll need to subtract said
    // comparison value from the LHS. But there is no point in doing that if
    // every lane where we are comparing with non-zero is tautological.
    if (!Cmp.isNullValue())
      AllComparisonsWithNonZerosAreTautological &= TautologicalLane;

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    // D is even if it has trailing zeros.
    HadEvenDivisor |= (K != 0);
    // D is a power-of-two if D0 is one.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOneValue();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");

    // Q = floor((2^W - 1) u/ D)
    // R = ((2^W - 1) u% D)
    APInt Q, R;
    APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);

    // If we are comparing with zero, then that comparison constant is okay,
    // else it may need to be one less than that.
    if (Cmp.ugt(R))
      Q -= 1;

    assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the lane is tautological the result can be constant-folded.
    if (TautologicalLane) {
      // Set P and K amounts to bogus values so we can try to splat them.
      P = 0;
      K = -1;
      // And ensure that comparison constant is tautological,
      // it will always compare true/false.
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
    return SDValue();

  // If all lanes are tautological, the result can be constant-folded.
  if (AllLanesAreTautological)
    return SDValue();

  // If this is a urem by a power-of-two, avoid the fold since it can be
  // best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, KVal, QVal;
  if (VT.isVector()) {
    if (HadTautologicalLanes) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else {
    // Scalar case - the single analyzed lane is the operand itself.
    PVal = PAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  // When comparing with a non-zero constant, fold the constant into the LHS:
  // (x u% C1) == C2  -->  ((x - C2) u% C1) == 0 pattern, via an explicit SUB.
  if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
    if (!isOperationLegalOrCustom(ISD::SUB, VT))
      return SDValue(); // FIXME: Could/should use `ISD::ADD`?
    assert(CompTargetNode.getValueType() == N.getValueType() &&
           "Expecting that the types on LHS and RHS of comparisons match.");
    N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    SDNodeFlags Flags;
    Flags.setExact(true);
    // UREM: (rotr (mul N, P), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
    Created.push_back(Op0.getNode());
  }

  // UREM: (setule/setugt (rotr (mul N, P), K), Q)
  SDValue NewCC =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
  if (!HadTautologicalInvertedLanes)
    return NewCC;

  // If any lanes previously compared always-false, the NewCC will give
  // always-true result for them, so we need to fixup those lanes.
  // Or the other way around for inequality predicate.
  assert(VT.isVector() && "Can/should only get here for vectors.");
  Created.push_back(NewCC.getNode());

  // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
  // if C2 is not less than C1, the comparison is always false.
  // But we have produced the comparison that will give the
  // opposite tautological answer. So these lanes would need to be fixed up.
  SDValue TautologicalInvertedChannels =
      DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
  Created.push_back(TautologicalInvertedChannels.getNode());

  if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
    // If we have a vector select, let's replace the comparison results in the
    // affected lanes with the correct tautological result.
    SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
                                              DL, SETCCVT, SETCCVT);
    return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
                       Replacement, NewCC);
  }

  // Else, we can just invert the comparison result in the appropriate lanes.
  if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
    return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
                       TautologicalInvertedChannels);

  return SDValue(); // Don't know how to lower.
}
5277 
5278 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
5279 /// where the divisor is constant and the comparison target is zero,
5280 /// return a DAG expression that will generate the same comparison result
5281 /// using only multiplications, additions and shifts/rotations.
5282 /// Ref: "Hacker's Delight" 10-17.
5283 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode,
5284                                         SDValue CompTargetNode,
5285                                         ISD::CondCode Cond,
5286                                         DAGCombinerInfo &DCI,
5287                                         const SDLoc &DL) const {
5288   SmallVector<SDNode *, 7> Built;
5289   if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
5290                                          DCI, DL, Built)) {
5291     assert(Built.size() <= 7 && "Max size prediction failed.");
5292     for (SDNode *N : Built)
5293       DCI.AddToWorklist(N);
5294     return Folded;
5295   }
5296 
5297   return SDValue();
5298 }
5299 
/// Worker for buildSREMEqFold: build the DAG for the fold, recording every
/// newly-created node in \p Created so the caller can add them to the
/// combiner worklist. Returns an empty SDValue if the fold is not profitable
/// or a required operation is unavailable on the target.
SDValue
TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                                  SDValue CompTargetNode, ISD::CondCode Cond,
                                  DAGCombinerInfo &DCI, const SDLoc &DL,
                                  SmallVectorImpl<SDNode *> &Created) const {
  // Fold:
  //   (seteq/ne (srem N, D), 0)
  // To:
  //   (setule/ugt (rotr (add (mul N, P), A), K), Q)
  //
  // - D must be constant, with D = D0 * 2^K where D0 is odd
  // - P is the multiplicative inverse of D0 modulo 2^W
  // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k)))
  // - Q = floor((2 * A) / (2^K))
  // where W is the width of the common type of N and D.
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Only applicable for (in)equality comparisons.");

  SelectionDAG &DAG = DCI.DAG;

  EVT VT = REMNode.getValueType();
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
  EVT ShSVT = ShVT.getScalarType();

  // If MUL is unavailable, we cannot proceed in any case.
  if (!isOperationLegalOrCustom(ISD::MUL, VT))
    return SDValue();

  // TODO: Could support comparing with non-zero too.
  ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode);
  if (!CompTarget || !CompTarget->isNullValue())
    return SDValue();

  // Per-lane analysis results, accumulated by BuildSREMPattern below.
  bool HadIntMinDivisor = false;
  bool HadOneDivisor = false;
  bool AllDivisorsAreOnes = true;
  bool HadEvenDivisor = false;
  bool NeedToApplyOffset = false;
  bool AllDivisorsArePowerOfTwo = true;
  SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts;

  // Analyze one divisor lane and append the per-lane P/A/K/Q constants.
  // Returns false to abort the whole fold.
  auto BuildSREMPattern = [&](ConstantSDNode *C) {
    // Division by 0 is UB. Leave it to be constant-folded elsewhere.
    if (C->isNullValue())
      return false;

    // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine.

    // WARNING: this fold is only valid for positive divisors!
    APInt D = C->getAPIntValue();
    if (D.isNegative())
      D.negate(); //  `rem %X, -C` is equivalent to `rem %X, C`

    // INT_MIN stays negative after negate(); such lanes get special handling.
    HadIntMinDivisor |= D.isMinSignedValue();

    // If all divisors are ones, we will prefer to avoid the fold.
    HadOneDivisor |= D.isOneValue();
    AllDivisorsAreOnes &= D.isOneValue();

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    if (!D.isMinSignedValue()) {
      // D is even if it has trailing zeros; unless it's INT_MIN, in which case
      // we don't care about this lane in this fold, we'll special-handle it.
      HadEvenDivisor |= (K != 0);
    }

    // D is a power-of-two if D0 is one. This includes INT_MIN.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOneValue();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");

    // A = floor((2^(W - 1) - 1) / D0) & -2^K
    APInt A = APInt::getSignedMaxValue(W).udiv(D0);
    A.clearLowBits(K);

    if (!D.isMinSignedValue()) {
      // If divisor INT_MIN, then we don't care about this lane in this fold,
      // we'll special-handle it.
      NeedToApplyOffset |= A != 0;
    }

    // Q = floor((2 * A) / (2^K))
    APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));

    assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) &&
           "We are expecting that A is always less than all-ones for SVT");
    assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the divisor is 1 the result can be constant-folded. Likewise, we
    // don't care about INT_MIN lanes, those can be set to undef if appropriate.
    if (D.isOneValue()) {
      // Set P, A and K to bogus values so we can try to splat them.
      P = 0;
      A = -1;
      K = -1;

      // x ?% 1 == 0  <-->  true  <-->  x u<= -1
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    AAmts.push_back(DAG.getConstant(A, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
    return SDValue();

  // If this is a srem by a one, avoid the fold since it can be constant-folded.
  if (AllDivisorsAreOnes)
    return SDValue();

  // If this is a srem by a power-of-two (including INT_MIN), avoid the fold
  // since it can be best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, AVal, KVal, QVal;
  if (VT.isVector()) {
    if (HadOneDivisor) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn AAmts into a splat, since we don't care about the
      // values that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, SVT));
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    AVal = DAG.getBuildVector(VT, DL, AAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else {
    // Scalar case - the single analyzed lane is the operand itself.
    PVal = PAmts[0];
    AVal = AAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  if (NeedToApplyOffset) {
    // We need ADD to do this.
    if (!isOperationLegalOrCustom(ISD::ADD, VT))
      return SDValue();

    // (add (mul N, P), A)
    Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal);
    Created.push_back(Op0.getNode());
  }

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    SDNodeFlags Flags;
    Flags.setExact(true);
    // SREM: (rotr (add (mul N, P), A), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
    Created.push_back(Op0.getNode());
  }

  // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
  SDValue Fold =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));

  // If we didn't have lanes with INT_MIN divisor, then we're done.
  if (!HadIntMinDivisor)
    return Fold;

  // That fold is only valid for positive divisors. Which effectively means,
  // it is invalid for INT_MIN divisors. So if we have such a lane,
  // we must fix-up results for said lanes.
  assert(VT.isVector() && "Can/should only get here for vectors.");

  // The fix-up below needs all of these operations; bail if any is missing.
  if (!isOperationLegalOrCustom(ISD::SETEQ, VT) ||
      !isOperationLegalOrCustom(ISD::AND, VT) ||
      !isOperationLegalOrCustom(Cond, VT) ||
      !isOperationLegalOrCustom(ISD::VSELECT, VT))
    return SDValue();

  Created.push_back(Fold.getNode());

  SDValue IntMin = DAG.getConstant(
      APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue IntMax = DAG.getConstant(
      APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue Zero =
      DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT);

  // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded.
  SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
  Created.push_back(DivisorIsIntMin.getNode());

  // (N s% INT_MIN) ==/!= 0  <-->  (N & INT_MAX) ==/!= 0
  SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
  Created.push_back(Masked.getNode());
  SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
  Created.push_back(MaskedIsZero.getNode());

  // To produce final result we need to blend 2 vectors: 'SetCC' and
  // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick
  // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is
  // constant-folded, select can get lowered to a shuffle with constant mask.
  SDValue Blended =
      DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold);

  return Blended;
}
5540 
5541 bool TargetLowering::
5542 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
5543   if (!isa<ConstantSDNode>(Op.getOperand(0))) {
5544     DAG.getContext()->emitError("argument to '__builtin_return_address' must "
5545                                 "be a constant integer");
5546     return true;
5547   }
5548 
5549   return false;
5550 }
5551 
/// Return how cheaply the negation of \p Op can be computed:
///   0 - not negatible for free;
///   1 - negatible for free (e.g. by flipping an operand of an FSUB);
///   2 - outright free (Op is already an FNEG, so negating strips it).
/// This must stay in sync with getNegatedExpression, which performs the
/// negations this function merely costs out.
char TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
                                        bool LegalOperations, bool ForCodeSize,
                                        unsigned Depth) const {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG)
    return 2;

  // Don't allow anything with multiple uses unless we know it is free.
  EVT VT = Op.getValueType();
  const SDNodeFlags Flags = Op->getFlags();
  const TargetOptions &Options = DAG.getTarget().Options;
  if (!Op.hasOneUse() && !(Op.getOpcode() == ISD::FP_EXTEND &&
                           isFPExtFree(VT, Op.getOperand(0).getValueType())))
    return 0;

  // Don't recurse exponentially.
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return 0;

  switch (Op.getOpcode()) {
  case ISD::ConstantFP: {
    if (!LegalOperations)
      return 1;

    // Don't invert constant FP values after legalization unless the target says
    // the negated constant is legal.
    return isOperationLegal(ISD::ConstantFP, VT) ||
           isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT,
                        ForCodeSize);
  }
  case ISD::BUILD_VECTOR: {
    // Only permit BUILD_VECTOR of constants.
    if (llvm::any_of(Op->op_values(), [&](SDValue N) {
          return !N.isUndef() && !isa<ConstantFPSDNode>(N);
        }))
      return 0;
    if (!LegalOperations)
      return 1;
    if (isOperationLegal(ISD::ConstantFP, VT) &&
        isOperationLegal(ISD::BUILD_VECTOR, VT))
      return 1;
    // Otherwise, every (non-undef) negated element must itself be a legal
    // FP immediate for the fold to be free.
    return llvm::all_of(Op->op_values(), [&](SDValue N) {
      return N.isUndef() ||
             isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
                          ForCodeSize);
    });
  }
  case ISD::FADD:
    // -(A+B) = (-A)+(-B) only holds when signed zeros don't matter.
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      return 0;

    // After operation legalization, it might not be legal to create new FSUBs.
    if (LegalOperations && !isOperationLegalOrCustom(ISD::FSUB, VT))
      return 0;

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (char V = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                                    ForCodeSize, Depth + 1))
      return V;
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations,
                              ForCodeSize, Depth + 1);
  case ISD::FSUB:
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      return 0;

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return 1;

  case ISD::FMUL:
  case ISD::FDIV:
    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
    if (char V = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                                    ForCodeSize, Depth + 1))
      return V;

    // Ignore X * 2.0 because that is expected to be canonicalized to X + X.
    if (auto *C = isConstOrConstSplatFP(Op.getOperand(1)))
      if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL)
        return 0;

    return isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations,
                              ForCodeSize, Depth + 1);

  case ISD::FMA:
  case ISD::FMAD: {
    // -(X*Y+Z) rewrites require NSZ, same as FADD above.
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      return 0;

    // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
    // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
    // Either way operand 2 must be negatible; check it first.
    char V2 = isNegatibleForFree(Op.getOperand(2), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    if (!V2)
      return 0;

    // One of Op0/Op1 must be cheaply negatible, then select the cheapest.
    char V0 = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    char V1 = isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    char V01 = std::max(V0, V1);
    return V01 ? std::max(V01, V2) : 0;
  }

  case ISD::FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::FSIN:
    // These commute with negation: cost equals that of the source operand.
    return isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                              ForCodeSize, Depth + 1);
  }

  return 0;
}
5667 
/// Return the DAG expression for the negation of \p Op, assuming
/// isNegatibleForFree returned non-zero for it. The rewrites here must match
/// the costing performed by isNegatibleForFree exactly.
SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                             bool LegalOperations,
                                             bool ForCodeSize,
                                             unsigned Depth) const {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG)
    return Op.getOperand(0);

  assert(Depth <= SelectionDAG::MaxRecursionDepth &&
         "getNegatedExpression doesn't match isNegatibleForFree");
  const SDNodeFlags Flags = Op->getFlags();

  switch (Op.getOpcode()) {
  case ISD::ConstantFP: {
    // Negate the constant directly.
    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
    V.changeSign();
    return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType());
  }
  case ISD::BUILD_VECTOR: {
    // Negate every (non-undef) constant element.
    SmallVector<SDValue, 4> Ops;
    for (SDValue C : Op->op_values()) {
      if (C.isUndef()) {
        Ops.push_back(C);
        continue;
      }
      APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF();
      V.changeSign();
      Ops.push_back(DAG.getConstantFP(V, SDLoc(Op), C.getValueType()));
    }
    return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Ops);
  }
  case ISD::FADD:
    assert((DAG.getTarget().Options.NoSignedZerosFPMath ||
            Flags.hasNoSignedZeros()) &&
           "Expected NSZ fp-flag");

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, ForCodeSize,
                           Depth + 1))
      return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                         getNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, ForCodeSize,
                                              Depth + 1),
                         Op.getOperand(1), Flags);
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       getNegatedExpression(Op.getOperand(1), DAG,
                                            LegalOperations, ForCodeSize,
                                            Depth + 1),
                       Op.getOperand(0), Flags);
  case ISD::FSUB:
    // fold (fneg (fsub 0, B)) -> B
    if (ConstantFPSDNode *N0CFP =
            isConstOrConstSplatFP(Op.getOperand(0), /*AllowUndefs*/ true))
      if (N0CFP->isZero())
        return Op.getOperand(1);

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(0), Flags);

  case ISD::FMUL:
  case ISD::FDIV:
    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
    if (isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, ForCodeSize,
                           Depth + 1))
      return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                         getNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, ForCodeSize,
                                              Depth + 1),
                         Op.getOperand(1), Flags);

    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
    return DAG.getNode(
        Op.getOpcode(), SDLoc(Op), Op.getValueType(), Op.getOperand(0),
        getNegatedExpression(Op.getOperand(1), DAG, LegalOperations,
                             ForCodeSize, Depth + 1),
        Flags);

  case ISD::FMA:
  case ISD::FMAD: {
    assert((DAG.getTarget().Options.NoSignedZerosFPMath ||
            Flags.hasNoSignedZeros()) &&
           "Expected NSZ fp-flag");

    // Operand 2 is always negated, per isNegatibleForFree.
    SDValue Neg2 = getNegatedExpression(Op.getOperand(2), DAG, LegalOperations,
                                        ForCodeSize, Depth + 1);

    char V0 = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    char V1 = isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    // TODO: This is a hack. It is possible that costs have changed between now
    //       and the initial calls to isNegatibleForFree(). That is because we
    //       are rewriting the expression, and that may change the number of
    //       uses (and therefore the cost) of values. If the negation costs are
    //       equal, only negate this value if it is a constant. Otherwise, try
    //       operand 1. A better fix would eliminate uses as a cost factor or
    //       track the change in uses as we rewrite the expression.
    if (V0 > V1 || (V0 == V1 && isa<ConstantFPSDNode>(Op.getOperand(0)))) {
      // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
      SDValue Neg0 = getNegatedExpression(
          Op.getOperand(0), DAG, LegalOperations, ForCodeSize, Depth + 1);
      return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Neg0,
                         Op.getOperand(1), Neg2, Flags);
    }

    // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
    SDValue Neg1 = getNegatedExpression(Op.getOperand(1), DAG, LegalOperations,
                                        ForCodeSize, Depth + 1);
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       Op.getOperand(0), Neg1, Neg2, Flags);
  }

  case ISD::FP_EXTEND:
  case ISD::FSIN:
    // Negation commutes through these: negate the operand instead.
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       getNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, ForCodeSize,
                                            Depth + 1));
  case ISD::FP_ROUND:
    // FP_ROUND carries a truncation flag in operand 1; preserve it.
    return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
                       getNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, ForCodeSize,
                                            Depth + 1),
                       Op.getOperand(1));
  }

  llvm_unreachable("Unknown code");
}
5798 
5799 //===----------------------------------------------------------------------===//
5800 // Legalization Utilities
5801 //===----------------------------------------------------------------------===//
5802 
// Expand a multiplication (ISD::MUL, ISD::UMUL_LOHI or ISD::SMUL_LOHI) of
// type VT into operations on the half-width type HiLoVT. On success, the
// pieces of the product are appended to Result (low part first) and true is
// returned: two values for ISD::MUL, four for the *MUL_LOHI opcodes. Result
// is left untouched on failure. LL/LH/RL/RH may optionally supply pre-split
// halves of LHS/RHS; either all four are set or none are.
bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl,
                                    SDValue LHS, SDValue RHS,
                                    SmallVectorImpl<SDValue> &Result,
                                    EVT HiLoVT, SelectionDAG &DAG,
                                    MulExpansionKind Kind, SDValue LL,
                                    SDValue LH, SDValue RL, SDValue RH) const {
  assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
         Opcode == ISD::SMUL_LOHI);

  // Determine which half-width multiply forms we are allowed to emit.
  // MulExpansionKind::Always bypasses the legality checks.
  bool HasMULHS = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
  bool HasMULHU = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
  bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
  bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);

  // Without any usable widening multiply there is nothing we can do.
  if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
    return false;

  unsigned OuterBitSize = VT.getScalarSizeInBits();
  unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();
  unsigned LHSSB = DAG.ComputeNumSignBits(LHS);
  unsigned RHSSB = DAG.ComputeNumSignBits(RHS);

  // LL, LH, RL, and RH must be either all NULL or all set to a value.
  assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
         (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));

  SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
  // Emit a half-width L*R producing both halves of the product, using
  // whichever of {S,U}MUL_LOHI or MUL+MULH{S,U} is available for the
  // requested signedness. Returns false if neither form can be used.
  auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
                          bool Signed) -> bool {
    if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
      Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R);
      Hi = SDValue(Lo.getNode(), 1);
      return true;
    }
    if ((Signed && HasMULHS) || (!Signed && HasMULHU)) {
      Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R);
      Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R);
      return true;
    }
    return false;
  };

  SDValue Lo, Hi;

  // If the caller didn't supply split operands, derive the low halves by
  // truncation.
  if (!LL.getNode() && !RL.getNode() &&
      isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
    LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS);
    RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS);
  }

  if (!LL.getNode())
    return false;

  // If both operands are known to fit in the low half, a single unsigned
  // half-width widening multiply gives the whole product.
  APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
  if (DAG.MaskedValueIsZero(LHS, HighMask) &&
      DAG.MaskedValueIsZero(RHS, HighMask)) {
    // The inputs are both zero-extended.
    if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) {
      Result.push_back(Lo);
      Result.push_back(Hi);
      if (Opcode != ISD::MUL) {
        // The *MUL_LOHI callers expect four pieces; the top two are zero.
        SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
        Result.push_back(Zero);
        Result.push_back(Zero);
      }
      return true;
    }
  }

  // Similarly, if both operands are effectively sign-extended from the low
  // half, one signed half-width multiply suffices (scalar MUL only).
  if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize &&
      RHSSB > InnerBitSize) {
    // The input values are both sign-extended.
    // TODO non-MUL case?
    if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) {
      Result.push_back(Lo);
      Result.push_back(Hi);
      return true;
    }
  }

  unsigned ShiftAmount = OuterBitSize - InnerBitSize;
  EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout());
  if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) {
    // FIXME getShiftAmountTy does not always return a sensible result when VT
    // is an illegal type, and so the type may be too small to fit the shift
    // amount. Override it with i32. The shift will have to be legalized.
    ShiftAmountTy = MVT::i32;
  }
  SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy);

  // Derive the high halves of the operands by shift + truncate if the caller
  // didn't supply them.
  if (!LH.getNode() && !RH.getNode() &&
      isOperationLegalOrCustom(ISD::SRL, VT) &&
      isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
    LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift);
    LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
    RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift);
    RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
  }

  if (!LH.getNode())
    return false;

  // General case: schoolbook multiplication on half-width digits.
  // Start with the low x low partial product.
  if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false))
    return false;

  Result.push_back(Lo);

  if (Opcode == ISD::MUL) {
    // Only the low VT-wide product is needed: the cross terms contribute
    // solely to the high half, so plain (truncating) MULs are enough.
    RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
    LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
    Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
    Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
    Result.push_back(Hi);
    return true;
  }

  // Compute the full width result.
  // Combine a (Lo, Hi) half pair into a single VT value: zext both, shift Hi
  // into position, and OR them together.
  auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue {
    Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
    Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
    Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
    return DAG.getNode(ISD::OR, dl, VT, Lo, Hi);
  };

  // Accumulate the middle partial products (LL*RH and LH*RL).
  SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
  if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false))
    return false;

  // This is effectively the add part of a multiply-add of half-sized operands,
  // so it cannot overflow.
  Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));

  if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false))
    return false;

  SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
  EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // The second middle-term add can carry into the top half; use glued
  // ADDC/ADDE if the target has them, otherwise ADDCARRY with a bool carry.
  bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) &&
                  isOperationLegalOrCustom(ISD::ADDE, VT));
  if (UseGlue)
    Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next,
                       Merge(Lo, Hi));
  else
    Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next,
                       Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType));

  SDValue Carry = Next.getValue(1);
  // Second result piece: the half just below the middle of the product.
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);

  // Top partial product LH*RH; signed for SMUL_LOHI, unsigned otherwise.
  if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI))
    return false;

  // Propagate the carry from the middle adds into the top half.
  if (UseGlue)
    Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero,
                     Carry);
  else
    Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi,
                     Zero, Carry);

  Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));

  if (Opcode == ISD::SMUL_LOHI) {
    // For the signed form, correct the top: when a high half is negative the
    // unsigned cross products over-counted by the other operand's low half.
    SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
                                  DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL));
    Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT);

    NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
                          DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL));
    Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT);
  }

  // Emit the top two result pieces.
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  return true;
}
5985 
5986 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
5987                                SelectionDAG &DAG, MulExpansionKind Kind,
5988                                SDValue LL, SDValue LH, SDValue RL,
5989                                SDValue RH) const {
5990   SmallVector<SDValue, 2> Result;
5991   bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), N,
5992                            N->getOperand(0), N->getOperand(1), Result, HiLoVT,
5993                            DAG, Kind, LL, LH, RL, RH);
5994   if (Ok) {
5995     assert(Result.size() == 2);
5996     Lo = Result[0];
5997     Hi = Result[1];
5998   }
5999   return Ok;
6000 }
6001 
6002 bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result,
6003                                        SelectionDAG &DAG) const {
6004   EVT VT = Node->getValueType(0);
6005 
6006   if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) ||
6007                         !isOperationLegalOrCustom(ISD::SRL, VT) ||
6008                         !isOperationLegalOrCustom(ISD::SUB, VT) ||
6009                         !isOperationLegalOrCustomOrPromote(ISD::OR, VT)))
6010     return false;
6011 
6012   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
6013   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
6014   SDValue X = Node->getOperand(0);
6015   SDValue Y = Node->getOperand(1);
6016   SDValue Z = Node->getOperand(2);
6017 
6018   unsigned EltSizeInBits = VT.getScalarSizeInBits();
6019   bool IsFSHL = Node->getOpcode() == ISD::FSHL;
6020   SDLoc DL(SDValue(Node, 0));
6021 
6022   EVT ShVT = Z.getValueType();
6023   SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
6024   SDValue Zero = DAG.getConstant(0, DL, ShVT);
6025 
6026   SDValue ShAmt;
6027   if (isPowerOf2_32(EltSizeInBits)) {
6028     SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, ShVT);
6029     ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask);
6030   } else {
6031     ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC);
6032   }
6033 
6034   SDValue InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt);
6035   SDValue ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt);
6036   SDValue ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt);
6037   SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
6038 
6039   // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
6040   // and that is undefined. We must compare and select to avoid UB.
6041   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ShVT);
6042 
6043   // For fshl, 0-shift returns the 1st arg (X).
6044   // For fshr, 0-shift returns the 2nd arg (Y).
6045   SDValue IsZeroShift = DAG.getSetCC(DL, CCVT, ShAmt, Zero, ISD::SETEQ);
6046   Result = DAG.getSelect(DL, VT, IsZeroShift, IsFSHL ? X : Y, Or);
6047   return true;
6048 }
6049 
6050 // TODO: Merge with expandFunnelShift.
6051 bool TargetLowering::expandROT(SDNode *Node, SDValue &Result,
6052                                SelectionDAG &DAG) const {
6053   EVT VT = Node->getValueType(0);
6054   unsigned EltSizeInBits = VT.getScalarSizeInBits();
6055   bool IsLeft = Node->getOpcode() == ISD::ROTL;
6056   SDValue Op0 = Node->getOperand(0);
6057   SDValue Op1 = Node->getOperand(1);
6058   SDLoc DL(SDValue(Node, 0));
6059 
6060   EVT ShVT = Op1.getValueType();
6061   SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
6062 
6063   // If a rotate in the other direction is legal, use it.
6064   unsigned RevRot = IsLeft ? ISD::ROTR : ISD::ROTL;
6065   if (isOperationLegal(RevRot, VT)) {
6066     SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, Op1);
6067     Result = DAG.getNode(RevRot, DL, VT, Op0, Sub);
6068     return true;
6069   }
6070 
6071   if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) ||
6072                         !isOperationLegalOrCustom(ISD::SRL, VT) ||
6073                         !isOperationLegalOrCustom(ISD::SUB, VT) ||
6074                         !isOperationLegalOrCustomOrPromote(ISD::OR, VT) ||
6075                         !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
6076     return false;
6077 
6078   // Otherwise,
6079   //   (rotl x, c) -> (or (shl x, (and c, w-1)), (srl x, (and w-c, w-1)))
6080   //   (rotr x, c) -> (or (srl x, (and c, w-1)), (shl x, (and w-c, w-1)))
6081   //
6082   assert(isPowerOf2_32(EltSizeInBits) && EltSizeInBits > 1 &&
6083          "Expecting the type bitwidth to be a power of 2");
6084   unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL;
6085   unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL;
6086   SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT);
6087   SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, Op1);
6088   SDValue And0 = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC);
6089   SDValue And1 = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC);
6090   Result = DAG.getNode(ISD::OR, DL, VT, DAG.getNode(ShOpc, DL, VT, Op0, And0),
6091                        DAG.getNode(HsOpc, DL, VT, Op0, And1));
6092   return true;
6093 }
6094 
// Expand an FP_TO_SINT node into integer bit manipulation. Currently only
// handles f32 -> i64 (and never the strict-FP flavor). On success, stores the
// replacement value in Result and returns true.
bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
                                      SelectionDAG &DAG) const {
  // Strict-FP nodes carry the chain as operand 0; the value follows it.
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);
  SDLoc dl(SDValue(Node, 0));

  // FIXME: Only f32 to i64 conversions are supported.
  if (SrcVT != MVT::f32 || DstVT != MVT::i64)
    return false;

  if (Node->isStrictFPOpcode())
    // When a NaN is converted to an integer a trap is allowed. We can't
    // use this expansion here because it would eliminate that trap. Other
    // traps are also allowed and cannot be eliminated. See
    // IEEE 754-2008 sec 5.8.
    return false;

  // Expand f32 -> i64 conversion
  // This algorithm comes from compiler-rt's implementation of fixsfdi:
  // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
  unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
  EVT IntVT = SrcVT.changeTypeToInteger();
  EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());

  // IEEE-754 single-precision field constants: 8 exponent bits starting at
  // bit 23, biased by 127, with a 23-bit stored mantissa.
  SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
  SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
  SDValue Bias = DAG.getConstant(127, dl, IntVT);
  SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
  SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
  SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);

  // Reinterpret the float as raw bits so the fields can be extracted.
  SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);

  // Unbiased exponent = ((Bits & ExponentMask) >> 23) - 127.
  SDValue ExponentBits = DAG.getNode(
      ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
      DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
  SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);

  // Sign becomes all-ones (negative) or all-zeros via arithmetic shift of the
  // isolated sign bit, then sign-extended to the destination width.
  SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
                             DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
                             DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
  Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);

  // Significand = stored mantissa with the implicit leading 1 (bit 23) ORed
  // back in.
  SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
                          DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
                          DAG.getConstant(0x00800000, dl, IntVT));

  R = DAG.getZExtOrTrunc(R, dl, DstVT);

  // Scale the significand: shift left when the exponent exceeds the mantissa
  // width, otherwise shift right to drop fractional bits.
  R = DAG.getSelectCC(
      dl, Exponent, ExponentLoBit,
      DAG.getNode(ISD::SHL, dl, DstVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
                      dl, IntShVT)),
      DAG.getNode(ISD::SRL, dl, DstVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
                      dl, IntShVT)),
      ISD::SETGT);

  // Apply the sign: (R ^ Sign) - Sign negates R when Sign is all-ones.
  SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
                            DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign);

  // A negative unbiased exponent means |Src| < 1, so the result is 0.
  Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
                           DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
  return true;
}
6165 
// Expand an FP_TO_UINT (or STRICT_FP_TO_UINT) node in terms of FP_TO_SINT.
// On success, stores the replacement value in Result (and, for strict nodes,
// the output chain in Chain) and returns true.
bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
                                      SDValue &Chain,
                                      SelectionDAG &DAG) const {
  SDLoc dl(SDValue(Node, 0));
  // Strict-FP nodes carry the chain as operand 0; the value follows it.
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);

  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);
  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
  EVT DstSetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);

  // Only expand vector types if we have the appropriate vector bit operations.
  unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
                                                   ISD::FP_TO_SINT;
  if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
                           !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
    return false;

  // If the maximum float value is smaller than the signed integer range,
  // the destination signmask can't be represented by the float, so we can
  // just use FP_TO_SINT directly.
  const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
  APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits()));
  APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
  if (APFloat::opOverflow &
      APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
    if (Node->isStrictFPOpcode()) {
      Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
                           { Node->getOperand(0), Src });
      Chain = Result.getValue(1);
    } else
      Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
    return true;
  }

  // Cst holds the destination signmask (2^(DstBits-1)) as a float; Sel is
  // true when Src is below it, i.e. when FP_TO_SINT alone is sufficient.
  SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
  SDValue Sel;

  if (Node->isStrictFPOpcode()) {
    // Chain the comparison and make it signaling so exception semantics are
    // preserved for the strict form.
    Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
                       Node->getOperand(0), /*IsSignaling*/ true);
    Chain = Sel.getValue(1);
  } else {
    Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT);
  }

  bool Strict = Node->isStrictFPOpcode() ||
                shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false);

  if (Strict) {
    // Expand based on maximum range of FP_TO_SINT, if the value exceeds the
    // signmask then offset (the result of which should be fully representable).
    // Sel = Src < 0x8000000000000000
    // FltOfs = select Sel, 0, 0x8000000000000000
    // IntOfs = select Sel, 0, 0x8000000000000000
    // Result = fp_to_sint(Src - FltOfs) ^ IntOfs

    // TODO: Should any fast-math-flags be set for the FSUB?
    SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel,
                                   DAG.getConstantFP(0.0, dl, SrcVT), Cst);
    Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
    SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel,
                                   DAG.getConstant(0, dl, DstVT),
                                   DAG.getConstant(SignMask, dl, DstVT));
    SDValue SInt;
    if (Node->isStrictFPOpcode()) {
      // Keep the chain threaded through the FSUB and the conversion.
      SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other },
                                { Chain, Src, FltOfs });
      SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
                         { Val.getValue(1), Val });
      Chain = SInt.getValue(1);
    } else {
      SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs);
      SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val);
    }
    // XOR with IntOfs adds the signmask back for the large-value path.
    Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
  } else {
    // Expand based on maximum range of FP_TO_SINT:
    // True = fp_to_sint(Src)
    // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000)
    // Result = select (Src < 0x8000000000000000), True, False

    SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
    // TODO: Should any fast-math-flags be set for the FSUB?
    SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT,
                                DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst));
    // XOR with the signmask is equivalent to the 0x80...0 addition above.
    False = DAG.getNode(ISD::XOR, dl, DstVT, False,
                        DAG.getConstant(SignMask, dl, DstVT));
    Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
    Result = DAG.getSelect(dl, DstVT, Sel, True, False);
  }
  return true;
}
6262 
// Expand a UINT_TO_FP (or STRICT_UINT_TO_FP) node. Currently only handles
// i64 -> f64 (scalar or vector). On success, stores the replacement value in
// Result (and, for strict nodes, the output chain in Chain) and returns true.
bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
                                      SDValue &Chain,
                                      SelectionDAG &DAG) const {
  // Strict-FP nodes carry the chain as operand 0; the value follows it.
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);

  if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64)
    return false;

  // Only expand vector types if we have the appropriate vector bit operations.
  if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) ||
                           !isOperationLegalOrCustom(ISD::FADD, DstVT) ||
                           !isOperationLegalOrCustom(ISD::FSUB, DstVT) ||
                           !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) ||
                           !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT)))
    return false;

  SDLoc dl(SDValue(Node, 0));
  EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout());

  // Implementation of unsigned i64 to f64 following the algorithm in
  // __floatundidf in compiler_rt. This implementation has the advantage
  // of performing rounding correctly, both in the default rounding mode
  // and in all alternate rounding modes.
  // The magic bit patterns: 0x43300... is the f64 encoding of 2^52 and
  // 0x45300... that of 2^84; ORing a 32-bit value into their mantissa fields
  // yields 2^52 + lo and 2^84 + (hi << 32) exactly.
  SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT);
  SDValue TwoP84PlusTwoP52 = DAG.getConstantFP(
      BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT);
  SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT);
  SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT);
  SDValue HiShift = DAG.getConstant(32, dl, ShiftVT);

  // Split Src into 32-bit halves and embed each half in a double's mantissa.
  SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask);
  SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift);
  SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52);
  SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84);
  SDValue LoFlt = DAG.getBitcast(DstVT, LoOr);
  SDValue HiFlt = DAG.getBitcast(DstVT, HiOr);
  // Result = (HiFlt - (2^84 + 2^52)) + LoFlt; the subtraction cancels the
  // embedded biases, and the single FADD performs the (only) rounding step.
  if (Node->isStrictFPOpcode()) {
    SDValue HiSub =
        DAG.getNode(ISD::STRICT_FSUB, dl, {DstVT, MVT::Other},
                    {Node->getOperand(0), HiFlt, TwoP84PlusTwoP52});
    Result = DAG.getNode(ISD::STRICT_FADD, dl, {DstVT, MVT::Other},
                         {HiSub.getValue(1), LoFlt, HiSub});
    Chain = Result.getValue(1);
  } else {
    SDValue HiSub =
        DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52);
    Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub);
  }
  return true;
}
6316 
// Expand an ISD::FMINNUM/FMAXNUM node. Tries, in order: the IEEE flavor of
// the operation (quieting sNaN inputs if needed), FMINIMUM/FMAXIMUM under
// no-NaNs, and finally a compare+select under no-NaNs. Returns the
// replacement value, or an empty SDValue if no expansion applies.
SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
                                              SelectionDAG &DAG) const {
  SDLoc dl(Node);
  unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ?
    ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
  EVT VT = Node->getValueType(0);
  if (isOperationLegalOrCustom(NewOp, VT)) {
    SDValue Quiet0 = Node->getOperand(0);
    SDValue Quiet1 = Node->getOperand(1);

    if (!Node->getFlags().hasNoNaNs()) {
      // Insert canonicalizes if it's possible we need to quiet to get correct
      // sNaN behavior.
      if (!DAG.isKnownNeverSNaN(Quiet0)) {
        Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0,
                             Node->getFlags());
      }
      if (!DAG.isKnownNeverSNaN(Quiet1)) {
        Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1,
                             Node->getFlags());
      }
    }

    return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
  }

  // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that
  // instead if there are no NaNs.
  if (Node->getFlags().hasNoNaNs()) {
    unsigned IEEE2018Op =
        Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM;
    if (isOperationLegalOrCustom(IEEE2018Op, VT)) {
      return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
                         Node->getOperand(1), Node->getFlags());
    }
  }

  // If none of the above worked, but there are no NaNs, then expand to
  // a compare/select sequence.  This is required for correctness since
  // InstCombine might have canonicalized a fcmp+select sequence to a
  // FMINNUM/FMAXNUM node.  If we were to fall through to the default
  // expansion to libcall, we might introduce a link-time dependency
  // on libm into a file that originally did not have one.
  if (Node->getFlags().hasNoNaNs()) {
    ISD::CondCode Pred =
        Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT;
    SDValue Op1 = Node->getOperand(0);
    SDValue Op2 = Node->getOperand(1);
    SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred);
    // Copy FMF flags, but always set the no-signed-zeros flag
    // as this is implied by the FMINNUM/FMAXNUM semantics.
    SDNodeFlags Flags = Node->getFlags();
    Flags.setNoSignedZeros(true);
    SelCC->setFlags(Flags);
    return SelCC;
  }

  // No applicable expansion; the caller must handle this some other way
  // (e.g. a libcall).
  return SDValue();
}
6376 
// Expand an ISD::CTPOP node using the parallel bit-counting ("best" bithack)
// algorithm. Supports integer types up to 128 bits whose width is a multiple
// of 8. On success, stores the replacement value in Result and returns true.
bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result,
                                 SelectionDAG &DAG) const {
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
  SDValue Op = Node->getOperand(0);
  unsigned Len = VT.getScalarSizeInBits();
  assert(VT.isInteger() && "CTPOP not implemented for this type.");

  // TODO: Add support for irregular type lengths.
  if (!(Len <= 128 && Len % 8 == 0))
    return false;

  // Only expand vector types if we have the appropriate vector bit operations.
  // The MUL is only needed for the final byte-summing step (Len > 8).
  if (VT.isVector() && (!isOperationLegalOrCustom(ISD::ADD, VT) ||
                        !isOperationLegalOrCustom(ISD::SUB, VT) ||
                        !isOperationLegalOrCustom(ISD::SRL, VT) ||
                        (Len != 8 && !isOperationLegalOrCustom(ISD::MUL, VT)) ||
                        !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
    return false;

  // This is the "best" algorithm from
  // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  // Byte patterns replicated across the full width of the type.
  SDValue Mask55 =
      DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT);
  SDValue Mask33 =
      DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT);
  SDValue Mask0F =
      DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT);
  SDValue Mask01 =
      DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);

  // v = v - ((v >> 1) & 0x55555555...)
  Op = DAG.getNode(ISD::SUB, dl, VT, Op,
                   DAG.getNode(ISD::AND, dl, VT,
                               DAG.getNode(ISD::SRL, dl, VT, Op,
                                           DAG.getConstant(1, dl, ShVT)),
                               Mask55));
  // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
  Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33),
                   DAG.getNode(ISD::AND, dl, VT,
                               DAG.getNode(ISD::SRL, dl, VT, Op,
                                           DAG.getConstant(2, dl, ShVT)),
                               Mask33));
  // v = (v + (v >> 4)) & 0x0F0F0F0F...
  Op = DAG.getNode(ISD::AND, dl, VT,
                   DAG.getNode(ISD::ADD, dl, VT, Op,
                               DAG.getNode(ISD::SRL, dl, VT, Op,
                                           DAG.getConstant(4, dl, ShVT))),
                   Mask0F);
  // v = (v * 0x01010101...) >> (Len - 8)
  // The multiply sums all per-byte counts into the top byte; skip it when the
  // type is a single byte wide.
  if (Len > 8)
    Op =
        DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
                    DAG.getConstant(Len - 8, dl, ShVT));

  Result = Op;
  return true;
}
6436 
6437 bool TargetLowering::expandCTLZ(SDNode *Node, SDValue &Result,
6438                                 SelectionDAG &DAG) const {
6439   SDLoc dl(Node);
6440   EVT VT = Node->getValueType(0);
6441   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6442   SDValue Op = Node->getOperand(0);
6443   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6444 
6445   // If the non-ZERO_UNDEF version is supported we can use that instead.
6446   if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF &&
6447       isOperationLegalOrCustom(ISD::CTLZ, VT)) {
6448     Result = DAG.getNode(ISD::CTLZ, dl, VT, Op);
6449     return true;
6450   }
6451 
6452   // If the ZERO_UNDEF version is supported use that and handle the zero case.
6453   if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) {
6454     EVT SetCCVT =
6455         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
6456     SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op);
6457     SDValue Zero = DAG.getConstant(0, dl, VT);
6458     SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ);
6459     Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero,
6460                          DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ);
6461     return true;
6462   }
6463 
6464   // Only expand vector types if we have the appropriate vector bit operations.
6465   if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) ||
6466                         !isOperationLegalOrCustom(ISD::CTPOP, VT) ||
6467                         !isOperationLegalOrCustom(ISD::SRL, VT) ||
6468                         !isOperationLegalOrCustomOrPromote(ISD::OR, VT)))
6469     return false;
6470 
6471   // for now, we do this:
6472   // x = x | (x >> 1);
6473   // x = x | (x >> 2);
6474   // ...
6475   // x = x | (x >>16);
6476   // x = x | (x >>32); // for 64-bit input
6477   // return popcount(~x);
6478   //
6479   // Ref: "Hacker's Delight" by Henry Warren
6480   for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) {
6481     SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT);
6482     Op = DAG.getNode(ISD::OR, dl, VT, Op,
6483                      DAG.getNode(ISD::SRL, dl, VT, Op, Tmp));
6484   }
6485   Op = DAG.getNOT(dl, Op, VT);
6486   Result = DAG.getNode(ISD::CTPOP, dl, VT, Op);
6487   return true;
6488 }
6489 
6490 bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result,
6491                                 SelectionDAG &DAG) const {
6492   SDLoc dl(Node);
6493   EVT VT = Node->getValueType(0);
6494   SDValue Op = Node->getOperand(0);
6495   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6496 
6497   // If the non-ZERO_UNDEF version is supported we can use that instead.
6498   if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF &&
6499       isOperationLegalOrCustom(ISD::CTTZ, VT)) {
6500     Result = DAG.getNode(ISD::CTTZ, dl, VT, Op);
6501     return true;
6502   }
6503 
6504   // If the ZERO_UNDEF version is supported use that and handle the zero case.
6505   if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) {
6506     EVT SetCCVT =
6507         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
6508     SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op);
6509     SDValue Zero = DAG.getConstant(0, dl, VT);
6510     SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ);
6511     Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero,
6512                          DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ);
6513     return true;
6514   }
6515 
6516   // Only expand vector types if we have the appropriate vector bit operations.
6517   if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) ||
6518                         (!isOperationLegalOrCustom(ISD::CTPOP, VT) &&
6519                          !isOperationLegalOrCustom(ISD::CTLZ, VT)) ||
6520                         !isOperationLegalOrCustom(ISD::SUB, VT) ||
6521                         !isOperationLegalOrCustomOrPromote(ISD::AND, VT) ||
6522                         !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
6523     return false;
6524 
6525   // for now, we use: { return popcount(~x & (x - 1)); }
6526   // unless the target has ctlz but not ctpop, in which case we use:
6527   // { return 32 - nlz(~x & (x-1)); }
6528   // Ref: "Hacker's Delight" by Henry Warren
6529   SDValue Tmp = DAG.getNode(
6530       ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT),
6531       DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT)));
6532 
6533   // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
6534   if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) {
6535     Result =
6536         DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT),
6537                     DAG.getNode(ISD::CTLZ, dl, VT, Tmp));
6538     return true;
6539   }
6540 
6541   Result = DAG.getNode(ISD::CTPOP, dl, VT, Tmp);
6542   return true;
6543 }
6544 
6545 bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
6546                                SelectionDAG &DAG) const {
6547   SDLoc dl(N);
6548   EVT VT = N->getValueType(0);
6549   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6550   SDValue Op = N->getOperand(0);
6551 
6552   // Only expand vector types if we have the appropriate vector operations.
6553   if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SRA, VT) ||
6554                         !isOperationLegalOrCustom(ISD::ADD, VT) ||
6555                         !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
6556     return false;
6557 
6558   SDValue Shift =
6559       DAG.getNode(ISD::SRA, dl, VT, Op,
6560                   DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT));
6561   SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift);
6562   Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift);
6563   return true;
6564 }
6565 
/// Scalarize a vector load: emit one scalar (ext)load per element and
/// rebuild the result with BUILD_VECTOR. Returns {value, combined chain}.
std::pair<SDValue, SDValue>
TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
                                    SelectionDAG &DAG) const {
  SDLoc SL(LD);
  SDValue Chain = LD->getChain();
  SDValue BasePTR = LD->getBasePtr();
  EVT SrcVT = LD->getMemoryVT();
  // Preserve the original load's extension kind on every element load.
  ISD::LoadExtType ExtType = LD->getExtensionType();

  unsigned NumElem = SrcVT.getVectorNumElements();

  EVT SrcEltVT = SrcVT.getScalarType();
  EVT DstEltVT = LD->getValueType(0).getScalarType();

  // Per-element stride in bytes; elements must be byte-sized for the
  // address arithmetic below to be exact.
  unsigned Stride = SrcEltVT.getSizeInBits() / 8;
  assert(SrcEltVT.isByteSized());

  SmallVector<SDValue, 8> Vals;
  SmallVector<SDValue, 8> LoadChains;

  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    // Load element Idx, extending from the in-memory element type to the
    // register element type. Alignment shrinks with the byte offset.
    SDValue ScalarLoad =
        DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
                       LD->getPointerInfo().getWithOffset(Idx * Stride),
                       SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride),
                       LD->getMemOperand()->getFlags(), LD->getAAInfo());

    // Advance the base pointer for the next element's load.
    BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, Stride);

    Vals.push_back(ScalarLoad.getValue(0));
    LoadChains.push_back(ScalarLoad.getValue(1));
  }

  // The element loads are independent; tie their chains with a TokenFactor.
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
  SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals);

  return std::make_pair(Value, NewChain);
}
6604 
/// Scalarize a vector store, either into per-element trunc-stores or (for
/// non-byte-sized elements) into a single integer store of the packed bits.
/// Returns the chain of the resulting store(s).
SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  SDLoc SL(ST);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  EVT StVT = ST->getMemoryVT();

  // The type of the data we want to save
  EVT RegVT = Value.getValueType();
  EVT RegSclVT = RegVT.getScalarType();

  // The type of data as saved in memory.
  EVT MemSclVT = StVT.getScalarType();

  unsigned NumElem = StVT.getVectorNumElements();

  // A vector must always be stored in memory as-is, i.e. without any padding
  // between the elements, since various code depend on it, e.g. in the
  // handling of a bitcast of a vector type to int, which may be done with a
  // vector store followed by an integer load. A vector that does not have
  // elements that are byte-sized must therefore be stored as an integer
  // built out of the extracted vector elements.
  if (!MemSclVT.isByteSized()) {
    unsigned NumBits = StVT.getSizeInBits();
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);

    // Accumulate each truncated element into the right bit position of a
    // single integer, then store that integer once.
    SDValue CurrVal = DAG.getConstant(0, SL, IntVT);

    for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
      SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
                                DAG.getVectorIdxConstant(Idx, SL));
      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt);
      SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc);
      // On big-endian targets element 0 occupies the most significant bits,
      // so the shift index runs in reverse.
      unsigned ShiftIntoIdx =
          (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
      SDValue ShiftAmount =
          DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT);
      SDValue ShiftedElt =
          DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount);
      CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt);
    }

    return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
                        ST->getAlignment(), ST->getMemOperand()->getFlags(),
                        ST->getAAInfo());
  }

  // Store Stride in bytes
  unsigned Stride = MemSclVT.getSizeInBits() / 8;
  assert(Stride && "Zero stride!");
  // Extract each of the elements from the original vector and save them into
  // memory individually.
  SmallVector<SDValue, 8> Stores;
  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
                              DAG.getVectorIdxConstant(Idx, SL));

    SDValue Ptr = DAG.getObjectPtrOffset(SL, BasePtr, Idx * Stride);

    // This scalar TruncStore may be illegal, but we legalize it later.
    SDValue Store = DAG.getTruncStore(
        Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
        MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride),
        ST->getMemOperand()->getFlags(), ST->getAAInfo());

    Stores.push_back(Store);
  }

  // The element stores are independent; tie their chains with a TokenFactor.
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
}
6677 
/// Expand an unaligned load for a target that cannot perform it natively.
/// Returns {loaded value, output chain}. Three strategies are used:
///   1. FP/vector with a legal same-sized integer type: do a misaligned
///      integer load and bitcast (or scalarize vector loads if integer
///      loads of that type aren't available).
///   2. FP/vector otherwise: copy bytewise (register-sized pieces) through
///      an aligned stack slot, then reload from the slot.
///   3. Integer: split into two half-width zext/ext loads and recombine
///      with shift+or.
std::pair<SDValue, SDValue>
TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
  assert(LD->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed loads not implemented!");
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  SDLoc dl(LD);
  auto &MF = DAG.getMachineFunction();

  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
      if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
          LoadedVT.isVector()) {
        // Scalarize the load and let the individual components be handled.
        return scalarizeVectorLoad(LD, DAG);
      }

      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
                                    LD->getMemOperand());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (LoadedVT != VT)
        // Extending load: widen the bitcast value to the requested type.
        Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
                             ISD::ANY_EXTEND, dl, VT, Result);

      return std::make_pair(Result, newLoad.getValue(1));
    }

    // Copy the value to a (aligned) stack slot using (unaligned) integer
    // loads and stores, then do a (aligned) load from the stack slot.
    MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getStoreSize();
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    // Round up: the final (partial) piece is handled separately below.
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
    auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex();
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    EVT PtrVT = Ptr.getValueType();
    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(
          RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
          MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(),
          LD->getAAInfo());
      // Follow the load with a store to the stack slot.  Remember the store.
      Stores.push_back(DAG.getStore(
          Load.getValue(1), dl, Load, StackPtr,
          MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)));
      // Increment the pointers.
      Offset += RegBytes;

      Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
      StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
    }

    // The last copy may be partial.  Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load =
        DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                       LD->getPointerInfo().getWithOffset(Offset), MemVT,
                       MinAlign(LD->getAlignment(), Offset),
                       LD->getMemOperand()->getFlags(), LD->getAAInfo());
    // Follow the load with a store to the stack slot.  Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(
        Load.getValue(1), dl, Load, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo::getFixedStack(MF, FrameIndex, 0),
                          LoadedVT);

    // Callers expect a MERGE_VALUES node.
    return std::make_pair(Load, TF);
  }

  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one.  This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts.  The low half is always zero-extended; the
  // high half carries the original load's extension.  Which half sits at
  // the lower address depends on endianness.
  SDValue Lo, Hi;
  if (DAG.getDataLayout().isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());

    Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, MinAlign(Alignment, IncrementSize),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());

    Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, MinAlign(Alignment, IncrementSize),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
  }

  // aggregate the two parts: result = (Hi << NumBits) | Lo.
  SDValue ShiftAmount =
      DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
                                                    DAG.getDataLayout()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                             Hi.getValue(1));

  return std::make_pair(Result, TF);
}
6829 
/// Expand an unaligned store for a target that cannot perform it natively.
/// Returns the output chain. Mirror image of expandUnalignedLoad:
///   1. FP/vector with a legal same-sized integer type: bitcast and do a
///      misaligned integer store (or scalarize if integer stores of that
///      type aren't available).
///   2. FP/vector otherwise: store to an aligned stack slot, then copy out
///      bytewise with register-sized unaligned integer loads/stores.
///   3. Integer: split the value into two half-width trunc-stores.
SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  assert(ST->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed stores not implemented!");
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  auto &MF = DAG.getMachineFunction();
  EVT StoreMemVT = ST->getMemoryVT();

  SDLoc dl(ST);
  if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (isTypeLegal(intVT)) {
      if (!isOperationLegalOrCustom(ISD::STORE, intVT) &&
          StoreMemVT.isVector()) {
        // Scalarize the store and let the individual components be handled.
        SDValue Result = scalarizeVectorStore(ST, DAG);
        return Result;
      }
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                            Alignment, ST->getMemOperand()->getFlags());
      return Result;
    }
    // Do a (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    MVT RegVT = getRegisterType(
        *DAG.getContext(),
        EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits()));
    EVT PtrVT = Ptr.getValueType();
    unsigned StoredBytes = StoreMemVT.getStoreSize();
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    // Round up: the final (partial) piece is handled separately below.
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT);
    auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(
        Chain, dl, Val, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT);

    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load = DAG.getLoad(
          RegVT, dl, Store, StackPtr,
          MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset));
      // Store it to the final location.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                    MinAlign(ST->getAlignment(), Offset),
                                    ST->getMemOperand()->getFlags()));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
      Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
    }

    // The last store may be partial.  Do a truncating store.  On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT LoadMemVT =
        EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(
        ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT);

    Stores.push_back(
        DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                          ST->getPointerInfo().getWithOffset(Offset), LoadMemVT,
                          MinAlign(ST->getAlignment(), Offset),
                          ST->getMemOperand()->getFlags(), ST->getAAInfo()));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    return Result;
  }

  assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT
  EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
  SDValue ShiftAmount = DAG.getConstant(
      NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts; which half goes to the lower address depends on
  // endianness.
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl,
                             DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
                             Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
                             ST->getMemOperand()->getFlags());

  Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(
      Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
      ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
      ST->getMemOperand()->getFlags(), ST->getAAInfo());

  SDValue Result =
      DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
  return Result;
}
6955 
6956 SDValue
6957 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
6958                                        const SDLoc &DL, EVT DataVT,
6959                                        SelectionDAG &DAG,
6960                                        bool IsCompressedMemory) const {
6961   SDValue Increment;
6962   EVT AddrVT = Addr.getValueType();
6963   EVT MaskVT = Mask.getValueType();
6964   assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() &&
6965          "Incompatible types of Data and Mask");
6966   if (IsCompressedMemory) {
6967     // Incrementing the pointer according to number of '1's in the mask.
6968     EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
6969     SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
6970     if (MaskIntVT.getSizeInBits() < 32) {
6971       MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
6972       MaskIntVT = MVT::i32;
6973     }
6974 
6975     // Count '1's with POPCNT.
6976     Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
6977     Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
6978     // Scale is an element size in bytes.
6979     SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
6980                                     AddrVT);
6981     Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
6982   } else
6983     Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
6984 
6985   return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
6986 }
6987 
6988 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG,
6989                                        SDValue Idx,
6990                                        EVT VecVT,
6991                                        const SDLoc &dl) {
6992   if (isa<ConstantSDNode>(Idx))
6993     return Idx;
6994 
6995   EVT IdxVT = Idx.getValueType();
6996   unsigned NElts = VecVT.getVectorNumElements();
6997   if (isPowerOf2_32(NElts)) {
6998     APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
6999                                      Log2_32(NElts));
7000     return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
7001                        DAG.getConstant(Imm, dl, IdxVT));
7002   }
7003 
7004   return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
7005                      DAG.getConstant(NElts - 1, dl, IdxVT));
7006 }
7007 
7008 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
7009                                                 SDValue VecPtr, EVT VecVT,
7010                                                 SDValue Index) const {
7011   SDLoc dl(Index);
7012   // Make sure the index type is big enough to compute in.
7013   Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
7014 
7015   EVT EltVT = VecVT.getVectorElementType();
7016 
7017   // Calculate the element offset and add it to the pointer.
7018   unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
7019   assert(EltSize * 8 == EltVT.getSizeInBits() &&
7020          "Converting bits to bytes lost precision");
7021 
7022   Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);
7023 
7024   EVT IdxVT = Index.getValueType();
7025 
7026   Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
7027                       DAG.getConstant(EltSize, dl, IdxVT));
7028   return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
7029 }
7030 
7031 //===----------------------------------------------------------------------===//
7032 // Implementation of Emulated TLS Model
7033 //===----------------------------------------------------------------------===//
7034 
SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                                SelectionDAG &DAG) const {
  // Access to address of TLS variable xyz is lowered to a function call:
  //   __emutls_get_address( address of global variable named "__emutls_v.xyz" )
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
  SDLoc dl(GA);

  ArgListTy Args;
  ArgListEntry Entry;
  // Look up the control variable "__emutls_v.<name>" in the module; it must
  // already have been created (presumably by an earlier emutls pass —
  // the assert below enforces its existence).
  std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
  Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
  StringRef EmuTlsVarName(NameString);
  GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
  assert(EmuTlsVar && "Cannot find EmuTlsVar ");
  // The control variable's address is the sole argument of the runtime call.
  Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
  Entry.Ty = VoidPtrType;
  Args.push_back(Entry);

  SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);

  // Lower the call with C calling convention; the result is the address of
  // the TLS variable for the current thread.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
  CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
  // At last for X86 targets, maybe good for other targets too?
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setAdjustsStack(true); // Is this only for X86 target?
  MFI.setHasCalls(true);

  // Offsets cannot be folded into the call; callers must add them after.
  assert((GA->getOffset() == 0) &&
         "Emulated TLS must have zero offset in GlobalAddressSDNode");
  return CallResult.first;
}
7071 
7072 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
7073                                                 SelectionDAG &DAG) const {
7074   assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node.");
7075   if (!isCtlzFast())
7076     return SDValue();
7077   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
7078   SDLoc dl(Op);
7079   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
7080     if (C->isNullValue() && CC == ISD::SETEQ) {
7081       EVT VT = Op.getOperand(0).getValueType();
7082       SDValue Zext = Op.getOperand(0);
7083       if (VT.bitsLT(MVT::i32)) {
7084         VT = MVT::i32;
7085         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
7086       }
7087       unsigned Log2b = Log2_32(VT.getSizeInBits());
7088       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
7089       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
7090                                 DAG.getConstant(Log2b, dl, MVT::i32));
7091       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
7092     }
7093   }
7094   return SDValue();
7095 }
7096 
/// Expand [US]ADDSAT/[US]SUBSAT into min/max forms where the target has
/// them, otherwise into the corresponding overflow op plus a select (or a
/// mask for targets with all-ones booleans).
SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
  unsigned Opcode = Node->getOpcode();
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  EVT VT = LHS.getValueType();
  SDLoc dl(Node);

  assert(VT == RHS.getValueType() && "Expected operands to be the same type");
  assert(VT.isInteger() && "Expected operands to be integers");

  // usub.sat(a, b) -> umax(a, b) - b
  if (Opcode == ISD::USUBSAT && isOperationLegalOrCustom(ISD::UMAX, VT)) {
    SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS);
    return DAG.getNode(ISD::SUB, dl, VT, Max, RHS);
  }

  // uadd.sat(a, b) -> umin(a, ~b) + b (clamp a so the add cannot wrap).
  if (Opcode == ISD::UADDSAT && isOperationLegalOrCustom(ISD::UMIN, VT)) {
    SDValue InvRHS = DAG.getNOT(dl, RHS, VT);
    SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS);
    return DAG.getNode(ISD::ADD, dl, VT, Min, RHS);
  }

  // Map each saturating opcode to its overflow-detecting counterpart.
  unsigned OverflowOp;
  switch (Opcode) {
  case ISD::SADDSAT:
    OverflowOp = ISD::SADDO;
    break;
  case ISD::UADDSAT:
    OverflowOp = ISD::UADDO;
    break;
  case ISD::SSUBSAT:
    OverflowOp = ISD::SSUBO;
    break;
  case ISD::USUBSAT:
    OverflowOp = ISD::USUBO;
    break;
  default:
    llvm_unreachable("Expected method to receive signed or unsigned saturation "
                     "addition or subtraction node.");
  }

  unsigned BitWidth = LHS.getScalarValueSizeInBits();
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  // The overflow node yields {wrapped result, overflow flag}.
  SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT),
                               LHS, RHS);
  SDValue SumDiff = Result.getValue(0);
  SDValue Overflow = Result.getValue(1);
  SDValue Zero = DAG.getConstant(0, dl, VT);
  SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);

  if (Opcode == ISD::UADDSAT) {
    if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) {
      // All-ones booleans let us OR the sign-extended overflow flag directly:
      // (LHS + RHS) | OverflowMask
      SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT);
      return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask);
    }
    // Overflow ? 0xffff.... : (LHS + RHS)
    return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff);
  } else if (Opcode == ISD::USUBSAT) {
    if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) {
      // (LHS - RHS) & ~OverflowMask
      SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT);
      SDValue Not = DAG.getNOT(dl, OverflowMask, VT);
      return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not);
    }
    // Overflow ? 0 : (LHS - RHS)
    return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff);
  } else {
    // Signed case: which saturation bound applies depends on the sign of
    // the wrapped result.
    // SatMax -> Overflow && SumDiff < 0
    // SatMin -> Overflow && SumDiff >= 0
    APInt MinVal = APInt::getSignedMinValue(BitWidth);
    APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
    SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
    SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
    SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT);
    Result = DAG.getSelect(dl, VT, SumNeg, SatMax, SatMin);
    return DAG.getSelect(dl, VT, Overflow, Result, SumDiff);
  }
}
7176 
/// Expand a fixed point multiplication ([US]MULFIX or [US]MULFIXSAT) into
/// target-independent nodes: form the double-width product, shift it right by
/// the scale, and for the saturating variants clamp the result when the
/// discarded high bits show the product is out of range.
SDValue
TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
  assert((Node->getOpcode() == ISD::SMULFIX ||
          Node->getOpcode() == ISD::UMULFIX ||
          Node->getOpcode() == ISD::SMULFIXSAT ||
          Node->getOpcode() == ISD::UMULFIXSAT) &&
         "Expected a fixed point multiplication opcode");

  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  EVT VT = LHS.getValueType();
  // The third operand is the scale: how many bits of the result are
  // fractional.
  unsigned Scale = Node->getConstantOperandVal(2);
  bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT ||
                     Node->getOpcode() == ISD::UMULFIXSAT);
  bool Signed = (Node->getOpcode() == ISD::SMULFIX ||
                 Node->getOpcode() == ISD::SMULFIXSAT);
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  unsigned VTSize = VT.getScalarSizeInBits();

  if (!Scale) {
    // [us]mul.fix(a, b, 0) -> mul(a, b)
    if (!Saturating) {
      if (isOperationLegalOrCustom(ISD::MUL, VT))
        return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) {
      // smul.fix.sat(a, b, 0) via SMULO: on overflow, saturate away from the
      // sign of the wrapped product (a negative wrapped product means the
      // true result overflowed towards the maximum, and vice versa).
      SDValue Result =
          DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);
      SDValue Zero = DAG.getConstant(0, dl, VT);

      APInt MinVal = APInt::getSignedMinValue(VTSize);
      APInt MaxVal = APInt::getSignedMaxValue(VTSize);
      SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Product, Zero, ISD::SETLT);
      Result = DAG.getSelect(dl, VT, ProdNeg, SatMax, SatMin);
      return DAG.getSelect(dl, VT, Overflow, Result, Product);
    } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) {
      // umul.fix.sat(a, b, 0) via UMULO: saturate to the unsigned maximum on
      // overflow.
      SDValue Result =
          DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);

      APInt MaxVal = APInt::getMaxValue(VTSize);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      return DAG.getSelect(dl, VT, Overflow, SatMax, Product);
    }
  }

  assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) &&
         "Expected scale to be less than the number of bits if signed or at "
         "most the number of bits if unsigned.");
  assert(LHS.getValueType() == RHS.getValueType() &&
         "Expected both operands to be the same type");

  // Get the upper and lower bits of the result.
  SDValue Lo, Hi;
  unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
  unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
  if (isOperationLegalOrCustom(LoHiOp, VT)) {
    SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
    Lo = Result.getValue(0);
    Hi = Result.getValue(1);
  } else if (isOperationLegalOrCustom(HiOp, VT)) {
    Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
  } else if (VT.isVector()) {
    // Report "cannot expand" so the caller can unroll the vector instead of
    // failing hard.
    return SDValue();
  } else {
    report_fatal_error("Unable to expand fixed point multiplication.");
  }

  if (Scale == VTSize)
    // Result is just the top half since we'd be shifting by the width of the
    // operand. Overflow impossible so this works for both UMULFIX and
    // UMULFIXSAT.
    return Hi;

  // The result will need to be shifted right by the scale since both operands
  // are scaled. The result is given to us in 2 halves, so we only want part of
  // both in the result. The funnel shift extracts the middle VTSize bits of
  // the Hi:Lo pair.
  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
  SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo,
                               DAG.getConstant(Scale, dl, ShiftTy));
  if (!Saturating)
    return Result;

  if (!Signed) {
    // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the
    // widened multiplication) aren't all zeroes.

    // Saturate to max if ((Hi >> Scale) != 0),
    // which is the same as if (Hi > ((1 << Scale) - 1))
    APInt MaxVal = APInt::getMaxValue(VTSize);
    SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale),
                                      dl, VT);
    Result = DAG.getSelectCC(dl, Hi, LowMask,
                             DAG.getConstant(MaxVal, dl, VT), Result,
                             ISD::SETUGT);

    return Result;
  }

  // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the
  // widened multiplication) aren't all ones or all zeroes.

  SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT);
  SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT);

  if (Scale == 0) {
    // With no fractional bits, the whole wide product must fit in Lo, i.e.
    // Hi must equal the sign-extension of Lo; anything else is overflow.
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo,
                               DAG.getConstant(VTSize - 1, dl, ShiftTy));
    SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE);
    // Saturated to SatMin if wide product is negative, and SatMax if wide
    // product is positive ...
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
                                               ISD::SETLT);
    // ... but only if we overflowed.
    return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
  }

  // We handled Scale==0 above, so all the bits to examine are in Hi.

  // Saturate to max if ((Hi >> (Scale - 1)) > 0),
  // which is the same as if (Hi > (1 << (Scale - 1)) - 1).
  SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
                                    dl, VT);
  Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
  // Saturate to min if ((Hi >> (Scale - 1)) < -1),
  // which is the same as if (Hi < (-1 << (Scale - 1))).
  SDValue HighMask =
      DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
                      dl, VT);
  Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
  return Result;
}
7316 
7317 SDValue
7318 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
7319                                     SDValue LHS, SDValue RHS,
7320                                     unsigned Scale, SelectionDAG &DAG) const {
7321   assert((Opcode == ISD::SDIVFIX ||
7322           Opcode == ISD::UDIVFIX) &&
7323          "Expected a fixed point division opcode");
7324 
7325   EVT VT = LHS.getValueType();
7326   bool Signed = Opcode == ISD::SDIVFIX;
7327   EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
7328 
7329   // If there is enough room in the type to upscale the LHS or downscale the
7330   // RHS before the division, we can perform it in this type without having to
7331   // resize. For signed operations, the LHS headroom is the number of
7332   // redundant sign bits, and for unsigned ones it is the number of zeroes.
7333   // The headroom for the RHS is the number of trailing zeroes.
7334   unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
7335                             : DAG.computeKnownBits(LHS).countMinLeadingZeros();
7336   unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();
7337 
7338   if (LHSLead + RHSTrail < Scale)
7339     return SDValue();
7340 
7341   unsigned LHSShift = std::min(LHSLead, Scale);
7342   unsigned RHSShift = Scale - LHSShift;
7343 
7344   // At this point, we know that if we shift the LHS up by LHSShift and the
7345   // RHS down by RHSShift, we can emit a regular division with a final scaling
7346   // factor of Scale.
7347 
7348   EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
7349   if (LHSShift)
7350     LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
7351                       DAG.getConstant(LHSShift, dl, ShiftTy));
7352   if (RHSShift)
7353     RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS,
7354                       DAG.getConstant(RHSShift, dl, ShiftTy));
7355 
7356   SDValue Quot;
7357   if (Signed) {
7358     // For signed operations, if the resulting quotient is negative and the
7359     // remainder is nonzero, subtract 1 from the quotient to round towards
7360     // negative infinity.
7361     SDValue Rem;
7362     // FIXME: Ideally we would always produce an SDIVREM here, but if the
7363     // type isn't legal, SDIVREM cannot be expanded. There is no reason why
7364     // we couldn't just form a libcall, but the type legalizer doesn't do it.
7365     if (isTypeLegal(VT) &&
7366         isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
7367       Quot = DAG.getNode(ISD::SDIVREM, dl,
7368                          DAG.getVTList(VT, VT),
7369                          LHS, RHS);
7370       Rem = Quot.getValue(1);
7371       Quot = Quot.getValue(0);
7372     } else {
7373       Quot = DAG.getNode(ISD::SDIV, dl, VT,
7374                          LHS, RHS);
7375       Rem = DAG.getNode(ISD::SREM, dl, VT,
7376                         LHS, RHS);
7377     }
7378     SDValue Zero = DAG.getConstant(0, dl, VT);
7379     SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
7380     SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
7381     SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
7382     SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
7383     SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
7384                                DAG.getConstant(1, dl, VT));
7385     Quot = DAG.getSelect(dl, VT,
7386                          DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
7387                          Sub1, Quot);
7388   } else
7389     Quot = DAG.getNode(ISD::UDIV, dl, VT,
7390                        LHS, RHS);
7391 
7392   // TODO: Saturation.
7393 
7394   return Quot;
7395 }
7396 
7397 void TargetLowering::expandUADDSUBO(
7398     SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
7399   SDLoc dl(Node);
7400   SDValue LHS = Node->getOperand(0);
7401   SDValue RHS = Node->getOperand(1);
7402   bool IsAdd = Node->getOpcode() == ISD::UADDO;
7403 
7404   // If ADD/SUBCARRY is legal, use that instead.
7405   unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY;
7406   if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
7407     SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
7408     SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
7409                                     { LHS, RHS, CarryIn });
7410     Result = SDValue(NodeCarry.getNode(), 0);
7411     Overflow = SDValue(NodeCarry.getNode(), 1);
7412     return;
7413   }
7414 
7415   Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
7416                             LHS.getValueType(), LHS, RHS);
7417 
7418   EVT ResultType = Node->getValueType(1);
7419   EVT SetCCType = getSetCCResultType(
7420       DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
7421   ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
7422   SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
7423   Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
7424 }
7425 
7426 void TargetLowering::expandSADDSUBO(
7427     SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
7428   SDLoc dl(Node);
7429   SDValue LHS = Node->getOperand(0);
7430   SDValue RHS = Node->getOperand(1);
7431   bool IsAdd = Node->getOpcode() == ISD::SADDO;
7432 
7433   Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
7434                             LHS.getValueType(), LHS, RHS);
7435 
7436   EVT ResultType = Node->getValueType(1);
7437   EVT OType = getSetCCResultType(
7438       DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
7439 
7440   // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
7441   unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT;
7442   if (isOperationLegalOrCustom(OpcSat, LHS.getValueType())) {
7443     SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
7444     SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
7445     Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
7446     return;
7447   }
7448 
7449   SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());
7450 
7451   // For an addition, the result should be less than one of the operands (LHS)
7452   // if and only if the other operand (RHS) is negative, otherwise there will
7453   // be overflow.
7454   // For a subtraction, the result should be less than one of the operands
7455   // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
7456   // otherwise there will be overflow.
7457   SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
7458   SDValue ConditionRHS =
7459       DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);
7460 
7461   Overflow = DAG.getBoolExtOrTrunc(
7462       DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
7463       ResultType, ResultType);
7464 }
7465 
/// Expand [SU]MULO into an explicit multiply plus overflow check, writing the
/// product to \p Result and the overflow flag to \p Overflow. Tries, in
/// order: a shift-based form for power-of-two constants, MULH[SU],
/// [SU]MUL_LOHI, a MUL in the double-width type, and finally a MUL_I*
/// libcall with the operands split into halves. Returns false only for
/// vector types that none of these forms can handle.
bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
                                SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool isSigned = Node->getOpcode() == ISD::SMULO;

  // For power-of-two multiplications we can use a simpler shift expansion.
  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
      Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
      // Overflow iff undoing the shift does not reproduce LHS.
      Overflow = DAG.getSetCC(dl, SetCCVT,
          DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
                      dl, VT, Result, ShiftAmt),
          LHS, ISD::SETNE);
      return true;
    }
  }

  // Double-width type holding the full product.
  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
  if (VT.isVector())
    WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
                              VT.getVectorNumElements());

  SDValue BottomHalf;
  SDValue TopHalf;
  // Indexed by isSigned: [0] = high-half opcode, [1] = lo/hi pair opcode,
  // [2] = extension used to widen operands to WideVT.
  static const unsigned Ops[2][3] =
      { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
        { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
  if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
    BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
  } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
    BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                             RHS);
    TopHalf = BottomHalf.getValue(1);
  } else if (isTypeLegal(WideVT)) {
    // Widen, multiply, and split the product back into halves.
    LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
    RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
    BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
    SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl,
        getShiftAmountTy(WideVT, DAG.getDataLayout()));
    TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
                          DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
  } else {
    if (VT.isVector())
      return false;

    // We can fall back to a libcall with an illegal type for the MUL if we
    // have a libcall big enough.
    // Also, we can fall back to a division in some cases, but that's a big
    // performance hit in the general case.
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    if (WideVT == MVT::i16)
      LC = RTLIB::MUL_I16;
    else if (WideVT == MVT::i32)
      LC = RTLIB::MUL_I32;
    else if (WideVT == MVT::i64)
      LC = RTLIB::MUL_I64;
    else if (WideVT == MVT::i128)
      LC = RTLIB::MUL_I128;
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

    SDValue HiLHS;
    SDValue HiRHS;
    if (isSigned) {
      // The high part is obtained by SRA'ing all but one of the bits of low
      // part.
      unsigned LoSize = VT.getSizeInBits();
      HiLHS =
          DAG.getNode(ISD::SRA, dl, VT, LHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
      HiRHS =
          DAG.getNode(ISD::SRA, dl, VT, RHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
    } else {
        HiLHS = DAG.getConstant(0, dl, VT);
        HiRHS = DAG.getConstant(0, dl, VT);
    }

    // Here we're passing the 2 arguments explicitly as 4 arguments that are
    // pre-lowered to the correct types. This all depends upon WideVT not
    // being a legal type for the architecture and thus has to be split to
    // two arguments.
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // Same as above.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    // Signed overflow iff the top half is not the sign-extension of the
    // bottom half.
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    // Unsigned overflow iff any bits spilled into the top half.
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
                            DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.getSizeInBits() < Overflow.getValueSizeInBits())
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}
7609 
7610 SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
7611   SDLoc dl(Node);
7612   bool NoNaN = Node->getFlags().hasNoNaNs();
7613   unsigned BaseOpcode = 0;
7614   switch (Node->getOpcode()) {
7615   default: llvm_unreachable("Expected VECREDUCE opcode");
7616   case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
7617   case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
7618   case ISD::VECREDUCE_ADD:  BaseOpcode = ISD::ADD; break;
7619   case ISD::VECREDUCE_MUL:  BaseOpcode = ISD::MUL; break;
7620   case ISD::VECREDUCE_AND:  BaseOpcode = ISD::AND; break;
7621   case ISD::VECREDUCE_OR:   BaseOpcode = ISD::OR; break;
7622   case ISD::VECREDUCE_XOR:  BaseOpcode = ISD::XOR; break;
7623   case ISD::VECREDUCE_SMAX: BaseOpcode = ISD::SMAX; break;
7624   case ISD::VECREDUCE_SMIN: BaseOpcode = ISD::SMIN; break;
7625   case ISD::VECREDUCE_UMAX: BaseOpcode = ISD::UMAX; break;
7626   case ISD::VECREDUCE_UMIN: BaseOpcode = ISD::UMIN; break;
7627   case ISD::VECREDUCE_FMAX:
7628     BaseOpcode = NoNaN ? ISD::FMAXNUM : ISD::FMAXIMUM;
7629     break;
7630   case ISD::VECREDUCE_FMIN:
7631     BaseOpcode = NoNaN ? ISD::FMINNUM : ISD::FMINIMUM;
7632     break;
7633   }
7634 
7635   SDValue Op = Node->getOperand(0);
7636   EVT VT = Op.getValueType();
7637 
7638   // Try to use a shuffle reduction for power of two vectors.
7639   if (VT.isPow2VectorType()) {
7640     while (VT.getVectorNumElements() > 1) {
7641       EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
7642       if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
7643         break;
7644 
7645       SDValue Lo, Hi;
7646       std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
7647       Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
7648       VT = HalfVT;
7649     }
7650   }
7651 
7652   EVT EltVT = VT.getVectorElementType();
7653   unsigned NumElts = VT.getVectorNumElements();
7654 
7655   SmallVector<SDValue, 8> Ops;
7656   DAG.ExtractVectorElements(Op, Ops, 0, NumElts);
7657 
7658   SDValue Res = Ops[0];
7659   for (unsigned i = 1; i < NumElts; i++)
7660     Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
7661 
7662   // Result type may be wider than element type.
7663   if (EltVT != Node->getValueType(0))
7664     Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
7665   return Res;
7666 }
7667