1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the TargetLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/TargetLowering.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/CodeGen/CallingConvLower.h"
16 #include "llvm/CodeGen/MachineFrameInfo.h"
17 #include "llvm/CodeGen/MachineFunction.h"
18 #include "llvm/CodeGen/MachineJumpTableInfo.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/SelectionDAG.h"
21 #include "llvm/CodeGen/TargetRegisterInfo.h"
22 #include "llvm/CodeGen/TargetSubtargetInfo.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/GlobalVariable.h"
26 #include "llvm/IR/LLVMContext.h"
27 #include "llvm/MC/MCAsmInfo.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MathExtras.h"
32 #include "llvm/Target/TargetLoweringObjectFile.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include <cctype>
35 using namespace llvm;
36 
/// Construct lowering state for the given target machine; all real
/// initialization happens in the TargetLoweringBase constructor.
/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}
40 
/// Default implementation: no name for target-specific nodes. Targets that
/// define custom SelectionDAG opcodes override this to provide readable
/// names for debug dumps.
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}
44 
/// Forward the PIC query to the TargetMachine, which owns the relocation
/// model for this compilation.
bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}
48 
/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence. Any other return attribute blocks the tail call.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value:
  // after a tail call the caller no longer performs the extension the return
  // attribute promises.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}
77 
78 bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
79     const uint32_t *CallerPreservedMask,
80     const SmallVectorImpl<CCValAssign> &ArgLocs,
81     const SmallVectorImpl<SDValue> &OutVals) const {
82   for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
83     const CCValAssign &ArgLoc = ArgLocs[I];
84     if (!ArgLoc.isRegLoc())
85       continue;
86     Register Reg = ArgLoc.getLocReg();
87     // Only look at callee saved registers.
88     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
89       continue;
90     // Check that we pass the value used for the caller.
91     // (We look for a CopyFromReg reading a virtual register that is used
92     //  for the function live-in value of register Reg)
93     SDValue Value = OutVals[I];
94     if (Value->getOpcode() != ISD::CopyFromReg)
95       return false;
96     unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
97     if (MRI.getLiveInPhysReg(ArgReg) != Reg)
98       return false;
99   }
100   return true;
101 }
102 
103 /// Set CallLoweringInfo attribute flags based on a call instruction
104 /// and called function attributes.
105 void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
106                                                      unsigned ArgIdx) {
107   IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
108   IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
109   IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
110   IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
111   IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
112   IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
113   IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
114   IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
115   IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
116   IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
117   Alignment = Call->getParamAlignment(ArgIdx);
118   ByValType = nullptr;
119   if (Call->paramHasAttr(ArgIdx, Attribute::ByVal))
120     ByValType = Call->getParamByValType(ArgIdx);
121 }
122 
123 /// Generate a libcall taking the given operands as arguments and returning a
124 /// result of type RetVT.
125 std::pair<SDValue, SDValue>
126 TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
127                             ArrayRef<SDValue> Ops,
128                             MakeLibCallOptions CallOptions,
129                             const SDLoc &dl,
130                             SDValue InChain) const {
131   if (!InChain)
132     InChain = DAG.getEntryNode();
133 
134   TargetLowering::ArgListTy Args;
135   Args.reserve(Ops.size());
136 
137   TargetLowering::ArgListEntry Entry;
138   for (unsigned i = 0; i < Ops.size(); ++i) {
139     SDValue NewOp = Ops[i];
140     Entry.Node = NewOp;
141     Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
142     Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
143                                                  CallOptions.IsSExt);
144     Entry.IsZExt = !Entry.IsSExt;
145 
146     if (CallOptions.IsSoften &&
147         !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
148       Entry.IsSExt = Entry.IsZExt = false;
149     }
150     Args.push_back(Entry);
151   }
152 
153   if (LC == RTLIB::UNKNOWN_LIBCALL)
154     report_fatal_error("Unsupported library call operation!");
155   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
156                                          getPointerTy(DAG.getDataLayout()));
157 
158   Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
159   TargetLowering::CallLoweringInfo CLI(DAG);
160   bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
161   bool zeroExtend = !signExtend;
162 
163   if (CallOptions.IsSoften &&
164       !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
165     signExtend = zeroExtend = false;
166   }
167 
168   CLI.setDebugLoc(dl)
169       .setChain(InChain)
170       .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
171       .setNoReturn(CallOptions.DoesNotReturn)
172       .setDiscardResult(!CallOptions.IsReturnValueUsed)
173       .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
174       .setSExtResult(signExtend)
175       .setZExtResult(zeroExtend);
176   return LowerCallTo(CLI);
177 }
178 
/// Compute the sequence of value types to use when lowering a memory
/// operation (memcpy/memset) of 'Size' bytes, appending them to MemOps.
/// Returns false if more than 'Limit' individual loads/stores would be
/// needed, or if the source alignment is incompatible with the destination.
bool
TargetLowering::findOptimalMemOpLowering(std::vector<EVT> &MemOps,
                                         unsigned Limit, uint64_t Size,
                                         unsigned DstAlign, unsigned SrcAlign,
                                         bool IsMemset,
                                         bool ZeroMemset,
                                         bool MemcpyStrSrc,
                                         bool AllowOverlap,
                                         unsigned DstAS, unsigned SrcAS,
                                         const AttributeList &FuncAttributes) const {
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
  if (!(SrcAlign == 0 || SrcAlign >= DstAlign))
    return false;

  // Let the target pick its preferred (possibly wide/vector) type first.
  EVT VT = getOptimalMemOpType(Size, DstAlign, SrcAlign,
                               IsMemset, ZeroMemset, MemcpyStrSrc,
                               FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    // Walk down the integer MVTs until the access is aligned or allowed to
    // be misaligned for the destination address space.
    while (DstAlign && DstAlign < VT.getSizeInBits() / 8 &&
           !allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign))
      VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  // Greedily cover the remaining bytes, shrinking the access type (or
  // overlapping with a prior access) whenever the current type is too wide.
  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector load / store's for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        // Step down through the integer types until one is safe (i8 is
        // always accepted as the floor).
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && AllowOverlap && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign,
                                         MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}
280 
281 /// Soften the operands of a comparison. This code is shared among BR_CC,
282 /// SELECT_CC, and SETCC handlers.
283 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
284                                          SDValue &NewLHS, SDValue &NewRHS,
285                                          ISD::CondCode &CCCode,
286                                          const SDLoc &dl, const SDValue OldLHS,
287                                          const SDValue OldRHS) const {
288   SDValue Chain;
289   return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
290                              OldRHS, Chain);
291 }
292 
293 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
294                                          SDValue &NewLHS, SDValue &NewRHS,
295                                          ISD::CondCode &CCCode,
296                                          const SDLoc &dl, const SDValue OldLHS,
297                                          const SDValue OldRHS,
298                                          SDValue &Chain,
299                                          bool IsSignaling) const {
300   // FIXME: Currently we cannot really respect all IEEE predicates due to libgcc
301   // not supporting it. We can update this code when libgcc provides such
302   // functions.
303 
304   assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
305          && "Unsupported setcc type!");
306 
307   // Expand into one or more soft-fp libcall(s).
308   RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
309   bool ShouldInvertCC = false;
310   switch (CCCode) {
311   case ISD::SETEQ:
312   case ISD::SETOEQ:
313     LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
314           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
315           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
316     break;
317   case ISD::SETNE:
318   case ISD::SETUNE:
319     LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
320           (VT == MVT::f64) ? RTLIB::UNE_F64 :
321           (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
322     break;
323   case ISD::SETGE:
324   case ISD::SETOGE:
325     LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
326           (VT == MVT::f64) ? RTLIB::OGE_F64 :
327           (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
328     break;
329   case ISD::SETLT:
330   case ISD::SETOLT:
331     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
332           (VT == MVT::f64) ? RTLIB::OLT_F64 :
333           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
334     break;
335   case ISD::SETLE:
336   case ISD::SETOLE:
337     LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
338           (VT == MVT::f64) ? RTLIB::OLE_F64 :
339           (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
340     break;
341   case ISD::SETGT:
342   case ISD::SETOGT:
343     LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
344           (VT == MVT::f64) ? RTLIB::OGT_F64 :
345           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
346     break;
347   case ISD::SETUO:
348     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
349           (VT == MVT::f64) ? RTLIB::UO_F64 :
350           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
351     break;
352   case ISD::SETO:
353     LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
354           (VT == MVT::f64) ? RTLIB::O_F64 :
355           (VT == MVT::f128) ? RTLIB::O_F128 : RTLIB::O_PPCF128;
356     break;
357   case ISD::SETONE:
358     // SETONE = SETOLT | SETOGT
359     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
360           (VT == MVT::f64) ? RTLIB::OLT_F64 :
361           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
362     LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
363           (VT == MVT::f64) ? RTLIB::OGT_F64 :
364           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
365     break;
366   case ISD::SETUEQ:
367     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
368           (VT == MVT::f64) ? RTLIB::UO_F64 :
369           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
370     LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
371           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
372           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
373     break;
374   default:
375     // Invert CC for unordered comparisons
376     ShouldInvertCC = true;
377     switch (CCCode) {
378     case ISD::SETULT:
379       LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
380             (VT == MVT::f64) ? RTLIB::OGE_F64 :
381             (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
382       break;
383     case ISD::SETULE:
384       LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
385             (VT == MVT::f64) ? RTLIB::OGT_F64 :
386             (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
387       break;
388     case ISD::SETUGT:
389       LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
390             (VT == MVT::f64) ? RTLIB::OLE_F64 :
391             (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
392       break;
393     case ISD::SETUGE:
394       LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
395             (VT == MVT::f64) ? RTLIB::OLT_F64 :
396             (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
397       break;
398     default: llvm_unreachable("Do not know how to soften this setcc!");
399     }
400   }
401 
402   // Use the target specific return value for comparions lib calls.
403   EVT RetVT = getCmpLibcallReturnType();
404   SDValue Ops[2] = {NewLHS, NewRHS};
405   TargetLowering::MakeLibCallOptions CallOptions;
406   EVT OpsVT[2] = { OldLHS.getValueType(),
407                    OldRHS.getValueType() };
408   CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
409   auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
410   NewLHS = Call.first;
411   NewRHS = DAG.getConstant(0, dl, RetVT);
412 
413   CCCode = getCmpLibcallCC(LC1);
414   if (ShouldInvertCC) {
415     assert(RetVT.isInteger());
416     CCCode = getSetCCInverse(CCCode, RetVT);
417   }
418 
419   if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
420     // Update Chain.
421     Chain = Call.second;
422   } else {
423     SDValue Tmp = DAG.getNode(
424         ISD::SETCC, dl,
425         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
426         NewLHS, NewRHS, DAG.getCondCode(CCCode));
427     auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
428     NewLHS = DAG.getNode(
429         ISD::SETCC, dl,
430         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
431         Call2.first, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
432     if (Chain)
433       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
434                           Call2.second);
435     NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
436     NewRHS = SDValue();
437   }
438 }
439 
440 /// Return the entry encoding for a jump table in the current function. The
441 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
442 unsigned TargetLowering::getJumpTableEncoding() const {
443   // In non-pic modes, just use the address of a block.
444   if (!isPositionIndependent())
445     return MachineJumpTableInfo::EK_BlockAddress;
446 
447   // In PIC mode, if the target supports a GPRel32 directive, use it.
448   if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
449     return MachineJumpTableInfo::EK_GPRel32BlockAddress;
450 
451   // Otherwise, use a label difference.
452   return MachineJumpTableInfo::EK_LabelDifference32;
453 }
454 
455 SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
456                                                  SelectionDAG &DAG) const {
457   // If our PIC model is GP relative, use the global offset table as the base.
458   unsigned JTEncoding = getJumpTableEncoding();
459 
460   if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
461       (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
462     return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));
463 
464   return Table;
465 }
466 
/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr so it can be used during
/// assembly/object emission rather than in the SelectionDAG.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,MCContext &Ctx) const{
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}
475 
476 bool
477 TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
478   const TargetMachine &TM = getTargetMachine();
479   const GlobalValue *GV = GA->getGlobal();
480 
481   // If the address is not even local to this DSO we will have to load it from
482   // a got and then add the offset.
483   if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
484     return false;
485 
486   // If the code is position independent we will have to add a base register.
487   if (isPositionIndependent())
488     return false;
489 
490   // Otherwise we can do it.
491   return true;
492 }
493 
494 //===----------------------------------------------------------------------===//
495 //  Optimization Methods
496 //===----------------------------------------------------------------------===//
497 
498 /// If the specified instruction has a constant integer operand and there are
499 /// bits set in that constant that are not demanded, then clear those bits and
500 /// return true.
501 bool TargetLowering::ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
502                                             TargetLoweringOpt &TLO) const {
503   SDLoc DL(Op);
504   unsigned Opcode = Op.getOpcode();
505 
506   // Do target-specific constant optimization.
507   if (targetShrinkDemandedConstant(Op, Demanded, TLO))
508     return TLO.New.getNode();
509 
510   // FIXME: ISD::SELECT, ISD::SELECT_CC
511   switch (Opcode) {
512   default:
513     break;
514   case ISD::XOR:
515   case ISD::AND:
516   case ISD::OR: {
517     auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
518     if (!Op1C)
519       return false;
520 
521     // If this is a 'not' op, don't touch it because that's a canonical form.
522     const APInt &C = Op1C->getAPIntValue();
523     if (Opcode == ISD::XOR && Demanded.isSubsetOf(C))
524       return false;
525 
526     if (!C.isSubsetOf(Demanded)) {
527       EVT VT = Op.getValueType();
528       SDValue NewC = TLO.DAG.getConstant(Demanded & C, DL, VT);
529       SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
530       return TLO.CombineTo(Op, NewOp);
531     }
532 
533     break;
534   }
535   }
536 
537   return false;
538 }
539 
540 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
541 /// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
542 /// generalized for targets with other types of implicit widening casts.
543 bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
544                                       const APInt &Demanded,
545                                       TargetLoweringOpt &TLO) const {
546   assert(Op.getNumOperands() == 2 &&
547          "ShrinkDemandedOp only supports binary operators!");
548   assert(Op.getNode()->getNumValues() == 1 &&
549          "ShrinkDemandedOp only supports nodes with one result!");
550 
551   SelectionDAG &DAG = TLO.DAG;
552   SDLoc dl(Op);
553 
554   // Early return, as this function cannot handle vector types.
555   if (Op.getValueType().isVector())
556     return false;
557 
558   // Don't do this if the node has another user, which may require the
559   // full value.
560   if (!Op.getNode()->hasOneUse())
561     return false;
562 
563   // Search for the smallest integer type with free casts to and from
564   // Op's type. For expedience, just check power-of-2 integer types.
565   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
566   unsigned DemandedSize = Demanded.getActiveBits();
567   unsigned SmallVTBits = DemandedSize;
568   if (!isPowerOf2_32(SmallVTBits))
569     SmallVTBits = NextPowerOf2(SmallVTBits);
570   for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
571     EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
572     if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
573         TLI.isZExtFree(SmallVT, Op.getValueType())) {
574       // We found a type with free casts.
575       SDValue X = DAG.getNode(
576           Op.getOpcode(), dl, SmallVT,
577           DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
578           DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
579       assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
580       SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
581       return TLO.CombineTo(Op, Z);
582     }
583   }
584   return false;
585 }
586 
587 bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
588                                           DAGCombinerInfo &DCI) const {
589   SelectionDAG &DAG = DCI.DAG;
590   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
591                         !DCI.isBeforeLegalizeOps());
592   KnownBits Known;
593 
594   bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
595   if (Simplified) {
596     DCI.AddToWorklist(Op.getNode());
597     DCI.CommitTargetLoweringOpt(TLO);
598   }
599   return Simplified;
600 }
601 
602 bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
603                                           KnownBits &Known,
604                                           TargetLoweringOpt &TLO,
605                                           unsigned Depth,
606                                           bool AssumeSingleUse) const {
607   EVT VT = Op.getValueType();
608   APInt DemandedElts = VT.isVector()
609                            ? APInt::getAllOnesValue(VT.getVectorNumElements())
610                            : APInt(1, 1);
611   return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
612                               AssumeSingleUse);
613 }
614 
615 // TODO: Can we merge SelectionDAG::GetDemandedBits into this?
616 // TODO: Under what circumstances can we create nodes? Constant folding?
617 SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
618     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
619     SelectionDAG &DAG, unsigned Depth) const {
620   // Limit search depth.
621   if (Depth >= SelectionDAG::MaxRecursionDepth)
622     return SDValue();
623 
624   // Ignore UNDEFs.
625   if (Op.isUndef())
626     return SDValue();
627 
628   // Not demanding any bits/elts from Op.
629   if (DemandedBits == 0 || DemandedElts == 0)
630     return DAG.getUNDEF(Op.getValueType());
631 
632   unsigned NumElts = DemandedElts.getBitWidth();
633   KnownBits LHSKnown, RHSKnown;
634   switch (Op.getOpcode()) {
635   case ISD::BITCAST: {
636     SDValue Src = peekThroughBitcasts(Op.getOperand(0));
637     EVT SrcVT = Src.getValueType();
638     EVT DstVT = Op.getValueType();
639     unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
640     unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
641 
642     if (NumSrcEltBits == NumDstEltBits)
643       if (SDValue V = SimplifyMultipleUseDemandedBits(
644               Src, DemandedBits, DemandedElts, DAG, Depth + 1))
645         return DAG.getBitcast(DstVT, V);
646 
647     // TODO - bigendian once we have test coverage.
648     if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
649         DAG.getDataLayout().isLittleEndian()) {
650       unsigned Scale = NumDstEltBits / NumSrcEltBits;
651       unsigned NumSrcElts = SrcVT.getVectorNumElements();
652       APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
653       APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
654       for (unsigned i = 0; i != Scale; ++i) {
655         unsigned Offset = i * NumSrcEltBits;
656         APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
657         if (!Sub.isNullValue()) {
658           DemandedSrcBits |= Sub;
659           for (unsigned j = 0; j != NumElts; ++j)
660             if (DemandedElts[j])
661               DemandedSrcElts.setBit((j * Scale) + i);
662         }
663       }
664 
665       if (SDValue V = SimplifyMultipleUseDemandedBits(
666               Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
667         return DAG.getBitcast(DstVT, V);
668     }
669 
670     // TODO - bigendian once we have test coverage.
671     if ((NumSrcEltBits % NumDstEltBits) == 0 &&
672         DAG.getDataLayout().isLittleEndian()) {
673       unsigned Scale = NumSrcEltBits / NumDstEltBits;
674       unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
675       APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
676       APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
677       for (unsigned i = 0; i != NumElts; ++i)
678         if (DemandedElts[i]) {
679           unsigned Offset = (i % Scale) * NumDstEltBits;
680           DemandedSrcBits.insertBits(DemandedBits, Offset);
681           DemandedSrcElts.setBit(i / Scale);
682         }
683 
684       if (SDValue V = SimplifyMultipleUseDemandedBits(
685               Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
686         return DAG.getBitcast(DstVT, V);
687     }
688 
689     break;
690   }
691   case ISD::AND: {
692     LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
693     RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
694 
695     // If all of the demanded bits are known 1 on one side, return the other.
696     // These bits cannot contribute to the result of the 'and' in this
697     // context.
698     if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
699       return Op.getOperand(0);
700     if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
701       return Op.getOperand(1);
702     break;
703   }
704   case ISD::OR: {
705     LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
706     RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
707 
708     // If all of the demanded bits are known zero on one side, return the
709     // other.  These bits cannot contribute to the result of the 'or' in this
710     // context.
711     if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
712       return Op.getOperand(0);
713     if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
714       return Op.getOperand(1);
715     break;
716   }
717   case ISD::XOR: {
718     LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
719     RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
720 
721     // If all of the demanded bits are known zero on one side, return the
722     // other.
723     if (DemandedBits.isSubsetOf(RHSKnown.Zero))
724       return Op.getOperand(0);
725     if (DemandedBits.isSubsetOf(LHSKnown.Zero))
726       return Op.getOperand(1);
727     break;
728   }
729   case ISD::SETCC: {
730     SDValue Op0 = Op.getOperand(0);
731     SDValue Op1 = Op.getOperand(1);
732     ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
733     // If (1) we only need the sign-bit, (2) the setcc operands are the same
734     // width as the setcc result, and (3) the result of a setcc conforms to 0 or
735     // -1, we may be able to bypass the setcc.
736     if (DemandedBits.isSignMask() &&
737         Op0.getScalarValueSizeInBits() == DemandedBits.getBitWidth() &&
738         getBooleanContents(Op0.getValueType()) ==
739             BooleanContent::ZeroOrNegativeOneBooleanContent) {
740       // If we're testing X < 0, then this compare isn't needed - just use X!
741       // FIXME: We're limiting to integer types here, but this should also work
742       // if we don't care about FP signed-zero. The use of SETLT with FP means
743       // that we don't care about NaNs.
744       if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
745           (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
746         return Op0;
747     }
748     break;
749   }
750   case ISD::SIGN_EXTEND_INREG: {
751     // If none of the extended bits are demanded, eliminate the sextinreg.
752     EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
753     if (DemandedBits.getActiveBits() <= ExVT.getScalarSizeInBits())
754       return Op.getOperand(0);
755     break;
756   }
757   case ISD::INSERT_VECTOR_ELT: {
758     // If we don't demand the inserted element, return the base vector.
759     SDValue Vec = Op.getOperand(0);
760     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
761     EVT VecVT = Vec.getValueType();
762     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
763         !DemandedElts[CIdx->getZExtValue()])
764       return Vec;
765     break;
766   }
767   case ISD::VECTOR_SHUFFLE: {
768     ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();
769 
770     // If all the demanded elts are from one operand and are inline,
771     // then we can use the operand directly.
772     bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
773     for (unsigned i = 0; i != NumElts; ++i) {
774       int M = ShuffleMask[i];
775       if (M < 0 || !DemandedElts[i])
776         continue;
777       AllUndef = false;
778       IdentityLHS &= (M == (int)i);
779       IdentityRHS &= ((M - NumElts) == i);
780     }
781 
782     if (AllUndef)
783       return DAG.getUNDEF(Op.getValueType());
784     if (IdentityLHS)
785       return Op.getOperand(0);
786     if (IdentityRHS)
787       return Op.getOperand(1);
788     break;
789   }
790   default:
791     if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
792       if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
793               Op, DemandedBits, DemandedElts, DAG, Depth))
794         return V;
795     break;
796   }
797   return SDValue();
798 }
799 
/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in TLO.Old and TLO.New. Otherwise, analyze the
/// expression and return a mask of Known bits for the expression (used to
/// simplify the caller). The Known bits may only be accurate for those bits in
/// the OriginalDemandedBits and OriginalDemandedElts.
807 bool TargetLowering::SimplifyDemandedBits(
808     SDValue Op, const APInt &OriginalDemandedBits,
809     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
810     unsigned Depth, bool AssumeSingleUse) const {
811   unsigned BitWidth = OriginalDemandedBits.getBitWidth();
812   assert(Op.getScalarValueSizeInBits() == BitWidth &&
813          "Mask size mismatches value type size!");
814 
815   unsigned NumElts = OriginalDemandedElts.getBitWidth();
816   assert((!Op.getValueType().isVector() ||
817           NumElts == Op.getValueType().getVectorNumElements()) &&
818          "Unexpected vector size");
819 
820   APInt DemandedBits = OriginalDemandedBits;
821   APInt DemandedElts = OriginalDemandedElts;
822   SDLoc dl(Op);
823   auto &DL = TLO.DAG.getDataLayout();
824 
825   // Don't know anything.
826   Known = KnownBits(BitWidth);
827 
828   // Undef operand.
829   if (Op.isUndef())
830     return false;
831 
832   if (Op.getOpcode() == ISD::Constant) {
833     // We know all of the bits for a constant!
834     Known.One = cast<ConstantSDNode>(Op)->getAPIntValue();
835     Known.Zero = ~Known.One;
836     return false;
837   }
838 
839   // Other users may use these bits.
840   EVT VT = Op.getValueType();
841   if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
842     if (Depth != 0) {
843       // If not at the root, Just compute the Known bits to
844       // simplify things downstream.
845       Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
846       return false;
847     }
848     // If this is the root being simplified, allow it to have multiple uses,
849     // just set the DemandedBits/Elts to all bits.
850     DemandedBits = APInt::getAllOnesValue(BitWidth);
851     DemandedElts = APInt::getAllOnesValue(NumElts);
852   } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
853     // Not demanding any bits/elts from Op.
854     return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
855   } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
856     // Limit search depth.
857     return false;
858   }
859 
860   KnownBits Known2, KnownOut;
861   switch (Op.getOpcode()) {
862   case ISD::TargetConstant:
863     llvm_unreachable("Can't simplify this node");
864   case ISD::SCALAR_TO_VECTOR: {
865     if (!DemandedElts[0])
866       return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
867 
868     KnownBits SrcKnown;
869     SDValue Src = Op.getOperand(0);
870     unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
871     APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
872     if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
873       return true;
874     Known = SrcKnown.zextOrTrunc(BitWidth, false);
875     break;
876   }
877   case ISD::BUILD_VECTOR:
878     // Collect the known bits that are shared by every demanded element.
879     // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
880     Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
881     return false; // Don't fall through, will infinitely loop.
882   case ISD::LOAD: {
883     LoadSDNode *LD = cast<LoadSDNode>(Op);
884     if (getTargetConstantFromLoad(LD)) {
885       Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
886       return false; // Don't fall through, will infinitely loop.
887     }
888     break;
889   }
890   case ISD::INSERT_VECTOR_ELT: {
891     SDValue Vec = Op.getOperand(0);
892     SDValue Scl = Op.getOperand(1);
893     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
894     EVT VecVT = Vec.getValueType();
895 
896     // If index isn't constant, assume we need all vector elements AND the
897     // inserted element.
898     APInt DemandedVecElts(DemandedElts);
899     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
900       unsigned Idx = CIdx->getZExtValue();
901       DemandedVecElts.clearBit(Idx);
902 
903       // Inserted element is not required.
904       if (!DemandedElts[Idx])
905         return TLO.CombineTo(Op, Vec);
906     }
907 
908     KnownBits KnownScl;
909     unsigned NumSclBits = Scl.getScalarValueSizeInBits();
910     APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
911     if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
912       return true;
913 
914     Known = KnownScl.zextOrTrunc(BitWidth, false);
915 
916     KnownBits KnownVec;
917     if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
918                              Depth + 1))
919       return true;
920 
921     if (!!DemandedVecElts) {
922       Known.One &= KnownVec.One;
923       Known.Zero &= KnownVec.Zero;
924     }
925 
926     return false;
927   }
928   case ISD::INSERT_SUBVECTOR: {
929     SDValue Base = Op.getOperand(0);
930     SDValue Sub = Op.getOperand(1);
931     EVT SubVT = Sub.getValueType();
932     unsigned NumSubElts = SubVT.getVectorNumElements();
933 
934     // If index isn't constant, assume we need the original demanded base
935     // elements and ALL the inserted subvector elements.
936     APInt BaseElts = DemandedElts;
937     APInt SubElts = APInt::getAllOnesValue(NumSubElts);
938     if (isa<ConstantSDNode>(Op.getOperand(2))) {
939       const APInt &Idx = Op.getConstantOperandAPInt(2);
940       if (Idx.ule(NumElts - NumSubElts)) {
941         unsigned SubIdx = Idx.getZExtValue();
942         SubElts = DemandedElts.extractBits(NumSubElts, SubIdx);
943         BaseElts.insertBits(APInt::getNullValue(NumSubElts), SubIdx);
944       }
945     }
946 
947     KnownBits KnownSub, KnownBase;
948     if (SimplifyDemandedBits(Sub, DemandedBits, SubElts, KnownSub, TLO,
949                              Depth + 1))
950       return true;
951     if (SimplifyDemandedBits(Base, DemandedBits, BaseElts, KnownBase, TLO,
952                              Depth + 1))
953       return true;
954 
955     Known.Zero.setAllBits();
956     Known.One.setAllBits();
957     if (!!SubElts) {
958         Known.One &= KnownSub.One;
959         Known.Zero &= KnownSub.Zero;
960     }
961     if (!!BaseElts) {
962         Known.One &= KnownBase.One;
963         Known.Zero &= KnownBase.Zero;
964     }
965     break;
966   }
967   case ISD::EXTRACT_SUBVECTOR: {
968     // If index isn't constant, assume we need all the source vector elements.
969     SDValue Src = Op.getOperand(0);
970     ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
971     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
972     APInt SrcElts = APInt::getAllOnesValue(NumSrcElts);
973     if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
974       // Offset the demanded elts by the subvector index.
975       uint64_t Idx = SubIdx->getZExtValue();
976       SrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
977     }
978     if (SimplifyDemandedBits(Src, DemandedBits, SrcElts, Known, TLO, Depth + 1))
979       return true;
980     break;
981   }
982   case ISD::CONCAT_VECTORS: {
983     Known.Zero.setAllBits();
984     Known.One.setAllBits();
985     EVT SubVT = Op.getOperand(0).getValueType();
986     unsigned NumSubVecs = Op.getNumOperands();
987     unsigned NumSubElts = SubVT.getVectorNumElements();
988     for (unsigned i = 0; i != NumSubVecs; ++i) {
989       APInt DemandedSubElts =
990           DemandedElts.extractBits(NumSubElts, i * NumSubElts);
991       if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
992                                Known2, TLO, Depth + 1))
993         return true;
994       // Known bits are shared by every demanded subvector element.
995       if (!!DemandedSubElts) {
996         Known.One &= Known2.One;
997         Known.Zero &= Known2.Zero;
998       }
999     }
1000     break;
1001   }
1002   case ISD::VECTOR_SHUFFLE: {
1003     ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();
1004 
1005     // Collect demanded elements from shuffle operands..
1006     APInt DemandedLHS(NumElts, 0);
1007     APInt DemandedRHS(NumElts, 0);
1008     for (unsigned i = 0; i != NumElts; ++i) {
1009       if (!DemandedElts[i])
1010         continue;
1011       int M = ShuffleMask[i];
1012       if (M < 0) {
1013         // For UNDEF elements, we don't know anything about the common state of
1014         // the shuffle result.
1015         DemandedLHS.clearAllBits();
1016         DemandedRHS.clearAllBits();
1017         break;
1018       }
1019       assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
1020       if (M < (int)NumElts)
1021         DemandedLHS.setBit(M);
1022       else
1023         DemandedRHS.setBit(M - NumElts);
1024     }
1025 
1026     if (!!DemandedLHS || !!DemandedRHS) {
1027       SDValue Op0 = Op.getOperand(0);
1028       SDValue Op1 = Op.getOperand(1);
1029 
1030       Known.Zero.setAllBits();
1031       Known.One.setAllBits();
1032       if (!!DemandedLHS) {
1033         if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
1034                                  Depth + 1))
1035           return true;
1036         Known.One &= Known2.One;
1037         Known.Zero &= Known2.Zero;
1038       }
1039       if (!!DemandedRHS) {
1040         if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
1041                                  Depth + 1))
1042           return true;
1043         Known.One &= Known2.One;
1044         Known.Zero &= Known2.Zero;
1045       }
1046 
1047       // Attempt to avoid multi-use ops if we don't need anything from them.
1048       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1049           Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
1050       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1051           Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
1052       if (DemandedOp0 || DemandedOp1) {
1053         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1054         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1055         SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
1056         return TLO.CombineTo(Op, NewOp);
1057       }
1058     }
1059     break;
1060   }
1061   case ISD::AND: {
1062     SDValue Op0 = Op.getOperand(0);
1063     SDValue Op1 = Op.getOperand(1);
1064 
1065     // If the RHS is a constant, check to see if the LHS would be zero without
1066     // using the bits from the RHS.  Below, we use knowledge about the RHS to
1067     // simplify the LHS, here we're using information from the LHS to simplify
1068     // the RHS.
1069     if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
1070       // Do not increment Depth here; that can cause an infinite loop.
1071       KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
1072       // If the LHS already has zeros where RHSC does, this 'and' is dead.
1073       if ((LHSKnown.Zero & DemandedBits) ==
1074           (~RHSC->getAPIntValue() & DemandedBits))
1075         return TLO.CombineTo(Op, Op0);
1076 
1077       // If any of the set bits in the RHS are known zero on the LHS, shrink
1078       // the constant.
1079       if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits, TLO))
1080         return true;
1081 
1082       // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
1083       // constant, but if this 'and' is only clearing bits that were just set by
1084       // the xor, then this 'and' can be eliminated by shrinking the mask of
1085       // the xor. For example, for a 32-bit X:
1086       // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
1087       if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
1088           LHSKnown.One == ~RHSC->getAPIntValue()) {
1089         SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
1090         return TLO.CombineTo(Op, Xor);
1091       }
1092     }
1093 
1094     if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
1095                              Depth + 1))
1096       return true;
1097     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1098     if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
1099                              Known2, TLO, Depth + 1))
1100       return true;
1101     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1102 
1103     // Attempt to avoid multi-use ops if we don't need anything from them.
1104     if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
1105       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1106           Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1107       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1108           Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1109       if (DemandedOp0 || DemandedOp1) {
1110         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1111         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1112         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
1113         return TLO.CombineTo(Op, NewOp);
1114       }
1115     }
1116 
1117     // If all of the demanded bits are known one on one side, return the other.
1118     // These bits cannot contribute to the result of the 'and'.
1119     if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
1120       return TLO.CombineTo(Op, Op0);
1121     if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
1122       return TLO.CombineTo(Op, Op1);
1123     // If all of the demanded bits in the inputs are known zeros, return zero.
1124     if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
1125       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
1126     // If the RHS is a constant, see if we can simplify it.
1127     if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, TLO))
1128       return true;
1129     // If the operation can be done in a smaller type, do so.
1130     if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
1131       return true;
1132 
1133     // Output known-1 bits are only known if set in both the LHS & RHS.
1134     Known.One &= Known2.One;
1135     // Output known-0 are known to be clear if zero in either the LHS | RHS.
1136     Known.Zero |= Known2.Zero;
1137     break;
1138   }
1139   case ISD::OR: {
1140     SDValue Op0 = Op.getOperand(0);
1141     SDValue Op1 = Op.getOperand(1);
1142 
1143     if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
1144                              Depth + 1))
1145       return true;
1146     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1147     if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
1148                              Known2, TLO, Depth + 1))
1149       return true;
1150     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1151 
1152     // Attempt to avoid multi-use ops if we don't need anything from them.
1153     if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
1154       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1155           Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1156       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1157           Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1158       if (DemandedOp0 || DemandedOp1) {
1159         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1160         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1161         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
1162         return TLO.CombineTo(Op, NewOp);
1163       }
1164     }
1165 
1166     // If all of the demanded bits are known zero on one side, return the other.
1167     // These bits cannot contribute to the result of the 'or'.
1168     if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
1169       return TLO.CombineTo(Op, Op0);
1170     if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
1171       return TLO.CombineTo(Op, Op1);
1172     // If the RHS is a constant, see if we can simplify it.
1173     if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
1174       return true;
1175     // If the operation can be done in a smaller type, do so.
1176     if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
1177       return true;
1178 
1179     // Output known-0 bits are only known if clear in both the LHS & RHS.
1180     Known.Zero &= Known2.Zero;
1181     // Output known-1 are known to be set if set in either the LHS | RHS.
1182     Known.One |= Known2.One;
1183     break;
1184   }
1185   case ISD::XOR: {
1186     SDValue Op0 = Op.getOperand(0);
1187     SDValue Op1 = Op.getOperand(1);
1188 
1189     if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
1190                              Depth + 1))
1191       return true;
1192     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1193     if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
1194                              Depth + 1))
1195       return true;
1196     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1197 
1198     // Attempt to avoid multi-use ops if we don't need anything from them.
1199     if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
1200       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1201           Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1202       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1203           Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1204       if (DemandedOp0 || DemandedOp1) {
1205         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1206         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1207         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
1208         return TLO.CombineTo(Op, NewOp);
1209       }
1210     }
1211 
1212     // If all of the demanded bits are known zero on one side, return the other.
1213     // These bits cannot contribute to the result of the 'xor'.
1214     if (DemandedBits.isSubsetOf(Known.Zero))
1215       return TLO.CombineTo(Op, Op0);
1216     if (DemandedBits.isSubsetOf(Known2.Zero))
1217       return TLO.CombineTo(Op, Op1);
1218     // If the operation can be done in a smaller type, do so.
1219     if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
1220       return true;
1221 
1222     // If all of the unknown bits are known to be zero on one side or the other
1223     // (but not both) turn this into an *inclusive* or.
1224     //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
1225     if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
1226       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));
1227 
1228     // Output known-0 bits are known if clear or set in both the LHS & RHS.
1229     KnownOut.Zero = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1230     // Output known-1 are known to be set if set in only one of the LHS, RHS.
1231     KnownOut.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1232 
1233     if (ConstantSDNode *C = isConstOrConstSplat(Op1)) {
1234       // If one side is a constant, and all of the known set bits on the other
1235       // side are also set in the constant, turn this into an AND, as we know
1236       // the bits will be cleared.
1237       //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
1238       // NB: it is okay if more bits are known than are requested
1239       if (C->getAPIntValue() == Known2.One) {
1240         SDValue ANDC =
1241             TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
1242         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
1243       }
1244 
1245       // If the RHS is a constant, see if we can change it. Don't alter a -1
1246       // constant because that's a 'not' op, and that is better for combining
1247       // and codegen.
1248       if (!C->isAllOnesValue()) {
1249         if (DemandedBits.isSubsetOf(C->getAPIntValue())) {
1250           // We're flipping all demanded bits. Flip the undemanded bits too.
1251           SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
1252           return TLO.CombineTo(Op, New);
1253         }
1254         // If we can't turn this into a 'not', try to shrink the constant.
1255         if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
1256           return true;
1257       }
1258     }
1259 
1260     Known = std::move(KnownOut);
1261     break;
1262   }
1263   case ISD::SELECT:
1264     if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
1265                              Depth + 1))
1266       return true;
1267     if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
1268                              Depth + 1))
1269       return true;
1270     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1271     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1272 
1273     // If the operands are constants, see if we can simplify them.
1274     if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
1275       return true;
1276 
1277     // Only known if known in both the LHS and RHS.
1278     Known.One &= Known2.One;
1279     Known.Zero &= Known2.Zero;
1280     break;
1281   case ISD::SELECT_CC:
1282     if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
1283                              Depth + 1))
1284       return true;
1285     if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
1286                              Depth + 1))
1287       return true;
1288     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1289     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
1290 
1291     // If the operands are constants, see if we can simplify them.
1292     if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
1293       return true;
1294 
1295     // Only known if known in both the LHS and RHS.
1296     Known.One &= Known2.One;
1297     Known.Zero &= Known2.Zero;
1298     break;
1299   case ISD::SETCC: {
1300     SDValue Op0 = Op.getOperand(0);
1301     SDValue Op1 = Op.getOperand(1);
1302     ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1303     // If (1) we only need the sign-bit, (2) the setcc operands are the same
1304     // width as the setcc result, and (3) the result of a setcc conforms to 0 or
1305     // -1, we may be able to bypass the setcc.
1306     if (DemandedBits.isSignMask() &&
1307         Op0.getScalarValueSizeInBits() == BitWidth &&
1308         getBooleanContents(Op0.getValueType()) ==
1309             BooleanContent::ZeroOrNegativeOneBooleanContent) {
1310       // If we're testing X < 0, then this compare isn't needed - just use X!
1311       // FIXME: We're limiting to integer types here, but this should also work
1312       // if we don't care about FP signed-zero. The use of SETLT with FP means
1313       // that we don't care about NaNs.
1314       if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
1315           (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
1316         return TLO.CombineTo(Op, Op0);
1317 
1318       // TODO: Should we check for other forms of sign-bit comparisons?
1319       // Examples: X <= -1, X >= 0
1320     }
1321     if (getBooleanContents(Op0.getValueType()) ==
1322             TargetLowering::ZeroOrOneBooleanContent &&
1323         BitWidth > 1)
1324       Known.Zero.setBitsFrom(1);
1325     break;
1326   }
1327   case ISD::SHL: {
1328     SDValue Op0 = Op.getOperand(0);
1329     SDValue Op1 = Op.getOperand(1);
1330 
1331     if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
1332       // If the shift count is an invalid immediate, don't do anything.
1333       if (SA->getAPIntValue().uge(BitWidth))
1334         break;
1335 
1336       unsigned ShAmt = SA->getZExtValue();
1337       if (ShAmt == 0)
1338         return TLO.CombineTo(Op, Op0);
1339 
1340       // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
1341       // single shift.  We can do this if the bottom bits (which are shifted
1342       // out) are never demanded.
1343       // TODO - support non-uniform vector amounts.
1344       if (Op0.getOpcode() == ISD::SRL) {
1345         if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
1346           if (ConstantSDNode *SA2 =
1347                   isConstOrConstSplat(Op0.getOperand(1), DemandedElts)) {
1348             if (SA2->getAPIntValue().ult(BitWidth)) {
1349               unsigned C1 = SA2->getZExtValue();
1350               unsigned Opc = ISD::SHL;
1351               int Diff = ShAmt - C1;
1352               if (Diff < 0) {
1353                 Diff = -Diff;
1354                 Opc = ISD::SRL;
1355               }
1356 
1357               SDValue NewSA = TLO.DAG.getConstant(Diff, dl, Op1.getValueType());
1358               return TLO.CombineTo(
1359                   Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
1360             }
1361           }
1362         }
1363       }
1364 
1365       if (SimplifyDemandedBits(Op0, DemandedBits.lshr(ShAmt), DemandedElts,
1366                                Known, TLO, Depth + 1))
1367         return true;
1368 
1369       // Try shrinking the operation as long as the shift amount will still be
1370       // in range.
1371       if ((ShAmt < DemandedBits.getActiveBits()) &&
1372           ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
1373         return true;
1374 
1375       // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
1376       // are not demanded. This will likely allow the anyext to be folded away.
1377       if (Op0.getOpcode() == ISD::ANY_EXTEND) {
1378         SDValue InnerOp = Op0.getOperand(0);
1379         EVT InnerVT = InnerOp.getValueType();
1380         unsigned InnerBits = InnerVT.getScalarSizeInBits();
1381         if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
1382             isTypeDesirableForOp(ISD::SHL, InnerVT)) {
1383           EVT ShTy = getShiftAmountTy(InnerVT, DL);
1384           if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
1385             ShTy = InnerVT;
1386           SDValue NarrowShl =
1387               TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
1388                               TLO.DAG.getConstant(ShAmt, dl, ShTy));
1389           return TLO.CombineTo(
1390               Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
1391         }
1392         // Repeat the SHL optimization above in cases where an extension
1393         // intervenes: (shl (anyext (shr x, c1)), c2) to
1394         // (shl (anyext x), c2-c1).  This requires that the bottom c1 bits
1395         // aren't demanded (as above) and that the shifted upper c1 bits of
1396         // x aren't demanded.
1397         if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
1398             InnerOp.hasOneUse()) {
1399           if (ConstantSDNode *SA2 =
1400                   isConstOrConstSplat(InnerOp.getOperand(1))) {
1401             unsigned InnerShAmt = SA2->getLimitedValue(InnerBits);
1402             if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1403                 DemandedBits.getActiveBits() <=
1404                     (InnerBits - InnerShAmt + ShAmt) &&
1405                 DemandedBits.countTrailingZeros() >= ShAmt) {
1406               SDValue NewSA = TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
1407                                                   Op1.getValueType());
1408               SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
1409                                                InnerOp.getOperand(0));
1410               return TLO.CombineTo(
1411                   Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
1412             }
1413           }
1414         }
1415       }
1416 
1417       Known.Zero <<= ShAmt;
1418       Known.One <<= ShAmt;
1419       // low bits known zero.
1420       Known.Zero.setLowBits(ShAmt);
1421     }
1422     break;
1423   }
1424   case ISD::SRL: {
1425     SDValue Op0 = Op.getOperand(0);
1426     SDValue Op1 = Op.getOperand(1);
1427 
1428     if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
1429       // If the shift count is an invalid immediate, don't do anything.
1430       if (SA->getAPIntValue().uge(BitWidth))
1431         break;
1432 
1433       unsigned ShAmt = SA->getZExtValue();
1434       if (ShAmt == 0)
1435         return TLO.CombineTo(Op, Op0);
1436 
1437       EVT ShiftVT = Op1.getValueType();
1438       APInt InDemandedMask = (DemandedBits << ShAmt);
1439 
1440       // If the shift is exact, then it does demand the low bits (and knows that
1441       // they are zero).
1442       if (Op->getFlags().hasExact())
1443         InDemandedMask.setLowBits(ShAmt);
1444 
1445       // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
1446       // single shift.  We can do this if the top bits (which are shifted out)
1447       // are never demanded.
1448       // TODO - support non-uniform vector amounts.
1449       if (Op0.getOpcode() == ISD::SHL) {
1450         if (ConstantSDNode *SA2 =
1451                 isConstOrConstSplat(Op0.getOperand(1), DemandedElts)) {
1452           if (!DemandedBits.intersects(
1453                   APInt::getHighBitsSet(BitWidth, ShAmt))) {
1454             if (SA2->getAPIntValue().ult(BitWidth)) {
1455               unsigned C1 = SA2->getZExtValue();
1456               unsigned Opc = ISD::SRL;
1457               int Diff = ShAmt - C1;
1458               if (Diff < 0) {
1459                 Diff = -Diff;
1460                 Opc = ISD::SHL;
1461               }
1462 
1463               SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
1464               return TLO.CombineTo(
1465                   Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
1466             }
1467           }
1468         }
1469       }
1470 
1471       // Compute the new bits that are at the top now.
1472       if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1473                                Depth + 1))
1474         return true;
1475       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1476       Known.Zero.lshrInPlace(ShAmt);
1477       Known.One.lshrInPlace(ShAmt);
1478 
1479       Known.Zero.setHighBits(ShAmt); // High bits known zero.
1480     }
1481     break;
1482   }
1483   case ISD::SRA: {
1484     SDValue Op0 = Op.getOperand(0);
1485     SDValue Op1 = Op.getOperand(1);
1486 
1487     // If this is an arithmetic shift right and only the low-bit is set, we can
1488     // always convert this into a logical shr, even if the shift amount is
1489     // variable.  The low bit of the shift cannot be an input sign bit unless
1490     // the shift amount is >= the size of the datatype, which is undefined.
1491     if (DemandedBits.isOneValue())
1492       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1));
1493 
1494     if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
1495       // If the shift count is an invalid immediate, don't do anything.
1496       if (SA->getAPIntValue().uge(BitWidth))
1497         break;
1498 
1499       unsigned ShAmt = SA->getZExtValue();
1500       if (ShAmt == 0)
1501         return TLO.CombineTo(Op, Op0);
1502 
1503       APInt InDemandedMask = (DemandedBits << ShAmt);
1504 
1505       // If the shift is exact, then it does demand the low bits (and knows that
1506       // they are zero).
1507       if (Op->getFlags().hasExact())
1508         InDemandedMask.setLowBits(ShAmt);
1509 
1510       // If any of the demanded bits are produced by the sign extension, we also
1511       // demand the input sign bit.
1512       if (DemandedBits.countLeadingZeros() < ShAmt)
1513         InDemandedMask.setSignBit();
1514 
1515       if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1516                                Depth + 1))
1517         return true;
1518       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1519       Known.Zero.lshrInPlace(ShAmt);
1520       Known.One.lshrInPlace(ShAmt);
1521 
1522       // If the input sign bit is known to be zero, or if none of the top bits
1523       // are demanded, turn this into an unsigned shift right.
1524       if (Known.Zero[BitWidth - ShAmt - 1] ||
1525           DemandedBits.countLeadingZeros() >= ShAmt) {
1526         SDNodeFlags Flags;
1527         Flags.setExact(Op->getFlags().hasExact());
1528         return TLO.CombineTo(
1529             Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags));
1530       }
1531 
1532       int Log2 = DemandedBits.exactLogBase2();
1533       if (Log2 >= 0) {
1534         // The bit must come from the sign.
1535         SDValue NewSA =
1536             TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, Op1.getValueType());
1537         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA));
1538       }
1539 
1540       if (Known.One[BitWidth - ShAmt - 1])
1541         // New bits are known one.
1542         Known.One.setHighBits(ShAmt);
1543     }
1544     break;
1545   }
1546   case ISD::FSHL:
1547   case ISD::FSHR: {
1548     SDValue Op0 = Op.getOperand(0);
1549     SDValue Op1 = Op.getOperand(1);
1550     SDValue Op2 = Op.getOperand(2);
1551     bool IsFSHL = (Op.getOpcode() == ISD::FSHL);
1552 
1553     if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) {
1554       unsigned Amt = SA->getAPIntValue().urem(BitWidth);
1555 
1556       // For fshl, 0-shift returns the 1st arg.
1557       // For fshr, 0-shift returns the 2nd arg.
1558       if (Amt == 0) {
1559         if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
1560                                  Known, TLO, Depth + 1))
1561           return true;
1562         break;
1563       }
1564 
1565       // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt))
1566       // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt)
1567       APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
1568       APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt);
1569       if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
1570                                Depth + 1))
1571         return true;
1572       if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
1573                                Depth + 1))
1574         return true;
1575 
1576       Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt));
1577       Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt));
1578       Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1579       Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1580       Known.One |= Known2.One;
1581       Known.Zero |= Known2.Zero;
1582     }
1583     break;
1584   }
1585   case ISD::BITREVERSE: {
1586     SDValue Src = Op.getOperand(0);
1587     APInt DemandedSrcBits = DemandedBits.reverseBits();
1588     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
1589                              Depth + 1))
1590       return true;
1591     Known.One = Known2.One.reverseBits();
1592     Known.Zero = Known2.Zero.reverseBits();
1593     break;
1594   }
1595   case ISD::BSWAP: {
1596     SDValue Src = Op.getOperand(0);
1597     APInt DemandedSrcBits = DemandedBits.byteSwap();
1598     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
1599                              Depth + 1))
1600       return true;
1601     Known.One = Known2.One.byteSwap();
1602     Known.Zero = Known2.Zero.byteSwap();
1603     break;
1604   }
1605   case ISD::SIGN_EXTEND_INREG: {
1606     SDValue Op0 = Op.getOperand(0);
1607     EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1608     unsigned ExVTBits = ExVT.getScalarSizeInBits();
1609 
1610     // If we only care about the highest bit, don't bother shifting right.
1611     if (DemandedBits.isSignMask()) {
1612       unsigned NumSignBits = TLO.DAG.ComputeNumSignBits(Op0);
1613       bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1;
1614       // However if the input is already sign extended we expect the sign
1615       // extension to be dropped altogether later and do not simplify.
1616       if (!AlreadySignExtended) {
1617         // Compute the correct shift amount type, which must be getShiftAmountTy
1618         // for scalar types after legalization.
1619         EVT ShiftAmtTy = VT;
1620         if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
1621           ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
1622 
1623         SDValue ShiftAmt =
1624             TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy);
1625         return TLO.CombineTo(Op,
1626                              TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt));
1627       }
1628     }
1629 
1630     // If none of the extended bits are demanded, eliminate the sextinreg.
1631     if (DemandedBits.getActiveBits() <= ExVTBits)
1632       return TLO.CombineTo(Op, Op0);
1633 
1634     APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits);
1635 
1636     // Since the sign extended bits are demanded, we know that the sign
1637     // bit is demanded.
1638     InputDemandedBits.setBit(ExVTBits - 1);
1639 
1640     if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1))
1641       return true;
1642     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1643 
1644     // If the sign bit of the input is known set or clear, then we know the
1645     // top bits of the result.
1646 
1647     // If the input sign bit is known zero, convert this into a zero extension.
1648     if (Known.Zero[ExVTBits - 1])
1649       return TLO.CombineTo(
1650           Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT.getScalarType()));
1651 
1652     APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits);
1653     if (Known.One[ExVTBits - 1]) { // Input sign bit known set
1654       Known.One.setBitsFrom(ExVTBits);
1655       Known.Zero &= Mask;
1656     } else { // Input sign bit unknown
1657       Known.Zero &= Mask;
1658       Known.One &= Mask;
1659     }
1660     break;
1661   }
1662   case ISD::BUILD_PAIR: {
1663     EVT HalfVT = Op.getOperand(0).getValueType();
1664     unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
1665 
1666     APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
1667     APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
1668 
1669     KnownBits KnownLo, KnownHi;
1670 
1671     if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1))
1672       return true;
1673 
1674     if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1))
1675       return true;
1676 
1677     Known.Zero = KnownLo.Zero.zext(BitWidth) |
1678                  KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth);
1679 
1680     Known.One = KnownLo.One.zext(BitWidth) |
1681                 KnownHi.One.zext(BitWidth).shl(HalfBitWidth);
1682     break;
1683   }
1684   case ISD::ZERO_EXTEND:
1685   case ISD::ZERO_EXTEND_VECTOR_INREG: {
1686     SDValue Src = Op.getOperand(0);
1687     EVT SrcVT = Src.getValueType();
1688     unsigned InBits = SrcVT.getScalarSizeInBits();
1689     unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1690     bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG;
1691 
1692     // If none of the top bits are demanded, convert this into an any_extend.
1693     if (DemandedBits.getActiveBits() <= InBits) {
1694       // If we only need the non-extended bits of the bottom element
1695       // then we can just bitcast to the result.
1696       if (IsVecInReg && DemandedElts == 1 &&
1697           VT.getSizeInBits() == SrcVT.getSizeInBits() &&
1698           TLO.DAG.getDataLayout().isLittleEndian())
1699         return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1700 
1701       unsigned Opc =
1702           IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
1703       if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1704         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1705     }
1706 
1707     APInt InDemandedBits = DemandedBits.trunc(InBits);
1708     APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1709     if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1710                              Depth + 1))
1711       return true;
1712     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1713     assert(Known.getBitWidth() == InBits && "Src width has changed?");
1714     Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
1715     break;
1716   }
1717   case ISD::SIGN_EXTEND:
1718   case ISD::SIGN_EXTEND_VECTOR_INREG: {
1719     SDValue Src = Op.getOperand(0);
1720     EVT SrcVT = Src.getValueType();
1721     unsigned InBits = SrcVT.getScalarSizeInBits();
1722     unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1723     bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG;
1724 
1725     // If none of the top bits are demanded, convert this into an any_extend.
1726     if (DemandedBits.getActiveBits() <= InBits) {
1727       // If we only need the non-extended bits of the bottom element
1728       // then we can just bitcast to the result.
1729       if (IsVecInReg && DemandedElts == 1 &&
1730           VT.getSizeInBits() == SrcVT.getSizeInBits() &&
1731           TLO.DAG.getDataLayout().isLittleEndian())
1732         return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1733 
1734       unsigned Opc =
1735           IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
1736       if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1737         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1738     }
1739 
1740     APInt InDemandedBits = DemandedBits.trunc(InBits);
1741     APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1742 
1743     // Since some of the sign extended bits are demanded, we know that the sign
1744     // bit is demanded.
1745     InDemandedBits.setBit(InBits - 1);
1746 
1747     if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1748                              Depth + 1))
1749       return true;
1750     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1751     assert(Known.getBitWidth() == InBits && "Src width has changed?");
1752 
1753     // If the sign bit is known one, the top bits match.
1754     Known = Known.sext(BitWidth);
1755 
1756     // If the sign bit is known zero, convert this to a zero extend.
1757     if (Known.isNonNegative()) {
1758       unsigned Opc =
1759           IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND;
1760       if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
1761         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
1762     }
1763     break;
1764   }
1765   case ISD::ANY_EXTEND:
1766   case ISD::ANY_EXTEND_VECTOR_INREG: {
1767     SDValue Src = Op.getOperand(0);
1768     EVT SrcVT = Src.getValueType();
1769     unsigned InBits = SrcVT.getScalarSizeInBits();
1770     unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1771     bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG;
1772 
1773     // If we only need the bottom element then we can just bitcast.
1774     // TODO: Handle ANY_EXTEND?
1775     if (IsVecInReg && DemandedElts == 1 &&
1776         VT.getSizeInBits() == SrcVT.getSizeInBits() &&
1777         TLO.DAG.getDataLayout().isLittleEndian())
1778       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
1779 
1780     APInt InDemandedBits = DemandedBits.trunc(InBits);
1781     APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
1782     if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
1783                              Depth + 1))
1784       return true;
1785     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1786     assert(Known.getBitWidth() == InBits && "Src width has changed?");
1787     Known = Known.zext(BitWidth, false /* => any extend */);
1788     break;
1789   }
1790   case ISD::TRUNCATE: {
1791     SDValue Src = Op.getOperand(0);
1792 
1793     // Simplify the input, using demanded bit information, and compute the known
1794     // zero/one bits live out.
1795     unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
1796     APInt TruncMask = DemandedBits.zext(OperandBitWidth);
1797     if (SimplifyDemandedBits(Src, TruncMask, Known, TLO, Depth + 1))
1798       return true;
1799     Known = Known.trunc(BitWidth);
1800 
1801     // Attempt to avoid multi-use ops if we don't need anything from them.
1802     if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1803             Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))
1804       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc));
1805 
1806     // If the input is only used by this truncate, see if we can shrink it based
1807     // on the known demanded bits.
1808     if (Src.getNode()->hasOneUse()) {
1809       switch (Src.getOpcode()) {
1810       default:
1811         break;
1812       case ISD::SRL:
1813         // Shrink SRL by a constant if none of the high bits shifted in are
1814         // demanded.
1815         if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT))
1816           // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
1817           // undesirable.
1818           break;
1819 
1820         SDValue ShAmt = Src.getOperand(1);
1821         auto *ShAmtC = dyn_cast<ConstantSDNode>(ShAmt);
1822         if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth))
1823           break;
1824         uint64_t ShVal = ShAmtC->getZExtValue();
1825 
1826         APInt HighBits =
1827             APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth);
1828         HighBits.lshrInPlace(ShVal);
1829         HighBits = HighBits.trunc(BitWidth);
1830 
1831         if (!(HighBits & DemandedBits)) {
1832           // None of the shifted in bits are needed.  Add a truncate of the
1833           // shift input, then shift it.
1834           if (TLO.LegalTypes())
1835             ShAmt = TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(VT, DL));
1836           SDValue NewTrunc =
1837               TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0));
1838           return TLO.CombineTo(
1839               Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, ShAmt));
1840         }
1841         break;
1842       }
1843     }
1844 
1845     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1846     break;
1847   }
1848   case ISD::AssertZext: {
1849     // AssertZext demands all of the high bits, plus any of the low bits
1850     // demanded by its users.
1851     EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1852     APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits());
1853     if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,
1854                              TLO, Depth + 1))
1855       return true;
1856     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
1857 
1858     Known.Zero |= ~InMask;
1859     break;
1860   }
1861   case ISD::EXTRACT_VECTOR_ELT: {
1862     SDValue Src = Op.getOperand(0);
1863     SDValue Idx = Op.getOperand(1);
1864     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1865     unsigned EltBitWidth = Src.getScalarValueSizeInBits();
1866 
1867     // Demand the bits from every vector element without a constant index.
1868     APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
1869     if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
1870       if (CIdx->getAPIntValue().ult(NumSrcElts))
1871         DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());
1872 
1873     // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
1874     // anything about the extended bits.
1875     APInt DemandedSrcBits = DemandedBits;
1876     if (BitWidth > EltBitWidth)
1877       DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);
1878 
1879     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
1880                              Depth + 1))
1881       return true;
1882 
1883     // Attempt to avoid multi-use ops if we don't need anything from them.
1884     if (!DemandedSrcBits.isAllOnesValue() ||
1885         !DemandedSrcElts.isAllOnesValue()) {
1886       if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1887               Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
1888         SDValue NewOp =
1889             TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx);
1890         return TLO.CombineTo(Op, NewOp);
1891       }
1892     }
1893 
1894     Known = Known2;
1895     if (BitWidth > EltBitWidth)
1896       Known = Known.zext(BitWidth, false /* => any extend */);
1897     break;
1898   }
1899   case ISD::BITCAST: {
1900     SDValue Src = Op.getOperand(0);
1901     EVT SrcVT = Src.getValueType();
1902     unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
1903 
1904     // If this is an FP->Int bitcast and if the sign bit is the only
1905     // thing demanded, turn this into a FGETSIGN.
1906     if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() &&
1907         DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) &&
1908         SrcVT.isFloatingPoint()) {
1909       bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT);
1910       bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
1911       if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 &&
1912           SrcVT != MVT::f128) {
1913         // Cannot eliminate/lower SHL for f128 yet.
1914         EVT Ty = OpVTLegal ? VT : MVT::i32;
1915         // Make a FGETSIGN + SHL to move the sign bit into the appropriate
1916         // place.  We expect the SHL to be eliminated by other optimizations.
1917         SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src);
1918         unsigned OpVTSizeInBits = Op.getValueSizeInBits();
1919         if (!OpVTLegal && OpVTSizeInBits > 32)
1920           Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign);
1921         unsigned ShVal = Op.getValueSizeInBits() - 1;
1922         SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT);
1923         return TLO.CombineTo(Op,
1924                              TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt));
1925       }
1926     }
1927 
1928     // Bitcast from a vector using SimplifyDemanded Bits/VectorElts.
1929     // Demand the elt/bit if any of the original elts/bits are demanded.
1930     // TODO - bigendian once we have test coverage.
1931     if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 &&
1932         TLO.DAG.getDataLayout().isLittleEndian()) {
1933       unsigned Scale = BitWidth / NumSrcEltBits;
1934       unsigned NumSrcElts = SrcVT.getVectorNumElements();
1935       APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
1936       APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
1937       for (unsigned i = 0; i != Scale; ++i) {
1938         unsigned Offset = i * NumSrcEltBits;
1939         APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
1940         if (!Sub.isNullValue()) {
1941           DemandedSrcBits |= Sub;
1942           for (unsigned j = 0; j != NumElts; ++j)
1943             if (DemandedElts[j])
1944               DemandedSrcElts.setBit((j * Scale) + i);
1945         }
1946       }
1947 
1948       APInt KnownSrcUndef, KnownSrcZero;
1949       if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
1950                                      KnownSrcZero, TLO, Depth + 1))
1951         return true;
1952 
1953       KnownBits KnownSrcBits;
1954       if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
1955                                KnownSrcBits, TLO, Depth + 1))
1956         return true;
1957     } else if ((NumSrcEltBits % BitWidth) == 0 &&
1958                TLO.DAG.getDataLayout().isLittleEndian()) {
1959       unsigned Scale = NumSrcEltBits / BitWidth;
1960       unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
1961       APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
1962       APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
1963       for (unsigned i = 0; i != NumElts; ++i)
1964         if (DemandedElts[i]) {
1965           unsigned Offset = (i % Scale) * BitWidth;
1966           DemandedSrcBits.insertBits(DemandedBits, Offset);
1967           DemandedSrcElts.setBit(i / Scale);
1968         }
1969 
1970       if (SrcVT.isVector()) {
1971         APInt KnownSrcUndef, KnownSrcZero;
1972         if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
1973                                        KnownSrcZero, TLO, Depth + 1))
1974           return true;
1975       }
1976 
1977       KnownBits KnownSrcBits;
1978       if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
1979                                KnownSrcBits, TLO, Depth + 1))
1980         return true;
1981     }
1982 
1983     // If this is a bitcast, let computeKnownBits handle it.  Only do this on a
1984     // recursive call where Known may be useful to the caller.
1985     if (Depth > 0) {
1986       Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
1987       return false;
1988     }
1989     break;
1990   }
1991   case ISD::ADD:
1992   case ISD::MUL:
1993   case ISD::SUB: {
1994     // Add, Sub, and Mul don't demand any bits in positions beyond that
1995     // of the highest bit demanded of them.
1996     SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
1997     SDNodeFlags Flags = Op.getNode()->getFlags();
1998     unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros();
1999     APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
2000     if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO,
2001                              Depth + 1) ||
2002         SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO,
2003                              Depth + 1) ||
2004         // See if the operation should be performed at a smaller bit width.
2005         ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) {
2006       if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
2007         // Disable the nsw and nuw flags. We can no longer guarantee that we
2008         // won't wrap after simplification.
2009         Flags.setNoSignedWrap(false);
2010         Flags.setNoUnsignedWrap(false);
2011         SDValue NewOp =
2012             TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2013         return TLO.CombineTo(Op, NewOp);
2014       }
2015       return true;
2016     }
2017 
2018     // Attempt to avoid multi-use ops if we don't need anything from them.
2019     if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
2020       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2021           Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
2022       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2023           Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
2024       if (DemandedOp0 || DemandedOp1) {
2025         Flags.setNoSignedWrap(false);
2026         Flags.setNoUnsignedWrap(false);
2027         Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2028         Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2029         SDValue NewOp =
2030             TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2031         return TLO.CombineTo(Op, NewOp);
2032       }
2033     }
2034 
2035     // If we have a constant operand, we may be able to turn it into -1 if we
2036     // do not demand the high bits. This can make the constant smaller to
2037     // encode, allow more general folding, or match specialized instruction
2038     // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that
2039     // is probably not useful (and could be detrimental).
2040     ConstantSDNode *C = isConstOrConstSplat(Op1);
2041     APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ);
2042     if (C && !C->isAllOnesValue() && !C->isOne() &&
2043         (C->getAPIntValue() | HighMask).isAllOnesValue()) {
2044       SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
2045       // Disable the nsw and nuw flags. We can no longer guarantee that we
2046       // won't wrap after simplification.
2047       Flags.setNoSignedWrap(false);
2048       Flags.setNoUnsignedWrap(false);
2049       SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags);
2050       return TLO.CombineTo(Op, NewOp);
2051     }
2052 
2053     LLVM_FALLTHROUGH;
2054   }
2055   default:
2056     if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
2057       if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,
2058                                             Known, TLO, Depth))
2059         return true;
2060       break;
2061     }
2062 
2063     // Just use computeKnownBits to compute output bits.
2064     Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
2065     break;
2066   }
2067 
2068   // If we know the value of all of the demanded bits, return this as a
2069   // constant.
2070   if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) {
2071     // Avoid folding to a constant if any OpaqueConstant is involved.
2072     const SDNode *N = Op.getNode();
2073     for (SDNodeIterator I = SDNodeIterator::begin(N),
2074                         E = SDNodeIterator::end(N);
2075          I != E; ++I) {
2076       SDNode *Op = *I;
2077       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
2078         if (C->isOpaque())
2079           return false;
2080     }
2081     // TODO: Handle float bits as well.
2082     if (VT.isInteger())
2083       return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT));
2084   }
2085 
2086   return false;
2087 }
2088 
2089 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op,
2090                                                 const APInt &DemandedElts,
2091                                                 APInt &KnownUndef,
2092                                                 APInt &KnownZero,
2093                                                 DAGCombinerInfo &DCI) const {
2094   SelectionDAG &DAG = DCI.DAG;
2095   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
2096                         !DCI.isBeforeLegalizeOps());
2097 
2098   bool Simplified =
2099       SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO);
2100   if (Simplified) {
2101     DCI.AddToWorklist(Op.getNode());
2102     DCI.CommitTargetLoweringOpt(TLO);
2103   }
2104 
2105   return Simplified;
2106 }
2107 
2108 /// Given a vector binary operation and known undefined elements for each input
2109 /// operand, compute whether each element of the output is undefined.
2110 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
2111                                          const APInt &UndefOp0,
2112                                          const APInt &UndefOp1) {
2113   EVT VT = BO.getValueType();
2114   assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() &&
2115          "Vector binop only");
2116 
2117   EVT EltVT = VT.getVectorElementType();
2118   unsigned NumElts = VT.getVectorNumElements();
2119   assert(UndefOp0.getBitWidth() == NumElts &&
2120          UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis");
2121 
2122   auto getUndefOrConstantElt = [&](SDValue V, unsigned Index,
2123                                    const APInt &UndefVals) {
2124     if (UndefVals[Index])
2125       return DAG.getUNDEF(EltVT);
2126 
2127     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2128       // Try hard to make sure that the getNode() call is not creating temporary
2129       // nodes. Ignore opaque integers because they do not constant fold.
2130       SDValue Elt = BV->getOperand(Index);
2131       auto *C = dyn_cast<ConstantSDNode>(Elt);
2132       if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))
2133         return Elt;
2134     }
2135 
2136     return SDValue();
2137   };
2138 
2139   APInt KnownUndef = APInt::getNullValue(NumElts);
2140   for (unsigned i = 0; i != NumElts; ++i) {
2141     // If both inputs for this element are either constant or undef and match
2142     // the element type, compute the constant/undef result for this element of
2143     // the vector.
2144     // TODO: Ideally we would use FoldConstantArithmetic() here, but that does
2145     // not handle FP constants. The code within getNode() should be refactored
2146     // to avoid the danger of creating a bogus temporary node here.
2147     SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0);
2148     SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1);
2149     if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT)
2150       if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef())
2151         KnownUndef.setBit(i);
2152   }
2153   return KnownUndef;
2154 }
2155 
/// Attempt to simplify \p Op given that only the vector lanes selected by
/// \p OriginalDemandedElts are required by Op's users.
///
/// Returns true when a simplification was committed via TLO.CombineTo (the
/// caller should then revisit the DAG). Otherwise returns false and fills
/// \p KnownUndef / \p KnownZero with the lanes of the result that are known
/// to be undef / zero respectively. Recursion is bounded by \p Depth against
/// SelectionDAG::MaxRecursionDepth. If \p AssumeSingleUse is set, multi-use
/// values are simplified as if this were their only use.
bool TargetLowering::SimplifyDemandedVectorElts(
    SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef,
    APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth,
    bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = OriginalDemandedElts;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert(VT.isVector() && "Expected vector op");
  assert(VT.getVectorNumElements() == NumElts &&
         "Mask size mismatches value type element count!");

  KnownUndef = KnownZero = APInt::getNullValue(NumElts);

  // Undef operand.
  if (Op.isUndef()) {
    KnownUndef.setAllBits();
    return false;
  }

  // If Op has other users, assume that all elements are needed.
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse)
    DemandedElts.setAllBits();

  // Not demanding any elements from Op.
  if (DemandedElts == 0) {
    KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }

  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return false;

  SDLoc DL(Op);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();

  switch (Op.getOpcode()) {
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0]) {
      KnownUndef.setAllBits();
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
    }
    // Only lane 0 is defined by SCALAR_TO_VECTOR; the rest are undef.
    KnownUndef.setHighBits(NumElts - 1);
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // We only handle vectors here.
    // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits?
    if (!SrcVT.isVector())
      break;

    // Fast handling of 'identity' bitcasts.
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    if (NumSrcElts == NumElts)
      return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
                                        KnownZero, TLO, Depth + 1);

    APInt SrcZero, SrcUndef;
    APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts);

    // Bitcast from 'large element' src vector to 'small element' vector, we
    // must demand a source element if any DemandedElt maps to it.
    if ((NumElts % NumSrcElts) == 0) {
      unsigned Scale = NumElts / NumSrcElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // Try calling SimplifyDemandedBits, converting demanded elts to the bits
      // of the large element.
      // TODO - bigendian once we have test coverage.
      if (TLO.DAG.getDataLayout().isLittleEndian()) {
        unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits();
        APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits);
        for (unsigned i = 0; i != NumElts; ++i)
          if (DemandedElts[i]) {
            // Each demanded small element selects a sub-range of bits within
            // its covering large source element.
            unsigned Ofs = (i % Scale) * EltSizeInBits;
            SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);
          }

        KnownBits Known;
        if (SimplifyDemandedBits(Src, SrcDemandedBits, Known, TLO, Depth + 1))
          return true;
      }

      // If the src element is zero/undef then all the output elements will be -
      // only demanded elements are guaranteed to be correct.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        if (SrcDemandedElts[i]) {
          if (SrcZero[i])
            KnownZero.setBits(i * Scale, (i + 1) * Scale);
          if (SrcUndef[i])
            KnownUndef.setBits(i * Scale, (i + 1) * Scale);
        }
      }
    }

    // Bitcast from 'small element' src vector to 'large element' vector, we
    // demand all smaller source elements covered by the larger demanded element
    // of this vector.
    if ((NumSrcElts % NumElts) == 0) {
      unsigned Scale = NumSrcElts / NumElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // If all the src elements covering an output element are zero/undef, then
      // the output element will be as well, assuming it was demanded.
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i]) {
          if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownZero.setBit(i);
          if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownUndef.setBit(i);
        }
      }
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    // Check all elements and simplify any unused elements with UNDEF.
    if (!DemandedElts.isAllOnesValue()) {
      // Don't simplify BROADCASTS.
      if (llvm::any_of(Op->op_values(),
                       [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
        SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
        bool Updated = false;
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i] && !Ops[i].isUndef()) {
            Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
            KnownUndef.setBit(i);
            Updated = true;
          }
        }
        if (Updated)
          return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
      }
    }
    // Classify each operand: undef lanes and (type-matching) zero lanes.
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue SrcOp = Op.getOperand(i);
      if (SrcOp.isUndef()) {
        KnownUndef.setBit(i);
      } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
                 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
        KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    // Simplify each concatenated subvector against its slice of the demanded
    // mask and stitch the known bits back together.
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      SDValue SubOp = Op.getOperand(i);
      APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      APInt SubUndef, SubZero;
      if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef.insertBits(SubUndef, i * NumSubElts);
      KnownZero.insertBits(SubZero, i * NumSubElts);
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Bail on a variable insertion index.
    if (!isa<ConstantSDNode>(Op.getOperand(2)))
      break;
    SDValue Base = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    EVT SubVT = Sub.getValueType();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    const APInt &Idx = Op.getConstantOperandAPInt(2);
    // Bail if the subvector would run off the end of the base vector.
    if (Idx.ugt(NumElts - NumSubElts))
      break;
    unsigned SubIdx = Idx.getZExtValue();
    APInt SubElts = DemandedElts.extractBits(NumSubElts, SubIdx);
    APInt SubUndef, SubZero;
    if (SimplifyDemandedVectorElts(Sub, SubElts, SubUndef, SubZero, TLO,
                                   Depth + 1))
      return true;
    APInt BaseElts = DemandedElts;
    BaseElts.insertBits(APInt::getNullValue(NumSubElts), SubIdx);

    // If none of the base operand elements are demanded, replace it with undef.
    if (!BaseElts && !Base.isUndef())
      return TLO.CombineTo(Op,
                           TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                                           TLO.DAG.getUNDEF(VT),
                                           Op.getOperand(1),
                                           Op.getOperand(2)));

    if (SimplifyDemandedVectorElts(Base, BaseElts, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;
    // The inserted range's known bits come from the subvector, not the base.
    KnownUndef.insertBits(SubUndef, SubIdx);
    KnownZero.insertBits(SubZero, SubIdx);
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Src = Op.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    // Only handle a constant, in-bounds extraction index.
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      APInt SrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
      APInt SrcUndef, SrcZero;
      if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef = SrcUndef.extractBits(NumElts, Idx);
      KnownZero = SrcZero.extractBits(NumElts, Idx);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

    // For a legal, constant insertion index, if we don't need this insertion
    // then strip it, else remove it from the demanded elts.
    if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
      unsigned Idx = CIdx->getZExtValue();
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);

      APInt DemandedVecElts(DemandedElts);
      DemandedVecElts.clearBit(Idx);
      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                     KnownZero, TLO, Depth + 1))
        return true;

      // The inserted lane's undef/zero state is determined by the scalar, not
      // by whatever the source vector held there.
      KnownUndef.clearBit(Idx);
      if (Scl.isUndef())
        KnownUndef.setBit(Idx);

      KnownZero.clearBit(Idx);
      if (isNullConstant(Scl) || isNullFPConstant(Scl))
        KnownZero.setBit(Idx);
      break;
    }

    APInt VecUndef, VecZero;
    if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
                                   Depth + 1))
      return true;
    // Without knowing the insertion index we can't set KnownUndef/KnownZero.
    break;
  }
  case ISD::VSELECT: {
    // Try to transform the select condition based on the current demanded
    // elements.
    // TODO: If a condition element is undef, we can choose from one arm of the
    //       select (and if one arm is undef, then we can propagate that to the
    //       result).
    // TODO - add support for constant vselect masks (see IR version of this).
    APInt UnusedUndef, UnusedZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
                                   UnusedZero, TLO, Depth + 1))
      return true;

    // See if we can simplify either vselect operand.
    APInt DemandedLHS(DemandedElts);
    APInt DemandedRHS(DemandedElts);
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    // Either arm may be selected, so a lane is only known if both arms agree.
    KnownUndef = UndefLHS & UndefRHS;
    KnownZero = ZeroLHS & ZeroRHS;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands..
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    // See if we can simplify either shuffle operand.
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    // Simplify mask using undef elements from LHS/RHS.
    bool Updated = false;
    bool IdentityLHS = true, IdentityRHS = true;
    SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
    for (unsigned i = 0; i != NumElts; ++i) {
      int &M = NewMask[i];
      if (M < 0)
        continue;
      // Lanes that are not demanded, or that read an undef source lane, can
      // be turned into "don't care" (-1) entries.
      if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
          (M >= (int)NumElts && UndefRHS[M - NumElts])) {
        Updated = true;
        M = -1;
      }
      IdentityLHS &= (M < 0) || (M == (int)i);
      IdentityRHS &= (M < 0) || ((M - NumElts) == i);
    }

    // Update legal shuffle masks based on demanded elements if it won't reduce
    // to Identity which can cause premature removal of the shuffle mask.
    if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {
      SDValue LegalShuffle =
          buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),
                                  NewMask, TLO.DAG);
      if (LegalShuffle)
        return TLO.CombineTo(Op, LegalShuffle);
    }

    // Propagate undef/zero elements from LHS/RHS.
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0) {
        KnownUndef.setBit(i);
      } else if (M < (int)NumElts) {
        if (UndefLHS[M])
          KnownUndef.setBit(i);
        if (ZeroLHS[M])
          KnownZero.setBit(i);
      } else {
        if (UndefRHS[M - NumElts])
          KnownUndef.setBit(i);
        if (ZeroRHS[M - NumElts])
          KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    APInt SrcUndef, SrcZero;
    SDValue Src = Op.getOperand(0);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
    if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    KnownZero = SrcZero.zextOrTrunc(NumElts);
    KnownUndef = SrcUndef.zextOrTrunc(NumElts);

    if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG &&
        Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
        DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) {
      // aext - if we just need the bottom element then we can bitcast.
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
    }

    if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
      // zext(undef) upper bits are guaranteed to be zero.
      if (DemandedElts.isSubsetOf(KnownUndef))
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
      KnownUndef.clearAllBits();
    }
    break;
  }

  // TODO: There are more binop opcodes that could be handled here - MUL, MIN,
  // MAX, saturated math, etc.
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM: {
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;
    APInt UndefLHS, ZeroLHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;

    KnownZero = ZeroLHS & ZeroRHS;
    KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS);
    break;
  }
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::ROTL:
  case ISD::ROTR: {
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;
    APInt UndefLHS, ZeroLHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;

    // Shifting a zero lane by anything still yields zero.
    KnownZero = ZeroLHS;
    KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop?
    break;
  }
  case ISD::MUL:
  case ISD::AND: {
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, SrcUndef,
                                   SrcZero, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                                   KnownZero, TLO, Depth + 1))
      return true;

    // If either side has a zero element, then the result element is zero, even
    // if the other is an UNDEF.
    // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros
    // and then handle 'and' nodes with the rest of the binop opcodes.
    KnownZero |= SrcZero;
    KnownUndef &= SrcUndef;
    KnownUndef &= ~KnownZero;
    break;
  }
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                                   KnownZero, TLO, Depth + 1))
      return true;

    if (Op.getOpcode() == ISD::ZERO_EXTEND) {
      // zext(undef) upper bits are guaranteed to be zero.
      if (DemandedElts.isSubsetOf(KnownUndef))
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
      KnownUndef.clearAllBits();
    }
    break;
  default: {
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
      // Give the target a chance to simplify its own nodes.
      if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
                                                  KnownZero, TLO, Depth))
        return true;
    } else {
      // Fall back to the bit-level simplifier with all bits demanded.
      KnownBits Known;
      APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits);
      if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known,
                               TLO, Depth, AssumeSingleUse))
        return true;
    }
    break;
  }
  }
  assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero");

  // Constant fold all undef cases.
  // TODO: Handle zero cases as well.
  if (DemandedElts.isSubsetOf(KnownUndef))
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

  return false;
}
2647 
2648 /// Determine which of the bits specified in Mask are known to be either zero or
2649 /// one and return them in the Known.
2650 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
2651                                                    KnownBits &Known,
2652                                                    const APInt &DemandedElts,
2653                                                    const SelectionDAG &DAG,
2654                                                    unsigned Depth) const {
2655   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2656           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2657           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2658           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2659          "Should use MaskedValueIsZero if you don't know whether Op"
2660          " is a target node!");
2661   Known.resetAll();
2662 }
2663 
/// GlobalISel counterpart of computeKnownBitsForTargetNode. The default
/// implementation claims no knowledge about the bits of register \p R.
void TargetLowering::computeKnownBitsForTargetInstr(
    GISelKnownBits &Analysis, Register R, KnownBits &Known,
    const APInt &DemandedElts, const MachineRegisterInfo &MRI,
    unsigned Depth) const {
  // Conservative default: nothing is known.
  Known.resetAll();
}
2670 
2671 void TargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
2672                                                    KnownBits &Known,
2673                                                    const APInt &DemandedElts,
2674                                                    const SelectionDAG &DAG,
2675                                                    unsigned Depth) const {
2676   assert(isa<FrameIndexSDNode>(Op) && "expected FrameIndex");
2677 
2678   if (unsigned Align = DAG.InferPtrAlignment(Op)) {
2679     // The low bits are known zero if the pointer is aligned.
2680     Known.Zero.setLowBits(Log2_32(Align));
2681   }
2682 }
2683 
2684 /// This method can be implemented by targets that want to expose additional
2685 /// information about sign bits to the DAG Combiner.
2686 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
2687                                                          const APInt &,
2688                                                          const SelectionDAG &,
2689                                                          unsigned Depth) const {
2690   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2691           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2692           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2693           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2694          "Should use ComputeNumSignBits if you don't know whether Op"
2695          " is a target node!");
2696   return 1;
2697 }
2698 
2699 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
2700     SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
2701     TargetLoweringOpt &TLO, unsigned Depth) const {
2702   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2703           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2704           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2705           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2706          "Should use SimplifyDemandedVectorElts if you don't know whether Op"
2707          " is a target node!");
2708   return false;
2709 }
2710 
2711 bool TargetLowering::SimplifyDemandedBitsForTargetNode(
2712     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
2713     KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
2714   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2715           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2716           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2717           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2718          "Should use SimplifyDemandedBits if you don't know whether Op"
2719          " is a target node!");
2720   computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
2721   return false;
2722 }
2723 
2724 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
2725     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
2726     SelectionDAG &DAG, unsigned Depth) const {
2727   assert(
2728       (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2729        Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2730        Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2731        Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2732       "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
2733       " is a target node!");
2734   return SDValue();
2735 }
2736 
2737 SDValue
2738 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
2739                                         SDValue N1, MutableArrayRef<int> Mask,
2740                                         SelectionDAG &DAG) const {
2741   bool LegalMask = isShuffleMaskLegal(Mask, VT);
2742   if (!LegalMask) {
2743     std::swap(N0, N1);
2744     ShuffleVectorSDNode::commuteMask(Mask);
2745     LegalMask = isShuffleMaskLegal(Mask, VT);
2746   }
2747 
2748   if (!LegalMask)
2749     return SDValue();
2750 
2751   return DAG.getVectorShuffle(VT, DL, N0, N1, Mask);
2752 }
2753 
/// Hook for targets that can identify an IR Constant behind a load node.
/// The base implementation identifies none and returns nullptr.
const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const {
  return nullptr;
}
2757 
2758 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
2759                                                   const SelectionDAG &DAG,
2760                                                   bool SNaN,
2761                                                   unsigned Depth) const {
2762   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2763           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2764           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2765           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
2766          "Should use isKnownNeverNaN if you don't know whether Op"
2767          " is a target node!");
2768   return false;
2769 }
2770 
2771 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must
2772 // work with truncating build vectors and vectors with elements of less than
2773 // 8 bits.
2774 bool TargetLowering::isConstTrueVal(const SDNode *N) const {
2775   if (!N)
2776     return false;
2777 
2778   APInt CVal;
2779   if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
2780     CVal = CN->getAPIntValue();
2781   } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
2782     auto *CN = BV->getConstantSplatNode();
2783     if (!CN)
2784       return false;
2785 
2786     // If this is a truncating build vector, truncate the splat value.
2787     // Otherwise, we may fail to match the expected values below.
2788     unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits();
2789     CVal = CN->getAPIntValue();
2790     if (BVEltWidth < CVal.getBitWidth())
2791       CVal = CVal.trunc(BVEltWidth);
2792   } else {
2793     return false;
2794   }
2795 
2796   switch (getBooleanContents(N->getValueType(0))) {
2797   case UndefinedBooleanContent:
2798     return CVal[0];
2799   case ZeroOrOneBooleanContent:
2800     return CVal.isOneValue();
2801   case ZeroOrNegativeOneBooleanContent:
2802     return CVal.isAllOnesValue();
2803   }
2804 
2805   llvm_unreachable("Invalid boolean contents");
2806 }
2807 
2808 bool TargetLowering::isConstFalseVal(const SDNode *N) const {
2809   if (!N)
2810     return false;
2811 
2812   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
2813   if (!CN) {
2814     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
2815     if (!BV)
2816       return false;
2817 
2818     // Only interested in constant splats, we don't care about undef
2819     // elements in identifying boolean constants and getConstantSplatNode
2820     // returns NULL if all ops are undef;
2821     CN = BV->getConstantSplatNode();
2822     if (!CN)
2823       return false;
2824   }
2825 
2826   if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
2827     return !CN->getAPIntValue()[0];
2828 
2829   return CN->isNullValue();
2830 }
2831 
2832 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
2833                                        bool SExt) const {
2834   if (VT == MVT::i1)
2835     return N->isOne();
2836 
2837   TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
2838   switch (Cnt) {
2839   case TargetLowering::ZeroOrOneBooleanContent:
2840     // An extended value of 1 is always true, unless its original type is i1,
2841     // in which case it will be sign extended to -1.
2842     return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
2843   case TargetLowering::UndefinedBooleanContent:
2844   case TargetLowering::ZeroOrNegativeOneBooleanContent:
2845     return N->isAllOnesValue() && SExt;
2846   }
2847   llvm_unreachable("Unexpected enumeration.");
2848 }
2849 
2850 /// This helper function of SimplifySetCC tries to optimize the comparison when
2851 /// either operand of the SetCC node is a bitwise-and instruction.
2852 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
2853                                          ISD::CondCode Cond, const SDLoc &DL,
2854                                          DAGCombinerInfo &DCI) const {
2855   // Match these patterns in any of their permutations:
2856   // (X & Y) == Y
2857   // (X & Y) != Y
2858   if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
2859     std::swap(N0, N1);
2860 
2861   EVT OpVT = N0.getValueType();
2862   if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
2863       (Cond != ISD::SETEQ && Cond != ISD::SETNE))
2864     return SDValue();
2865 
2866   SDValue X, Y;
2867   if (N0.getOperand(0) == N1) {
2868     X = N0.getOperand(1);
2869     Y = N0.getOperand(0);
2870   } else if (N0.getOperand(1) == N1) {
2871     X = N0.getOperand(0);
2872     Y = N0.getOperand(1);
2873   } else {
2874     return SDValue();
2875   }
2876 
2877   SelectionDAG &DAG = DCI.DAG;
2878   SDValue Zero = DAG.getConstant(0, DL, OpVT);
2879   if (DAG.isKnownToBeAPowerOfTwo(Y)) {
2880     // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
2881     // Note that where Y is variable and is known to have at most one bit set
2882     // (for example, if it is Z & 1) we cannot do this; the expressions are not
2883     // equivalent when Y == 0.
2884     assert(OpVT.isInteger());
2885     Cond = ISD::getSetCCInverse(Cond, OpVT);
2886     if (DCI.isBeforeLegalizeOps() ||
2887         isCondCodeLegal(Cond, N0.getSimpleValueType()))
2888       return DAG.getSetCC(DL, VT, N0, Zero, Cond);
2889   } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
2890     // If the target supports an 'and-not' or 'and-complement' logic operation,
2891     // try to use that to make a comparison operation more efficient.
2892     // But don't do this transform if the mask is a single bit because there are
2893     // more efficient ways to deal with that case (for example, 'bt' on x86 or
2894     // 'rlwinm' on PPC).
2895 
2896     // Bail out if the compare operand that we want to turn into a zero is
2897     // already a zero (otherwise, infinite loop).
2898     auto *YConst = dyn_cast<ConstantSDNode>(Y);
2899     if (YConst && YConst->isNullValue())
2900       return SDValue();
2901 
2902     // Transform this into: ~X & Y == 0.
2903     SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
2904     SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
2905     return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
2906   }
2907 
2908   return SDValue();
2909 }
2910 
2911 /// There are multiple IR patterns that could be checking whether certain
2912 /// truncation of a signed number would be lossy or not. The pattern which is
2913 /// best at IR level, may not lower optimally. Thus, we want to unfold it.
2914 /// We are looking for the following pattern: (KeptBits is a constant)
2915 ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
2916 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false.
2917 /// KeptBits also can't be 1, that would have been folded to  %x dstcond 0
2918 /// We will unfold it into the natural trunc+sext pattern:
2919 ///   ((%x << C) a>> C) dstcond %x
2920 /// Where  C = bitwidth(x) - KeptBits  and  C u< bitwidth(x)
2921 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
2922     EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI,
2923     const SDLoc &DL) const {
2924   // We must be comparing with a constant.
2925   ConstantSDNode *C1;
2926   if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
2927     return SDValue();
2928 
2929   // N0 should be:  add %x, (1 << (KeptBits-1))
2930   if (N0->getOpcode() != ISD::ADD)
2931     return SDValue();
2932 
2933   // And we must be 'add'ing a constant.
2934   ConstantSDNode *C01;
2935   if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))
2936     return SDValue();
2937 
2938   SDValue X = N0->getOperand(0);
2939   EVT XVT = X.getValueType();
2940 
2941   // Validate constants ...
2942 
2943   APInt I1 = C1->getAPIntValue();
2944 
2945   ISD::CondCode NewCond;
2946   if (Cond == ISD::CondCode::SETULT) {
2947     NewCond = ISD::CondCode::SETEQ;
2948   } else if (Cond == ISD::CondCode::SETULE) {
2949     NewCond = ISD::CondCode::SETEQ;
2950     // But need to 'canonicalize' the constant.
2951     I1 += 1;
2952   } else if (Cond == ISD::CondCode::SETUGT) {
2953     NewCond = ISD::CondCode::SETNE;
2954     // But need to 'canonicalize' the constant.
2955     I1 += 1;
2956   } else if (Cond == ISD::CondCode::SETUGE) {
2957     NewCond = ISD::CondCode::SETNE;
2958   } else
2959     return SDValue();
2960 
2961   APInt I01 = C01->getAPIntValue();
2962 
2963   auto checkConstants = [&I1, &I01]() -> bool {
2964     // Both of them must be power-of-two, and the constant from setcc is bigger.
2965     return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2();
2966   };
2967 
2968   if (checkConstants()) {
2969     // Great, e.g. got  icmp ult i16 (add i16 %x, 128), 256
2970   } else {
2971     // What if we invert constants? (and the target predicate)
2972     I1.negate();
2973     I01.negate();
2974     assert(XVT.isInteger());
2975     NewCond = getSetCCInverse(NewCond, XVT);
2976     if (!checkConstants())
2977       return SDValue();
2978     // Great, e.g. got  icmp uge i16 (add i16 %x, -128), -256
2979   }
2980 
2981   // They are power-of-two, so which bit is set?
2982   const unsigned KeptBits = I1.logBase2();
2983   const unsigned KeptBitsMinusOne = I01.logBase2();
2984 
2985   // Magic!
2986   if (KeptBits != (KeptBitsMinusOne + 1))
2987     return SDValue();
2988   assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable");
2989 
2990   // We don't want to do this in every single case.
2991   SelectionDAG &DAG = DCI.DAG;
2992   if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck(
2993           XVT, KeptBits))
2994     return SDValue();
2995 
2996   const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits;
2997   assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable");
2998 
2999   // Unfold into:  ((%x << C) a>> C) cond %x
3000   // Where 'cond' will be either 'eq' or 'ne'.
3001   SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT);
3002   SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt);
3003   SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt);
3004   SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond);
3005 
3006   return T2;
3007 }
3008 
3009 // (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
3010 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
3011     EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
3012     DAGCombinerInfo &DCI, const SDLoc &DL) const {
3013   assert(isConstOrConstSplat(N1C) &&
3014          isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() &&
3015          "Should be a comparison with 0.");
3016   assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3017          "Valid only for [in]equality comparisons.");
3018 
3019   unsigned NewShiftOpcode;
3020   SDValue X, C, Y;
3021 
3022   SelectionDAG &DAG = DCI.DAG;
3023   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3024 
3025   // Look for '(C l>>/<< Y)'.
3026   auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) {
3027     // The shift should be one-use.
3028     if (!V.hasOneUse())
3029       return false;
3030     unsigned OldShiftOpcode = V.getOpcode();
3031     switch (OldShiftOpcode) {
3032     case ISD::SHL:
3033       NewShiftOpcode = ISD::SRL;
3034       break;
3035     case ISD::SRL:
3036       NewShiftOpcode = ISD::SHL;
3037       break;
3038     default:
3039       return false; // must be a logical shift.
3040     }
3041     // We should be shifting a constant.
3042     // FIXME: best to use isConstantOrConstantVector().
3043     C = V.getOperand(0);
3044     ConstantSDNode *CC =
3045         isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3046     if (!CC)
3047       return false;
3048     Y = V.getOperand(1);
3049 
3050     ConstantSDNode *XC =
3051         isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3052     return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3053         X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
3054   };
3055 
3056   // LHS of comparison should be an one-use 'and'.
3057   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
3058     return SDValue();
3059 
3060   X = N0.getOperand(0);
3061   SDValue Mask = N0.getOperand(1);
3062 
3063   // 'and' is commutative!
3064   if (!Match(Mask)) {
3065     std::swap(X, Mask);
3066     if (!Match(Mask))
3067       return SDValue();
3068   }
3069 
3070   EVT VT = X.getValueType();
3071 
3072   // Produce:
3073   // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
3074   SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
3075   SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
3076   SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
3077   return T2;
3078 }
3079 
3080 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as
3081 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to
3082 /// handle the commuted versions of these patterns.
3083 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
3084                                            ISD::CondCode Cond, const SDLoc &DL,
3085                                            DAGCombinerInfo &DCI) const {
3086   unsigned BOpcode = N0.getOpcode();
3087   assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) &&
3088          "Unexpected binop");
3089   assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode");
3090 
3091   // (X + Y) == X --> Y == 0
3092   // (X - Y) == X --> Y == 0
3093   // (X ^ Y) == X --> Y == 0
3094   SelectionDAG &DAG = DCI.DAG;
3095   EVT OpVT = N0.getValueType();
3096   SDValue X = N0.getOperand(0);
3097   SDValue Y = N0.getOperand(1);
3098   if (X == N1)
3099     return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond);
3100 
3101   if (Y != N1)
3102     return SDValue();
3103 
3104   // (X + Y) == Y --> X == 0
3105   // (X ^ Y) == Y --> X == 0
3106   if (BOpcode == ISD::ADD || BOpcode == ISD::XOR)
3107     return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond);
3108 
3109   // The shift would not be valid if the operands are boolean (i1).
3110   if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1)
3111     return SDValue();
3112 
3113   // (X - Y) == Y --> X == Y << 1
3114   EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(),
3115                                  !DCI.isBeforeLegalize());
3116   SDValue One = DAG.getConstant(1, DL, ShiftVT);
3117   SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One);
3118   if (!DCI.isCalledByLegalizer())
3119     DCI.AddToWorklist(YShl1.getNode());
3120   return DAG.getSetCC(DL, VT, X, YShl1, Cond);
3121 }
3122 
3123 /// Try to simplify a setcc built with the specified operands and cc. If it is
3124 /// unable to simplify it, return a null SDValue.
3125 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
3126                                       ISD::CondCode Cond, bool foldBooleans,
3127                                       DAGCombinerInfo &DCI,
3128                                       const SDLoc &dl) const {
3129   SelectionDAG &DAG = DCI.DAG;
3130   const DataLayout &Layout = DAG.getDataLayout();
3131   EVT OpVT = N0.getValueType();
3132 
3133   // Constant fold or commute setcc.
3134   if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl))
3135     return Fold;
3136 
3137   // Ensure that the constant occurs on the RHS and fold constant comparisons.
3138   // TODO: Handle non-splat vector constants. All undef causes trouble.
3139   ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
3140   if (isConstOrConstSplat(N0) &&
3141       (DCI.isBeforeLegalizeOps() ||
3142        isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
3143     return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
3144 
3145   // If we have a subtract with the same 2 non-constant operands as this setcc
3146   // -- but in reverse order -- then try to commute the operands of this setcc
3147   // to match. A matching pair of setcc (cmp) and sub may be combined into 1
3148   // instruction on some targets.
3149   if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) &&
3150       (DCI.isBeforeLegalizeOps() ||
3151        isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) &&
3152       DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N1, N0 } ) &&
3153       !DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N0, N1 } ))
3154     return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
3155 
3156   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
3157     const APInt &C1 = N1C->getAPIntValue();
3158 
3159     // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
3160     // equality comparison, then we're just comparing whether X itself is
3161     // zero.
3162     if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) &&
3163         N0.getOperand(0).getOpcode() == ISD::CTLZ &&
3164         N0.getOperand(1).getOpcode() == ISD::Constant) {
3165       const APInt &ShAmt = N0.getConstantOperandAPInt(1);
3166       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3167           ShAmt == Log2_32(N0.getValueSizeInBits())) {
3168         if ((C1 == 0) == (Cond == ISD::SETEQ)) {
3169           // (srl (ctlz x), 5) == 0  -> X != 0
3170           // (srl (ctlz x), 5) != 1  -> X != 0
3171           Cond = ISD::SETNE;
3172         } else {
3173           // (srl (ctlz x), 5) != 0  -> X == 0
3174           // (srl (ctlz x), 5) == 1  -> X == 0
3175           Cond = ISD::SETEQ;
3176         }
3177         SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
3178         return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
3179                             Zero, Cond);
3180       }
3181     }
3182 
3183     SDValue CTPOP = N0;
3184     // Look through truncs that don't change the value of a ctpop.
3185     if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
3186       CTPOP = N0.getOperand(0);
3187 
3188     if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
3189         (N0 == CTPOP ||
3190          N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) {
3191       EVT CTVT = CTPOP.getValueType();
3192       SDValue CTOp = CTPOP.getOperand(0);
3193 
3194       // (ctpop x) u< 2 -> (x & x-1) == 0
3195       // (ctpop x) u> 1 -> (x & x-1) != 0
3196       if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
3197         SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
3198         SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
3199         SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
3200         ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
3201         return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
3202       }
3203 
3204       // If ctpop is not supported, expand a power-of-2 comparison based on it.
3205       if (C1 == 1 && !isOperationLegalOrCustom(ISD::CTPOP, CTVT) &&
3206           (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3207         // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0)
3208         // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0)
3209         SDValue Zero = DAG.getConstant(0, dl, CTVT);
3210         SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
3211         assert(CTVT.isInteger());
3212         ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT);
3213         SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
3214         SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
3215         SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond);
3216         SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond);
3217         unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR;
3218         return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS);
3219       }
3220     }
3221 
3222     // (zext x) == C --> x == (trunc C)
3223     // (sext x) == C --> x == (trunc C)
3224     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3225         DCI.isBeforeLegalize() && N0->hasOneUse()) {
3226       unsigned MinBits = N0.getValueSizeInBits();
3227       SDValue PreExt;
3228       bool Signed = false;
3229       if (N0->getOpcode() == ISD::ZERO_EXTEND) {
3230         // ZExt
3231         MinBits = N0->getOperand(0).getValueSizeInBits();
3232         PreExt = N0->getOperand(0);
3233       } else if (N0->getOpcode() == ISD::AND) {
3234         // DAGCombine turns costly ZExts into ANDs
3235         if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
3236           if ((C->getAPIntValue()+1).isPowerOf2()) {
3237             MinBits = C->getAPIntValue().countTrailingOnes();
3238             PreExt = N0->getOperand(0);
3239           }
3240       } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
3241         // SExt
3242         MinBits = N0->getOperand(0).getValueSizeInBits();
3243         PreExt = N0->getOperand(0);
3244         Signed = true;
3245       } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
3246         // ZEXTLOAD / SEXTLOAD
3247         if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
3248           MinBits = LN0->getMemoryVT().getSizeInBits();
3249           PreExt = N0;
3250         } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
3251           Signed = true;
3252           MinBits = LN0->getMemoryVT().getSizeInBits();
3253           PreExt = N0;
3254         }
3255       }
3256 
3257       // Figure out how many bits we need to preserve this constant.
3258       unsigned ReqdBits = Signed ?
3259         C1.getBitWidth() - C1.getNumSignBits() + 1 :
3260         C1.getActiveBits();
3261 
3262       // Make sure we're not losing bits from the constant.
3263       if (MinBits > 0 &&
3264           MinBits < C1.getBitWidth() &&
3265           MinBits >= ReqdBits) {
3266         EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
3267         if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
3268           // Will get folded away.
3269           SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
3270           if (MinBits == 1 && C1 == 1)
3271             // Invert the condition.
3272             return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1),
3273                                 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3274           SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
3275           return DAG.getSetCC(dl, VT, Trunc, C, Cond);
3276         }
3277 
3278         // If truncating the setcc operands is not desirable, we can still
3279         // simplify the expression in some cases:
3280         // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc)
3281         // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc))
3282         // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc))
3283         // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc)
3284         // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc))
3285         // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc)
3286         SDValue TopSetCC = N0->getOperand(0);
3287         unsigned N0Opc = N0->getOpcode();
3288         bool SExt = (N0Opc == ISD::SIGN_EXTEND);
3289         if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
3290             TopSetCC.getOpcode() == ISD::SETCC &&
3291             (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
3292             (isConstFalseVal(N1C) ||
3293              isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
3294 
3295           bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
3296                          (!N1C->isNullValue() && Cond == ISD::SETNE);
3297 
3298           if (!Inverse)
3299             return TopSetCC;
3300 
3301           ISD::CondCode InvCond = ISD::getSetCCInverse(
3302               cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
3303               TopSetCC.getOperand(0).getValueType());
3304           return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
3305                                       TopSetCC.getOperand(1),
3306                                       InvCond);
3307         }
3308       }
3309     }
3310 
3311     // If the LHS is '(and load, const)', the RHS is 0, the test is for
3312     // equality or unsigned, and all 1 bits of the const are in the same
3313     // partial word, see if we can shorten the load.
3314     if (DCI.isBeforeLegalize() &&
3315         !ISD::isSignedIntSetCC(Cond) &&
3316         N0.getOpcode() == ISD::AND && C1 == 0 &&
3317         N0.getNode()->hasOneUse() &&
3318         isa<LoadSDNode>(N0.getOperand(0)) &&
3319         N0.getOperand(0).getNode()->hasOneUse() &&
3320         isa<ConstantSDNode>(N0.getOperand(1))) {
3321       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
3322       APInt bestMask;
3323       unsigned bestWidth = 0, bestOffset = 0;
3324       if (Lod->isSimple() && Lod->isUnindexed()) {
3325         unsigned origWidth = N0.getValueSizeInBits();
3326         unsigned maskWidth = origWidth;
3327         // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
3328         // 8 bits, but have to be careful...
3329         if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
3330           origWidth = Lod->getMemoryVT().getSizeInBits();
3331         const APInt &Mask = N0.getConstantOperandAPInt(1);
3332         for (unsigned width = origWidth / 2; width>=8; width /= 2) {
3333           APInt newMask = APInt::getLowBitsSet(maskWidth, width);
3334           for (unsigned offset=0; offset<origWidth/width; offset++) {
3335             if (Mask.isSubsetOf(newMask)) {
3336               if (Layout.isLittleEndian())
3337                 bestOffset = (uint64_t)offset * (width/8);
3338               else
3339                 bestOffset = (origWidth/width - offset - 1) * (width/8);
3340               bestMask = Mask.lshr(offset * (width/8) * 8);
3341               bestWidth = width;
3342               break;
3343             }
3344             newMask <<= width;
3345           }
3346         }
3347       }
3348       if (bestWidth) {
3349         EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
3350         if (newVT.isRound() &&
3351             shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) {
3352           SDValue Ptr = Lod->getBasePtr();
3353           if (bestOffset != 0)
3354             Ptr = DAG.getMemBasePlusOffset(Ptr, bestOffset, dl);
3355           unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
3356           SDValue NewLoad = DAG.getLoad(
3357               newVT, dl, Lod->getChain(), Ptr,
3358               Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign);
3359           return DAG.getSetCC(dl, VT,
3360                               DAG.getNode(ISD::AND, dl, newVT, NewLoad,
3361                                       DAG.getConstant(bestMask.trunc(bestWidth),
3362                                                       dl, newVT)),
3363                               DAG.getConstant(0LL, dl, newVT), Cond);
3364         }
3365       }
3366     }
3367 
3368     // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
3369     if (N0.getOpcode() == ISD::ZERO_EXTEND) {
3370       unsigned InSize = N0.getOperand(0).getValueSizeInBits();
3371 
3372       // If the comparison constant has bits in the upper part, the
3373       // zero-extended value could never match.
3374       if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
3375                                               C1.getBitWidth() - InSize))) {
3376         switch (Cond) {
3377         case ISD::SETUGT:
3378         case ISD::SETUGE:
3379         case ISD::SETEQ:
3380           return DAG.getConstant(0, dl, VT);
3381         case ISD::SETULT:
3382         case ISD::SETULE:
3383         case ISD::SETNE:
3384           return DAG.getConstant(1, dl, VT);
3385         case ISD::SETGT:
3386         case ISD::SETGE:
3387           // True if the sign bit of C1 is set.
3388           return DAG.getConstant(C1.isNegative(), dl, VT);
3389         case ISD::SETLT:
3390         case ISD::SETLE:
3391           // True if the sign bit of C1 isn't set.
3392           return DAG.getConstant(C1.isNonNegative(), dl, VT);
3393         default:
3394           break;
3395         }
3396       }
3397 
3398       // Otherwise, we can perform the comparison with the low bits.
3399       switch (Cond) {
3400       case ISD::SETEQ:
3401       case ISD::SETNE:
3402       case ISD::SETUGT:
3403       case ISD::SETUGE:
3404       case ISD::SETULT:
3405       case ISD::SETULE: {
3406         EVT newVT = N0.getOperand(0).getValueType();
3407         if (DCI.isBeforeLegalizeOps() ||
3408             (isOperationLegal(ISD::SETCC, newVT) &&
3409              isCondCodeLegal(Cond, newVT.getSimpleVT()))) {
3410           EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT);
3411           SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
3412 
3413           SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
3414                                           NewConst, Cond);
3415           return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
3416         }
3417         break;
3418       }
3419       default:
3420         break; // todo, be more careful with signed comparisons
3421       }
3422     } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
3423                (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3424       EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
3425       unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
3426       EVT ExtDstTy = N0.getValueType();
3427       unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
3428 
3429       // If the constant doesn't fit into the number of bits for the source of
3430       // the sign extension, it is impossible for both sides to be equal.
3431       if (C1.getMinSignedBits() > ExtSrcTyBits)
3432         return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
3433 
3434       SDValue ZextOp;
3435       EVT Op0Ty = N0.getOperand(0).getValueType();
3436       if (Op0Ty == ExtSrcTy) {
3437         ZextOp = N0.getOperand(0);
3438       } else {
3439         APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
3440         ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
3441                              DAG.getConstant(Imm, dl, Op0Ty));
3442       }
3443       if (!DCI.isCalledByLegalizer())
3444         DCI.AddToWorklist(ZextOp.getNode());
3445       // Otherwise, make this a use of a zext.
3446       return DAG.getSetCC(dl, VT, ZextOp,
3447                           DAG.getConstant(C1 & APInt::getLowBitsSet(
3448                                                               ExtDstTyBits,
3449                                                               ExtSrcTyBits),
3450                                           dl, ExtDstTy),
3451                           Cond);
3452     } else if ((N1C->isNullValue() || N1C->isOne()) &&
3453                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3454       // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
3455       if (N0.getOpcode() == ISD::SETCC &&
3456           isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) &&
3457           (N0.getValueType() == MVT::i1 ||
3458            getBooleanContents(N0.getOperand(0).getValueType()) ==
3459                        ZeroOrOneBooleanContent)) {
3460         bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne());
3461         if (TrueWhenTrue)
3462           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
3463         // Invert the condition.
3464         ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
3465         CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType());
3466         if (DCI.isBeforeLegalizeOps() ||
3467             isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
3468           return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
3469       }
3470 
3471       if ((N0.getOpcode() == ISD::XOR ||
3472            (N0.getOpcode() == ISD::AND &&
3473             N0.getOperand(0).getOpcode() == ISD::XOR &&
3474             N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
3475           isa<ConstantSDNode>(N0.getOperand(1)) &&
3476           cast<ConstantSDNode>(N0.getOperand(1))->isOne()) {
3477         // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
3478         // can only do this if the top bits are known zero.
3479         unsigned BitWidth = N0.getValueSizeInBits();
3480         if (DAG.MaskedValueIsZero(N0,
3481                                   APInt::getHighBitsSet(BitWidth,
3482                                                         BitWidth-1))) {
3483           // Okay, get the un-inverted input value.
3484           SDValue Val;
3485           if (N0.getOpcode() == ISD::XOR) {
3486             Val = N0.getOperand(0);
3487           } else {
3488             assert(N0.getOpcode() == ISD::AND &&
3489                     N0.getOperand(0).getOpcode() == ISD::XOR);
3490             // ((X^1)&1)^1 -> X & 1
3491             Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
3492                               N0.getOperand(0).getOperand(0),
3493                               N0.getOperand(1));
3494           }
3495 
3496           return DAG.getSetCC(dl, VT, Val, N1,
3497                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3498         }
3499       } else if (N1C->isOne()) {
3500         SDValue Op0 = N0;
3501         if (Op0.getOpcode() == ISD::TRUNCATE)
3502           Op0 = Op0.getOperand(0);
3503 
3504         if ((Op0.getOpcode() == ISD::XOR) &&
3505             Op0.getOperand(0).getOpcode() == ISD::SETCC &&
3506             Op0.getOperand(1).getOpcode() == ISD::SETCC) {
3507           SDValue XorLHS = Op0.getOperand(0);
3508           SDValue XorRHS = Op0.getOperand(1);
3509           // Ensure that the input setccs return an i1 type or 0/1 value.
3510           if (Op0.getValueType() == MVT::i1 ||
3511               (getBooleanContents(XorLHS.getOperand(0).getValueType()) ==
3512                       ZeroOrOneBooleanContent &&
3513                getBooleanContents(XorRHS.getOperand(0).getValueType()) ==
3514                         ZeroOrOneBooleanContent)) {
3515             // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
3516             Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
3517             return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond);
3518           }
3519         }
3520         if (Op0.getOpcode() == ISD::AND &&
3521             isa<ConstantSDNode>(Op0.getOperand(1)) &&
3522             cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) {
3523           // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
3524           if (Op0.getValueType().bitsGT(VT))
3525             Op0 = DAG.getNode(ISD::AND, dl, VT,
3526                           DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
3527                           DAG.getConstant(1, dl, VT));
3528           else if (Op0.getValueType().bitsLT(VT))
3529             Op0 = DAG.getNode(ISD::AND, dl, VT,
3530                         DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
3531                         DAG.getConstant(1, dl, VT));
3532 
3533           return DAG.getSetCC(dl, VT, Op0,
3534                               DAG.getConstant(0, dl, Op0.getValueType()),
3535                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3536         }
3537         if (Op0.getOpcode() == ISD::AssertZext &&
3538             cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
3539           return DAG.getSetCC(dl, VT, Op0,
3540                               DAG.getConstant(0, dl, Op0.getValueType()),
3541                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
3542       }
3543     }
3544 
3545     // Given:
3546     //   icmp eq/ne (urem %x, %y), 0
3547     // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
3548     //   icmp eq/ne %x, 0
3549     if (N0.getOpcode() == ISD::UREM && N1C->isNullValue() &&
3550         (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3551       KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0));
3552       KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1));
3553       if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
3554         return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond);
3555     }
3556 
3557     if (SDValue V =
3558             optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl))
3559       return V;
3560   }
3561 
3562   // These simplifications apply to splat vectors as well.
3563   // TODO: Handle more splat vector cases.
3564   if (auto *N1C = isConstOrConstSplat(N1)) {
3565     const APInt &C1 = N1C->getAPIntValue();
3566 
3567     APInt MinVal, MaxVal;
3568     unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits();
3569     if (ISD::isSignedIntSetCC(Cond)) {
3570       MinVal = APInt::getSignedMinValue(OperandBitSize);
3571       MaxVal = APInt::getSignedMaxValue(OperandBitSize);
3572     } else {
3573       MinVal = APInt::getMinValue(OperandBitSize);
3574       MaxVal = APInt::getMaxValue(OperandBitSize);
3575     }
3576 
3577     // Canonicalize GE/LE comparisons to use GT/LT comparisons.
3578     if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
3579       // X >= MIN --> true
3580       if (C1 == MinVal)
3581         return DAG.getBoolConstant(true, dl, VT, OpVT);
3582 
3583       if (!VT.isVector()) { // TODO: Support this for vectors.
3584         // X >= C0 --> X > (C0 - 1)
3585         APInt C = C1 - 1;
3586         ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
3587         if ((DCI.isBeforeLegalizeOps() ||
3588              isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
3589             (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
3590                                   isLegalICmpImmediate(C.getSExtValue())))) {
3591           return DAG.getSetCC(dl, VT, N0,
3592                               DAG.getConstant(C, dl, N1.getValueType()),
3593                               NewCC);
3594         }
3595       }
3596     }
3597 
3598     if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
3599       // X <= MAX --> true
3600       if (C1 == MaxVal)
3601         return DAG.getBoolConstant(true, dl, VT, OpVT);
3602 
3603       // X <= C0 --> X < (C0 + 1)
3604       if (!VT.isVector()) { // TODO: Support this for vectors.
3605         APInt C = C1 + 1;
3606         ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
3607         if ((DCI.isBeforeLegalizeOps() ||
3608              isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
3609             (!N1C->isOpaque() || (C.getBitWidth() <= 64 &&
3610                                   isLegalICmpImmediate(C.getSExtValue())))) {
3611           return DAG.getSetCC(dl, VT, N0,
3612                               DAG.getConstant(C, dl, N1.getValueType()),
3613                               NewCC);
3614         }
3615       }
3616     }
3617 
3618     if (Cond == ISD::SETLT || Cond == ISD::SETULT) {
3619       if (C1 == MinVal)
3620         return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false
3621 
3622       // TODO: Support this for vectors after legalize ops.
3623       if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3624         // Canonicalize setlt X, Max --> setne X, Max
3625         if (C1 == MaxVal)
3626           return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
3627 
3628         // If we have setult X, 1, turn it into seteq X, 0
3629         if (C1 == MinVal+1)
3630           return DAG.getSetCC(dl, VT, N0,
3631                               DAG.getConstant(MinVal, dl, N0.getValueType()),
3632                               ISD::SETEQ);
3633       }
3634     }
3635 
3636     if (Cond == ISD::SETGT || Cond == ISD::SETUGT) {
3637       if (C1 == MaxVal)
3638         return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false
3639 
3640       // TODO: Support this for vectors after legalize ops.
3641       if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3642         // Canonicalize setgt X, Min --> setne X, Min
3643         if (C1 == MinVal)
3644           return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
3645 
3646         // If we have setugt X, Max-1, turn it into seteq X, Max
3647         if (C1 == MaxVal-1)
3648           return DAG.getSetCC(dl, VT, N0,
3649                               DAG.getConstant(MaxVal, dl, N0.getValueType()),
3650                               ISD::SETEQ);
3651       }
3652     }
3653 
3654     if (Cond == ISD::SETEQ || Cond == ISD::SETNE) {
3655       // (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
3656       if (C1.isNullValue())
3657         if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
3658                 VT, N0, N1, Cond, DCI, dl))
3659           return CC;
3660     }
3661 
3662     // If we have "setcc X, C0", check to see if we can shrink the immediate
3663     // by changing cc.
3664     // TODO: Support this for vectors after legalize ops.
3665     if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3666       // SETUGT X, SINTMAX  -> SETLT X, 0
3667       if (Cond == ISD::SETUGT &&
3668           C1 == APInt::getSignedMaxValue(OperandBitSize))
3669         return DAG.getSetCC(dl, VT, N0,
3670                             DAG.getConstant(0, dl, N1.getValueType()),
3671                             ISD::SETLT);
3672 
3673       // SETULT X, SINTMIN  -> SETGT X, -1
3674       if (Cond == ISD::SETULT &&
3675           C1 == APInt::getSignedMinValue(OperandBitSize)) {
3676         SDValue ConstMinusOne =
3677             DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
3678                             N1.getValueType());
3679         return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
3680       }
3681     }
3682   }
3683 
3684   // Back to non-vector simplifications.
3685   // TODO: Can we do these for vector splats?
3686   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
3687     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3688     const APInt &C1 = N1C->getAPIntValue();
3689     EVT ShValTy = N0.getValueType();
3690 
3691     // Fold bit comparisons when we can.
3692     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3693         (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) &&
3694         N0.getOpcode() == ISD::AND) {
3695       if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3696         EVT ShiftTy =
3697             getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
3698         if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
3699           // Perform the xform if the AND RHS is a single bit.
3700           unsigned ShCt = AndRHS->getAPIntValue().logBase2();
3701           if (AndRHS->getAPIntValue().isPowerOf2() &&
3702               !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
3703             return DAG.getNode(ISD::TRUNCATE, dl, VT,
3704                                DAG.getNode(ISD::SRL, dl, ShValTy, N0,
3705                                            DAG.getConstant(ShCt, dl, ShiftTy)));
3706           }
3707         } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
3708           // (X & 8) == 8  -->  (X & 8) >> 3
3709           // Perform the xform if C1 is a single bit.
3710           unsigned ShCt = C1.logBase2();
3711           if (C1.isPowerOf2() &&
3712               !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
3713             return DAG.getNode(ISD::TRUNCATE, dl, VT,
3714                                DAG.getNode(ISD::SRL, dl, ShValTy, N0,
3715                                            DAG.getConstant(ShCt, dl, ShiftTy)));
3716           }
3717         }
3718       }
3719     }
3720 
3721     if (C1.getMinSignedBits() <= 64 &&
3722         !isLegalICmpImmediate(C1.getSExtValue())) {
3723       EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
3724       // (X & -256) == 256 -> (X >> 8) == 1
3725       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3726           N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
3727         if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3728           const APInt &AndRHSC = AndRHS->getAPIntValue();
3729           if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
3730             unsigned ShiftBits = AndRHSC.countTrailingZeros();
3731             if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
3732               SDValue Shift =
3733                 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0),
3734                             DAG.getConstant(ShiftBits, dl, ShiftTy));
3735               SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy);
3736               return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
3737             }
3738           }
3739         }
3740       } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
3741                  Cond == ISD::SETULE || Cond == ISD::SETUGT) {
3742         bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
3743         // X <  0x100000000 -> (X >> 32) <  1
3744         // X >= 0x100000000 -> (X >> 32) >= 1
3745         // X <= 0x0ffffffff -> (X >> 32) <  1
3746         // X >  0x0ffffffff -> (X >> 32) >= 1
3747         unsigned ShiftBits;
3748         APInt NewC = C1;
3749         ISD::CondCode NewCond = Cond;
3750         if (AdjOne) {
3751           ShiftBits = C1.countTrailingOnes();
3752           NewC = NewC + 1;
3753           NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3754         } else {
3755           ShiftBits = C1.countTrailingZeros();
3756         }
3757         NewC.lshrInPlace(ShiftBits);
3758         if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
3759             isLegalICmpImmediate(NewC.getSExtValue()) &&
3760             !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
3761           SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0,
3762                                       DAG.getConstant(ShiftBits, dl, ShiftTy));
3763           SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy);
3764           return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
3765         }
3766       }
3767     }
3768   }
3769 
3770   if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
3771     auto *CFP = cast<ConstantFPSDNode>(N1);
3772     assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value");
3773 
3774     // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
3775     // constant if knowing that the operand is non-nan is enough.  We prefer to
3776     // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
3777     // materialize 0.0.
3778     if (Cond == ISD::SETO || Cond == ISD::SETUO)
3779       return DAG.getSetCC(dl, VT, N0, N0, Cond);
3780 
3781     // setcc (fneg x), C -> setcc swap(pred) x, -C
3782     if (N0.getOpcode() == ISD::FNEG) {
3783       ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond);
3784       if (DCI.isBeforeLegalizeOps() ||
3785           isCondCodeLegal(SwapCond, N0.getSimpleValueType())) {
3786         SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1);
3787         return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond);
3788       }
3789     }
3790 
3791     // If the condition is not legal, see if we can find an equivalent one
3792     // which is legal.
3793     if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
3794       // If the comparison was an awkward floating-point == or != and one of
3795       // the comparison operands is infinity or negative infinity, convert the
3796       // condition to a less-awkward <= or >=.
3797       if (CFP->getValueAPF().isInfinity()) {
3798         if (CFP->getValueAPF().isNegative()) {
3799           if (Cond == ISD::SETOEQ &&
3800               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
3801             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
3802           if (Cond == ISD::SETUEQ &&
3803               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
3804             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
3805           if (Cond == ISD::SETUNE &&
3806               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
3807             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
3808           if (Cond == ISD::SETONE &&
3809               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
3810             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
3811         } else {
3812           if (Cond == ISD::SETOEQ &&
3813               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
3814             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
3815           if (Cond == ISD::SETUEQ &&
3816               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
3817             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
3818           if (Cond == ISD::SETUNE &&
3819               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
3820             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
3821           if (Cond == ISD::SETONE &&
3822               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
3823             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
3824         }
3825       }
3826     }
3827   }
3828 
3829   if (N0 == N1) {
3830     // The sext(setcc()) => setcc() optimization relies on the appropriate
3831     // constant being emitted.
3832     assert(!N0.getValueType().isInteger() &&
3833            "Integer types should be handled by FoldSetCC");
3834 
3835     bool EqTrue = ISD::isTrueWhenEqual(Cond);
3836     unsigned UOF = ISD::getUnorderedFlavor(Cond);
3837     if (UOF == 2) // FP operators that are undefined on NaNs.
3838       return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
3839     if (UOF == unsigned(EqTrue))
3840       return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
3841     // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
3842     // if it is not already.
3843     ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
3844     if (NewCond != Cond &&
3845         (DCI.isBeforeLegalizeOps() ||
3846                             isCondCodeLegal(NewCond, N0.getSimpleValueType())))
3847       return DAG.getSetCC(dl, VT, N0, N1, NewCond);
3848   }
3849 
3850   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3851       N0.getValueType().isInteger()) {
3852     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
3853         N0.getOpcode() == ISD::XOR) {
3854       // Simplify (X+Y) == (X+Z) -->  Y == Z
3855       if (N0.getOpcode() == N1.getOpcode()) {
3856         if (N0.getOperand(0) == N1.getOperand(0))
3857           return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
3858         if (N0.getOperand(1) == N1.getOperand(1))
3859           return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
3860         if (isCommutativeBinOp(N0.getOpcode())) {
3861           // If X op Y == Y op X, try other combinations.
3862           if (N0.getOperand(0) == N1.getOperand(1))
3863             return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
3864                                 Cond);
3865           if (N0.getOperand(1) == N1.getOperand(0))
3866             return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
3867                                 Cond);
3868         }
3869       }
3870 
3871       // If RHS is a legal immediate value for a compare instruction, we need
3872       // to be careful about increasing register pressure needlessly.
3873       bool LegalRHSImm = false;
3874 
3875       if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
3876         if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3877           // Turn (X+C1) == C2 --> X == C2-C1
3878           if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
3879             return DAG.getSetCC(dl, VT, N0.getOperand(0),
3880                                 DAG.getConstant(RHSC->getAPIntValue()-
3881                                                 LHSR->getAPIntValue(),
3882                                 dl, N0.getValueType()), Cond);
3883           }
3884 
3885           // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
3886           if (N0.getOpcode() == ISD::XOR)
3887             // If we know that all of the inverted bits are zero, don't bother
3888             // performing the inversion.
3889             if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
3890               return
3891                 DAG.getSetCC(dl, VT, N0.getOperand(0),
3892                              DAG.getConstant(LHSR->getAPIntValue() ^
3893                                                RHSC->getAPIntValue(),
3894                                              dl, N0.getValueType()),
3895                              Cond);
3896         }
3897 
3898         // Turn (C1-X) == C2 --> X == C1-C2
3899         if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
3900           if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
3901             return
3902               DAG.getSetCC(dl, VT, N0.getOperand(1),
3903                            DAG.getConstant(SUBC->getAPIntValue() -
3904                                              RHSC->getAPIntValue(),
3905                                            dl, N0.getValueType()),
3906                            Cond);
3907           }
3908         }
3909 
3910         // Could RHSC fold directly into a compare?
3911         if (RHSC->getValueType(0).getSizeInBits() <= 64)
3912           LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
3913       }
3914 
3915       // (X+Y) == X --> Y == 0 and similar folds.
3916       // Don't do this if X is an immediate that can fold into a cmp
3917       // instruction and X+Y has other uses. It could be an induction variable
3918       // chain, and the transform would increase register pressure.
3919       if (!LegalRHSImm || N0.hasOneUse())
3920         if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI))
3921           return V;
3922     }
3923 
3924     if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
3925         N1.getOpcode() == ISD::XOR)
3926       if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI))
3927         return V;
3928 
3929     if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI))
3930       return V;
3931   }
3932 
3933   // Fold remainder of division by a constant.
3934   if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) &&
3935       N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3936     AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
3937 
3938     // When division is cheap or optimizing for minimum size,
3939     // fall through to DIVREM creation by skipping this fold.
3940     if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) {
3941       if (N0.getOpcode() == ISD::UREM) {
3942         if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl))
3943           return Folded;
3944       } else if (N0.getOpcode() == ISD::SREM) {
3945         if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl))
3946           return Folded;
3947       }
3948     }
3949   }
3950 
3951   // Fold away ALL boolean setcc's.
3952   if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) {
3953     SDValue Temp;
3954     switch (Cond) {
3955     default: llvm_unreachable("Unknown integer setcc!");
3956     case ISD::SETEQ:  // X == Y  -> ~(X^Y)
3957       Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
3958       N0 = DAG.getNOT(dl, Temp, OpVT);
3959       if (!DCI.isCalledByLegalizer())
3960         DCI.AddToWorklist(Temp.getNode());
3961       break;
3962     case ISD::SETNE:  // X != Y   -->  (X^Y)
3963       N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1);
3964       break;
3965     case ISD::SETGT:  // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
3966     case ISD::SETULT: // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
3967       Temp = DAG.getNOT(dl, N0, OpVT);
3968       N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp);
3969       if (!DCI.isCalledByLegalizer())
3970         DCI.AddToWorklist(Temp.getNode());
3971       break;
3972     case ISD::SETLT:  // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
3973     case ISD::SETUGT: // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
3974       Temp = DAG.getNOT(dl, N1, OpVT);
3975       N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp);
3976       if (!DCI.isCalledByLegalizer())
3977         DCI.AddToWorklist(Temp.getNode());
3978       break;
3979     case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
3980     case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
3981       Temp = DAG.getNOT(dl, N0, OpVT);
3982       N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp);
3983       if (!DCI.isCalledByLegalizer())
3984         DCI.AddToWorklist(Temp.getNode());
3985       break;
3986     case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
3987     case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
3988       Temp = DAG.getNOT(dl, N1, OpVT);
3989       N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp);
3990       break;
3991     }
3992     if (VT.getScalarType() != MVT::i1) {
3993       if (!DCI.isCalledByLegalizer())
3994         DCI.AddToWorklist(N0.getNode());
3995       // FIXME: If running after legalize, we probably can't do this.
3996       ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT));
3997       N0 = DAG.getNode(ExtendCode, dl, VT, N0);
3998     }
3999     return N0;
4000   }
4001 
4002   // Could not fold it.
4003   return SDValue();
4004 }
4005 
4006 /// Returns true (and the GlobalValue and the offset) if the node is a
4007 /// GlobalAddress + offset.
4008 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA,
4009                                     int64_t &Offset) const {
4010 
4011   SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode();
4012 
4013   if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
4014     GA = GASD->getGlobal();
4015     Offset += GASD->getOffset();
4016     return true;
4017   }
4018 
4019   if (N->getOpcode() == ISD::ADD) {
4020     SDValue N1 = N->getOperand(0);
4021     SDValue N2 = N->getOperand(1);
4022     if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
4023       if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
4024         Offset += V->getSExtValue();
4025         return true;
4026       }
4027     } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
4028       if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
4029         Offset += V->getSExtValue();
4030         return true;
4031       }
4032     }
4033   }
4034 
4035   return false;
4036 }
4037 
4038 SDValue TargetLowering::PerformDAGCombine(SDNode *N,
4039                                           DAGCombinerInfo &DCI) const {
4040   // Default implementation: no optimization.
4041   return SDValue();
4042 }
4043 
4044 //===----------------------------------------------------------------------===//
4045 //  Inline Assembler Implementation Methods
4046 //===----------------------------------------------------------------------===//
4047 
4048 TargetLowering::ConstraintType
4049 TargetLowering::getConstraintType(StringRef Constraint) const {
4050   unsigned S = Constraint.size();
4051 
4052   if (S == 1) {
4053     switch (Constraint[0]) {
4054     default: break;
4055     case 'r':
4056       return C_RegisterClass;
4057     case 'm': // memory
4058     case 'o': // offsetable
4059     case 'V': // not offsetable
4060       return C_Memory;
4061     case 'n': // Simple Integer
4062     case 'E': // Floating Point Constant
4063     case 'F': // Floating Point Constant
4064       return C_Immediate;
4065     case 'i': // Simple Integer or Relocatable Constant
4066     case 's': // Relocatable Constant
4067     case 'p': // Address.
4068     case 'X': // Allow ANY value.
4069     case 'I': // Target registers.
4070     case 'J':
4071     case 'K':
4072     case 'L':
4073     case 'M':
4074     case 'N':
4075     case 'O':
4076     case 'P':
4077     case '<':
4078     case '>':
4079       return C_Other;
4080     }
4081   }
4082 
4083   if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') {
4084     if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
4085       return C_Memory;
4086     return C_Register;
4087   }
4088   return C_Unknown;
4089 }
4090 
4091 /// Try to replace an X constraint, which matches anything, with another that
4092 /// has more specific requirements based on the type of the corresponding
4093 /// operand.
4094 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
4095   if (ConstraintVT.isInteger())
4096     return "r";
4097   if (ConstraintVT.isFloatingPoint())
4098     return "f"; // works for many targets
4099   return nullptr;
4100 }
4101 
4102 SDValue TargetLowering::LowerAsmOutputForConstraint(
4103     SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
4104     SelectionDAG &DAG) const {
4105   return SDValue();
4106 }
4107 
4108 /// Lower the specified operand into the Ops vector.
4109 /// If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                  std::string &Constraint,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const {

  // Only single-letter generic constraints are handled here; longer codes are
  // left for the target to lower.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'X':     // Allows any operand; labels (basic block) use this.
    if (Op.getOpcode() == ISD::BasicBlock ||
        Op.getOpcode() == ISD::TargetBlockAddress) {
      Ops.push_back(Op);
      return;
    }
    LLVM_FALLTHROUGH;
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's': {  // Relocatable Constant

    GlobalAddressSDNode *GA;
    ConstantSDNode *C;
    BlockAddressSDNode *BA;
    uint64_t Offset = 0;

    // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C),
    // etc., since getelementptr is variadic. We can't use
    // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible
    // while in this case the GA may be furthest from the root node which is
    // likely an ISD::ADD.
    while (1) {
      // 'n' demands a plain integer, so a global address only satisfies the
      // other letters.
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                                 GA->getValueType(0),
                                                 Offset + GA->getOffset()));
        return;
      } else if ((C = dyn_cast<ConstantSDNode>(Op)) &&
                 ConstraintLetter != 's') {
        // gcc prints these as sign extended.  Sign extend value to 64 bits
        // now; without this it would get ZExt'd later in
        // ScheduleDAGSDNodes::EmitNode, which is very generic.
        bool IsBool = C->getConstantIntValue()->getBitWidth() == 1;
        BooleanContent BCont = getBooleanContents(MVT::i64);
        ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
                                      : ISD::SIGN_EXTEND;
        int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue()
                                                    : C->getSExtValue();
        Ops.push_back(DAG.getTargetConstant(Offset + ExtVal,
                                            SDLoc(C), MVT::i64));
        return;
      } else if ((BA = dyn_cast<BlockAddressSDNode>(Op)) &&
                 ConstraintLetter != 'n') {
        // Block addresses are relocatable, so 'n' excludes them as well.
        Ops.push_back(DAG.getTargetBlockAddress(
            BA->getBlockAddress(), BA->getValueType(0),
            Offset + BA->getOffset(), BA->getTargetFlags()));
        return;
      } else {
        const unsigned OpCode = Op.getOpcode();
        if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
          // Fold a constant operand into Offset (negated for SUB) and keep
          // walking the other operand. ADD may fold either operand; SUB only
          // peels its first operand.
          if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0))))
            Op = Op.getOperand(1);
          // Subtraction is not commutative.
          else if (OpCode == ISD::ADD &&
                   (C = dyn_cast<ConstantSDNode>(Op.getOperand(1))))
            Op = Op.getOperand(0);
          else
            return;
          Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue();
          continue;
        }
      }
      // Unrecognized form: add nothing to Ops, signalling an invalid operand.
      return;
    }
    break;
  }
  }
}
4188 
4189 std::pair<unsigned, const TargetRegisterClass *>
4190 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
4191                                              StringRef Constraint,
4192                                              MVT VT) const {
4193   if (Constraint.empty() || Constraint[0] != '{')
4194     return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr));
4195   assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?");
4196 
4197   // Remove the braces from around the name.
4198   StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
4199 
4200   std::pair<unsigned, const TargetRegisterClass *> R =
4201       std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
4202 
4203   // Figure out which register class contains this reg.
4204   for (const TargetRegisterClass *RC : RI->regclasses()) {
4205     // If none of the value types for this register class are valid, we
4206     // can't use it.  For example, 64-bit reg classes on 32-bit targets.
4207     if (!isLegalRC(*RI, *RC))
4208       continue;
4209 
4210     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
4211          I != E; ++I) {
4212       if (RegName.equals_lower(RI->getRegAsmName(*I))) {
4213         std::pair<unsigned, const TargetRegisterClass *> S =
4214             std::make_pair(*I, RC);
4215 
4216         // If this register class has the requested value type, return it,
4217         // otherwise keep searching and return the first class found
4218         // if no other is found which explicitly has the requested type.
4219         if (RI->isTypeLegalForClass(*RC, VT))
4220           return S;
4221         if (!R.second)
4222           R = S;
4223       }
4224     }
4225   }
4226 
4227   return R;
4228 }
4229 
4230 //===----------------------------------------------------------------------===//
4231 // Constraint Selection.
4232 
/// Return true if this is an input operand that is a matching constraint like
/// "4".
4235 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
4236   assert(!ConstraintCode.empty() && "No known constraint!");
4237   return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
4238 }
4239 
4240 /// If this is an input matching constraint, this method returns the output
4241 /// operand it matches.
4242 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
4243   assert(!ConstraintCode.empty() && "No known constraint!");
4244   return atoi(ConstraintCode.c_str());
4245 }
4246 
4247 /// Split up the constraint string from the inline assembly value into the
4248 /// specific constraints and their prefixes, and also tie in the associated
4249 /// operand values.
4250 /// If this returns an empty vector, and if the constraint string itself
4251 /// isn't empty, there was an error parsing.
4252 TargetLowering::AsmOperandInfoVector
4253 TargetLowering::ParseConstraints(const DataLayout &DL,
4254                                  const TargetRegisterInfo *TRI,
4255                                  ImmutableCallSite CS) const {
4256   /// Information about all of the constraints.
4257   AsmOperandInfoVector ConstraintOperands;
4258   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
4259   unsigned maCount = 0; // Largest number of multiple alternative constraints.
4260 
4261   // Do a prepass over the constraints, canonicalizing them, and building up the
4262   // ConstraintOperands list.
4263   unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
4264   unsigned ResNo = 0; // ResNo - The result number of the next output.
4265 
4266   for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
4267     ConstraintOperands.emplace_back(std::move(CI));
4268     AsmOperandInfo &OpInfo = ConstraintOperands.back();
4269 
4270     // Update multiple alternative constraint count.
4271     if (OpInfo.multipleAlternatives.size() > maCount)
4272       maCount = OpInfo.multipleAlternatives.size();
4273 
4274     OpInfo.ConstraintVT = MVT::Other;
4275 
4276     // Compute the value type for each operand.
4277     switch (OpInfo.Type) {
4278     case InlineAsm::isOutput:
4279       // Indirect outputs just consume an argument.
4280       if (OpInfo.isIndirect) {
4281         OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
4282         break;
4283       }
4284 
4285       // The return value of the call is this value.  As such, there is no
4286       // corresponding argument.
4287       assert(!CS.getType()->isVoidTy() &&
4288              "Bad inline asm!");
4289       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
4290         OpInfo.ConstraintVT =
4291             getSimpleValueType(DL, STy->getElementType(ResNo));
4292       } else {
4293         assert(ResNo == 0 && "Asm only has one result!");
4294         OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
4295       }
4296       ++ResNo;
4297       break;
4298     case InlineAsm::isInput:
4299       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
4300       break;
4301     case InlineAsm::isClobber:
4302       // Nothing to do.
4303       break;
4304     }
4305 
4306     if (OpInfo.CallOperandVal) {
4307       llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
4308       if (OpInfo.isIndirect) {
4309         llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
4310         if (!PtrTy)
4311           report_fatal_error("Indirect operand for inline asm not a pointer!");
4312         OpTy = PtrTy->getElementType();
4313       }
4314 
4315       // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
4316       if (StructType *STy = dyn_cast<StructType>(OpTy))
4317         if (STy->getNumElements() == 1)
4318           OpTy = STy->getElementType(0);
4319 
4320       // If OpTy is not a single value, it may be a struct/union that we
4321       // can tile with integers.
4322       if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4323         unsigned BitSize = DL.getTypeSizeInBits(OpTy);
4324         switch (BitSize) {
4325         default: break;
4326         case 1:
4327         case 8:
4328         case 16:
4329         case 32:
4330         case 64:
4331         case 128:
4332           OpInfo.ConstraintVT =
4333               MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
4334           break;
4335         }
4336       } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
4337         unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
4338         OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
4339       } else {
4340         OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
4341       }
4342     }
4343   }
4344 
4345   // If we have multiple alternative constraints, select the best alternative.
4346   if (!ConstraintOperands.empty()) {
4347     if (maCount) {
4348       unsigned bestMAIndex = 0;
4349       int bestWeight = -1;
4350       // weight:  -1 = invalid match, and 0 = so-so match to 5 = good match.
4351       int weight = -1;
4352       unsigned maIndex;
4353       // Compute the sums of the weights for each alternative, keeping track
4354       // of the best (highest weight) one so far.
4355       for (maIndex = 0; maIndex < maCount; ++maIndex) {
4356         int weightSum = 0;
4357         for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
4358              cIndex != eIndex; ++cIndex) {
4359           AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];
4360           if (OpInfo.Type == InlineAsm::isClobber)
4361             continue;
4362 
4363           // If this is an output operand with a matching input operand,
4364           // look up the matching input. If their types mismatch, e.g. one
4365           // is an integer, the other is floating point, or their sizes are
4366           // different, flag it as an maCantMatch.
4367           if (OpInfo.hasMatchingInput()) {
4368             AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
4369             if (OpInfo.ConstraintVT != Input.ConstraintVT) {
4370               if ((OpInfo.ConstraintVT.isInteger() !=
4371                    Input.ConstraintVT.isInteger()) ||
4372                   (OpInfo.ConstraintVT.getSizeInBits() !=
4373                    Input.ConstraintVT.getSizeInBits())) {
4374                 weightSum = -1; // Can't match.
4375                 break;
4376               }
4377             }
4378           }
4379           weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
4380           if (weight == -1) {
4381             weightSum = -1;
4382             break;
4383           }
4384           weightSum += weight;
4385         }
4386         // Update best.
4387         if (weightSum > bestWeight) {
4388           bestWeight = weightSum;
4389           bestMAIndex = maIndex;
4390         }
4391       }
4392 
4393       // Now select chosen alternative in each constraint.
4394       for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
4395            cIndex != eIndex; ++cIndex) {
4396         AsmOperandInfo &cInfo = ConstraintOperands[cIndex];
4397         if (cInfo.Type == InlineAsm::isClobber)
4398           continue;
4399         cInfo.selectAlternative(bestMAIndex);
4400       }
4401     }
4402   }
4403 
4404   // Check and hook up tied operands, choose constraint code to use.
4405   for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
4406        cIndex != eIndex; ++cIndex) {
4407     AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];
4408 
4409     // If this is an output operand with a matching input operand, look up the
4410     // matching input. If their types mismatch, e.g. one is an integer, the
4411     // other is floating point, or their sizes are different, flag it as an
4412     // error.
4413     if (OpInfo.hasMatchingInput()) {
4414       AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
4415 
4416       if (OpInfo.ConstraintVT != Input.ConstraintVT) {
4417         std::pair<unsigned, const TargetRegisterClass *> MatchRC =
4418             getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
4419                                          OpInfo.ConstraintVT);
4420         std::pair<unsigned, const TargetRegisterClass *> InputRC =
4421             getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
4422                                          Input.ConstraintVT);
4423         if ((OpInfo.ConstraintVT.isInteger() !=
4424              Input.ConstraintVT.isInteger()) ||
4425             (MatchRC.second != InputRC.second)) {
4426           report_fatal_error("Unsupported asm: input constraint"
4427                              " with a matching output constraint of"
4428                              " incompatible type!");
4429         }
4430       }
4431     }
4432   }
4433 
4434   return ConstraintOperands;
4435 }
4436 
4437 /// Return an integer indicating how general CT is.
4438 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
4439   switch (CT) {
4440   case TargetLowering::C_Immediate:
4441   case TargetLowering::C_Other:
4442   case TargetLowering::C_Unknown:
4443     return 0;
4444   case TargetLowering::C_Register:
4445     return 1;
4446   case TargetLowering::C_RegisterClass:
4447     return 2;
4448   case TargetLowering::C_Memory:
4449     return 3;
4450   }
4451   llvm_unreachable("Invalid constraint type");
4452 }
4453 
4454 /// Examine constraint type and operand type and determine a weight value.
4455 /// This object must already have been set up with the operand type
4456 /// and the current alternative constraint selected.
4457 TargetLowering::ConstraintWeight
4458   TargetLowering::getMultipleConstraintMatchWeight(
4459     AsmOperandInfo &info, int maIndex) const {
4460   InlineAsm::ConstraintCodeVector *rCodes;
4461   if (maIndex >= (int)info.multipleAlternatives.size())
4462     rCodes = &info.Codes;
4463   else
4464     rCodes = &info.multipleAlternatives[maIndex].Codes;
4465   ConstraintWeight BestWeight = CW_Invalid;
4466 
4467   // Loop over the options, keeping track of the most general one.
4468   for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
4469     ConstraintWeight weight =
4470       getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
4471     if (weight > BestWeight)
4472       BestWeight = weight;
4473   }
4474 
4475   return BestWeight;
4476 }
4477 
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
  TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  // Look at the first letter of the constraint only; multi-letter
  // constraints are expected to be handled by target overrides.
  switch (*constraint) {
    case 'i': // immediate integer.
    case 'n': // immediate integer with a known value.
      if (isa<ConstantInt>(CallOperandVal))
        weight = CW_Constant;
      break;
    case 's': // non-explicit integral immediate (e.g. a symbol address).
      if (isa<GlobalValue>(CallOperandVal))
        weight = CW_Constant;
      break;
    case 'E': // immediate float if host format.
    case 'F': // immediate float.
      if (isa<ConstantFP>(CallOperandVal))
        weight = CW_Constant;
      break;
    case '<': // memory operand with autodecrement.
    case '>': // memory operand with autoincrement.
    case 'm': // memory operand.
    case 'o': // offsettable memory operand
    case 'V': // non-offsettable memory operand
      weight = CW_Memory;
      break;
    case 'r': // general register.
    case 'g': // general register, memory operand or immediate integer.
              // note: Clang converts "g" to "imr".
      if (CallOperandVal->getType()->isIntegerTy())
        weight = CW_Register;
      break;
    case 'X': // any operand; shares the default handling below.
  default:
    weight = CW_Default;
    break;
  }
  return weight;
}
4526 
4527 /// If there are multiple different constraints that we could pick for this
4528 /// operand (e.g. "imr") try to pick the 'best' one.
4529 /// This is somewhat tricky: constraints fall into four classes:
4530 ///    Other         -> immediates and magic values
4531 ///    Register      -> one specific register
4532 ///    RegisterClass -> a group of regs
4533 ///    Memory        -> memory
4534 /// Ideally, we would pick the most specific constraint possible: if we have
4535 /// something that fits into a register, we would pick it.  The problem here
4536 /// is that if we have something that could either be in a register or in
4537 /// memory that use of the register could cause selection of *other*
4538 /// operands to fail: they might only succeed if we pick memory.  Because of
4539 /// this the heuristic we use is:
4540 ///
4541 ///  1) If there is an 'other' constraint, and if the operand is valid for
4542 ///     that constraint, use it.  This makes us take advantage of 'i'
4543 ///     constraints when available.
4544 ///  2) Otherwise, pick the most general constraint present.  This prefers
4545 ///     'm' over 'r', for example.
4546 ///
4547 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
4548                              const TargetLowering &TLI,
4549                              SDValue Op, SelectionDAG *DAG) {
4550   assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
4551   unsigned BestIdx = 0;
4552   TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
4553   int BestGenerality = -1;
4554 
4555   // Loop over the options, keeping track of the most general one.
4556   for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
4557     TargetLowering::ConstraintType CType =
4558       TLI.getConstraintType(OpInfo.Codes[i]);
4559 
4560     // Indirect 'other' or 'immediate' constraints are not allowed.
4561     if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
4562                                CType == TargetLowering::C_Register ||
4563                                CType == TargetLowering::C_RegisterClass))
4564       continue;
4565 
4566     // If this is an 'other' or 'immediate' constraint, see if the operand is
4567     // valid for it. For example, on X86 we might have an 'rI' constraint. If
4568     // the operand is an integer in the range [0..31] we want to use I (saving a
4569     // load of a register), otherwise we must use 'r'.
4570     if ((CType == TargetLowering::C_Other ||
4571          CType == TargetLowering::C_Immediate) && Op.getNode()) {
4572       assert(OpInfo.Codes[i].size() == 1 &&
4573              "Unhandled multi-letter 'other' constraint");
4574       std::vector<SDValue> ResultOps;
4575       TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
4576                                        ResultOps, *DAG);
4577       if (!ResultOps.empty()) {
4578         BestType = CType;
4579         BestIdx = i;
4580         break;
4581       }
4582     }
4583 
4584     // Things with matching constraints can only be registers, per gcc
4585     // documentation.  This mainly affects "g" constraints.
4586     if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
4587       continue;
4588 
4589     // This constraint letter is more general than the previous one, use it.
4590     int Generality = getConstraintGenerality(CType);
4591     if (Generality > BestGenerality) {
4592       BestType = CType;
4593       BestIdx = i;
4594       BestGenerality = Generality;
4595     }
4596   }
4597 
4598   OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
4599   OpInfo.ConstraintType = BestType;
4600 }
4601 
4602 /// Determines the constraint code and constraint type to use for the specific
4603 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
4604 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
4605                                             SDValue Op,
4606                                             SelectionDAG *DAG) const {
4607   assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
4608 
4609   // Single-letter constraints ('r') are very common.
4610   if (OpInfo.Codes.size() == 1) {
4611     OpInfo.ConstraintCode = OpInfo.Codes[0];
4612     OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
4613   } else {
4614     ChooseConstraint(OpInfo, *this, Op, DAG);
4615   }
4616 
4617   // 'X' matches anything.
4618   if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
4619     // Labels and constants are handled elsewhere ('X' is the only thing
4620     // that matches labels).  For Functions, the type here is the type of
4621     // the result, which is not what we want to look at; leave them alone.
4622     Value *v = OpInfo.CallOperandVal;
4623     if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
4624       OpInfo.CallOperandVal = v;
4625       return;
4626     }
4627 
4628     if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress)
4629       return;
4630 
4631     // Otherwise, try to resolve it to something we know about by looking at
4632     // the actual operand type.
4633     if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
4634       OpInfo.ConstraintCode = Repl;
4635       OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
4636     }
4637   }
4638 }
4639 
/// Given an exact SDIV by a constant, create a multiplication
/// with the multiplicative inverse of the constant.
/// Each divisor D is decomposed as D = D' * 2^Shift with D' odd; the exact
/// division becomes an (exact) arithmetic shift by Shift followed by a
/// multiply with inverse(D') modulo 2^BitWidth.
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
                              const SDLoc &dl, SelectionDAG &DAG,
                              SmallVectorImpl<SDNode *> &Created) {
  SDValue Op0 = N->getOperand(0); // Numerator.
  SDValue Op1 = N->getOperand(1); // Constant divisor (scalar or build_vector).
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
  EVT ShSVT = ShVT.getScalarType();

  // Set if any lane has a non-trivial power-of-two factor to shift out.
  bool UseSRA = false;
  SmallVector<SDValue, 16> Shifts, Factors;

  // Per-lane pattern builder: records the shift amount and the odd-part
  // inverse for one constant divisor, or fails for a zero divisor.
  auto BuildSDIVPattern = [&](ConstantSDNode *C) {
    // Division by zero is UB; leave it to be handled elsewhere.
    if (C->isNullValue())
      return false;
    APInt Divisor = C->getAPIntValue();
    unsigned Shift = Divisor.countTrailingZeros();
    if (Shift) {
      // Strip the power-of-two factor; exactness lets us shift it out first.
      Divisor.ashrInPlace(Shift);
      UseSRA = true;
    }
    // Calculate the multiplicative inverse, using Newton's method.
    APInt t;
    APInt Factor = Divisor;
    while ((t = Divisor * Factor) != 1)
      Factor *= APInt(Divisor.getBitWidth(), 2) - t;
    Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
    Factors.push_back(DAG.getConstant(Factor, dl, SVT));
    return true;
  };

  // Collect all magic values from the build vector.
  if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern))
    return SDValue();

  // Materialize the per-lane shift amounts and inverse factors.
  SDValue Shift, Factor;
  if (VT.isVector()) {
    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
    Factor = DAG.getBuildVector(VT, dl, Factors);
  } else {
    Shift = Shifts[0];
    Factor = Factors[0];
  }

  SDValue Res = Op0;

  // Shift the value upfront if it is even, so the LSB is one.
  if (UseSRA) {
    // TODO: For UDIV use SRL instead of SRA.
    SDNodeFlags Flags;
    Flags.setExact(true); // The sdiv was exact, so the sra is exact too.
    Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags);
    Created.push_back(Res.getNode());
  }

  // (N >> Shift) * inverse(D') yields the exact quotient.
  return DAG.getNode(ISD::MUL, dl, VT, Res, Factor);
}
4700 
4701 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
4702                               SelectionDAG &DAG,
4703                               SmallVectorImpl<SDNode *> &Created) const {
4704   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4705   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4706   if (TLI.isIntDivCheap(N->getValueType(0), Attr))
4707     return SDValue(N, 0); // Lower SDIV as SDIV
4708   return SDValue();
4709 }
4710 
/// Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                  bool IsAfterLegalization,
                                  SmallVectorImpl<SDNode *> &Created) const {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
  EVT ShSVT = ShVT.getScalarType();
  unsigned EltBits = VT.getScalarSizeInBits();

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  // If the sdiv has an 'exact' bit we can use a simpler lowering.
  if (N->getFlags().hasExact())
    return BuildExactSDIV(*this, N, dl, DAG, Created);

  // Per-lane constants: magics.m multiplier, +1/0/-1 numerator factor,
  // post-shift amount, and an all-ones/zero mask for the sign-bit fixup.
  SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks;

  // Per-lane pattern builder: computes the signed magic numbers for one
  // constant divisor, or fails for a zero divisor.
  auto BuildSDIVPattern = [&](ConstantSDNode *C) {
    // Division by zero is UB; leave it to be handled elsewhere.
    if (C->isNullValue())
      return false;

    const APInt &Divisor = C->getAPIntValue();
    APInt::ms magics = Divisor.magic();
    int NumeratorFactor = 0;
    int ShiftMask = -1;

    if (Divisor.isOneValue() || Divisor.isAllOnesValue()) {
      // If d is +1/-1, we just multiply the numerator by +1/-1.
      NumeratorFactor = Divisor.getSExtValue();
      magics.m = 0;
      magics.s = 0;
      ShiftMask = 0; // No sign-bit fixup needed for +-1.
    } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
      // If d > 0 and m < 0, add the numerator.
      NumeratorFactor = 1;
    } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
      // If d < 0 and m > 0, subtract the numerator.
      NumeratorFactor = -1;
    }

    MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT));
    Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT));
    Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT));
    ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT));
    return true;
  };

  SDValue N0 = N->getOperand(0); // Numerator.
  SDValue N1 = N->getOperand(1); // Constant divisor.

  // Collect the shifts / magic values from each element.
  if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern))
    return SDValue();

  // Materialize the per-lane constants (splat-free build_vectors for
  // vectors, the single lane value for scalars).
  SDValue MagicFactor, Factor, Shift, ShiftMask;
  if (VT.isVector()) {
    MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
    Factor = DAG.getBuildVector(VT, dl, Factors);
    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
    ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks);
  } else {
    MagicFactor = MagicFactors[0];
    Factor = Factors[0];
    Shift = Shifts[0];
    ShiftMask = ShiftMasks[0];
  }

  // Multiply the numerator (operand 0) by the magic value.
  // FIXME: We should support doing a MUL in a wider type.
  SDValue Q;
  if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT)
                          : isOperationLegalOrCustom(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor);
  else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT)
                               : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) {
    // Use the high half of a full multiply as MULHS.
    SDValue LoHi =
        DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor);
    Q = SDValue(LoHi.getNode(), 1);
  } else
    return SDValue(); // No mulhs or equivalent.
  Created.push_back(Q.getNode());

  // (Optionally) Add/subtract the numerator using Factor.
  Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor);
  Created.push_back(Factor.getNode());
  Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor);
  Created.push_back(Q.getNode());

  // Shift right algebraic by shift value.
  Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift);
  Created.push_back(Q.getNode());

  // Extract the sign bit, mask it and add it to the quotient.
  SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT);
  SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift);
  Created.push_back(T.getNode());
  T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask);
  Created.push_back(T.getNode());
  return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}
4819 
4820 /// Given an ISD::UDIV node expressing a divide by constant,
4821 /// return a DAG expression to select that will generate the same value by
4822 /// multiplying by a magic number.
4823 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
4824 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
4825                                   bool IsAfterLegalization,
4826                                   SmallVectorImpl<SDNode *> &Created) const {
4827   SDLoc dl(N);
4828   EVT VT = N->getValueType(0);
4829   EVT SVT = VT.getScalarType();
4830   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
4831   EVT ShSVT = ShVT.getScalarType();
4832   unsigned EltBits = VT.getScalarSizeInBits();
4833 
4834   // Check to see if we can do this.
4835   // FIXME: We should be more aggressive here.
4836   if (!isTypeLegal(VT))
4837     return SDValue();
4838 
4839   bool UseNPQ = false;
4840   SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
4841 
4842   auto BuildUDIVPattern = [&](ConstantSDNode *C) {
4843     if (C->isNullValue())
4844       return false;
4845     // FIXME: We should use a narrower constant when the upper
4846     // bits are known to be zero.
4847     APInt Divisor = C->getAPIntValue();
4848     APInt::mu magics = Divisor.magicu();
4849     unsigned PreShift = 0, PostShift = 0;
4850 
4851     // If the divisor is even, we can avoid using the expensive fixup by
4852     // shifting the divided value upfront.
4853     if (magics.a != 0 && !Divisor[0]) {
4854       PreShift = Divisor.countTrailingZeros();
4855       // Get magic number for the shifted divisor.
4856       magics = Divisor.lshr(PreShift).magicu(PreShift);
4857       assert(magics.a == 0 && "Should use cheap fixup now");
4858     }
4859 
4860     APInt Magic = magics.m;
4861 
4862     unsigned SelNPQ;
4863     if (magics.a == 0 || Divisor.isOneValue()) {
4864       assert(magics.s < Divisor.getBitWidth() &&
4865              "We shouldn't generate an undefined shift!");
4866       PostShift = magics.s;
4867       SelNPQ = false;
4868     } else {
4869       PostShift = magics.s - 1;
4870       SelNPQ = true;
4871     }
4872 
4873     PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT));
4874     MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT));
4875     NPQFactors.push_back(
4876         DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
4877                                : APInt::getNullValue(EltBits),
4878                         dl, SVT));
4879     PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT));
4880     UseNPQ |= SelNPQ;
4881     return true;
4882   };
4883 
4884   SDValue N0 = N->getOperand(0);
4885   SDValue N1 = N->getOperand(1);
4886 
4887   // Collect the shifts/magic values from each element.
4888   if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern))
4889     return SDValue();
4890 
4891   SDValue PreShift, PostShift, MagicFactor, NPQFactor;
4892   if (VT.isVector()) {
4893     PreShift = DAG.getBuildVector(ShVT, dl, PreShifts);
4894     MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
4895     NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors);
4896     PostShift = DAG.getBuildVector(ShVT, dl, PostShifts);
4897   } else {
4898     PreShift = PreShifts[0];
4899     MagicFactor = MagicFactors[0];
4900     PostShift = PostShifts[0];
4901   }
4902 
4903   SDValue Q = N0;
4904   Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift);
4905   Created.push_back(Q.getNode());
4906 
4907   // FIXME: We should support doing a MUL in a wider type.
4908   auto GetMULHU = [&](SDValue X, SDValue Y) {
4909     if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT)
4910                             : isOperationLegalOrCustom(ISD::MULHU, VT))
4911       return DAG.getNode(ISD::MULHU, dl, VT, X, Y);
4912     if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT)
4913                             : isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) {
4914       SDValue LoHi =
4915           DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y);
4916       return SDValue(LoHi.getNode(), 1);
4917     }
4918     return SDValue(); // No mulhu or equivalent
4919   };
4920 
4921   // Multiply the numerator (operand 0) by the magic value.
4922   Q = GetMULHU(Q, MagicFactor);
4923   if (!Q)
4924     return SDValue();
4925 
4926   Created.push_back(Q.getNode());
4927 
4928   if (UseNPQ) {
4929     SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q);
4930     Created.push_back(NPQ.getNode());
4931 
4932     // For vectors we might have a mix of non-NPQ/NPQ paths, so use
4933     // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero.
4934     if (VT.isVector())
4935       NPQ = GetMULHU(NPQ, NPQFactor);
4936     else
4937       NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT));
4938 
4939     Created.push_back(NPQ.getNode());
4940 
4941     Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
4942     Created.push_back(Q.getNode());
4943   }
4944 
4945   Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift);
4946   Created.push_back(Q.getNode());
4947 
4948   SDValue One = DAG.getConstant(1, dl, VT);
4949   SDValue IsOne = DAG.getSetCC(dl, VT, N1, One, ISD::SETEQ);
4950   return DAG.getSelect(dl, VT, IsOne, N0, Q);
4951 }
4952 
4953 /// If all values in Values that *don't* match the predicate are same 'splat'
4954 /// value, then replace all values with that splat value.
4955 /// Else, if AlternativeReplacement was provided, then replace all values that
4956 /// do match predicate with AlternativeReplacement value.
4957 static void
4958 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values,
4959                           std::function<bool(SDValue)> Predicate,
4960                           SDValue AlternativeReplacement = SDValue()) {
4961   SDValue Replacement;
4962   // Is there a value for which the Predicate does *NOT* match? What is it?
4963   auto SplatValue = llvm::find_if_not(Values, Predicate);
4964   if (SplatValue != Values.end()) {
4965     // Does Values consist only of SplatValue's and values matching Predicate?
4966     if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) {
4967           return Value == *SplatValue || Predicate(Value);
4968         })) // Then we shall replace values matching predicate with SplatValue.
4969       Replacement = *SplatValue;
4970   }
4971   if (!Replacement) {
4972     // Oops, we did not find the "baseline" splat value.
4973     if (!AlternativeReplacement)
4974       return; // Nothing to do.
4975     // Let's replace with provided value then.
4976     Replacement = AlternativeReplacement;
4977   }
4978   std::replace_if(Values.begin(), Values.end(), Predicate, Replacement);
4979 }
4980 
4981 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE
4982 /// where the divisor is constant and the comparison target is zero,
4983 /// return a DAG expression that will generate the same comparison result
4984 /// using only multiplications, additions and shifts/rotations.
4985 /// Ref: "Hacker's Delight" 10-17.
4986 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
4987                                         SDValue CompTargetNode,
4988                                         ISD::CondCode Cond,
4989                                         DAGCombinerInfo &DCI,
4990                                         const SDLoc &DL) const {
4991   SmallVector<SDNode *, 5> Built;
4992   if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
4993                                          DCI, DL, Built)) {
4994     for (SDNode *N : Built)
4995       DCI.AddToWorklist(N);
4996     return Folded;
4997   }
4998 
4999   return SDValue();
5000 }
5001 
5002 SDValue
5003 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
5004                                   SDValue CompTargetNode, ISD::CondCode Cond,
5005                                   DAGCombinerInfo &DCI, const SDLoc &DL,
5006                                   SmallVectorImpl<SDNode *> &Created) const {
5007   // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q)
5008   // - D must be constant, with D = D0 * 2^K where D0 is odd
5009   // - P is the multiplicative inverse of D0 modulo 2^W
5010   // - Q = floor(((2^W) - 1) / D)
5011   // where W is the width of the common type of N and D.
5012   assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
5013          "Only applicable for (in)equality comparisons.");
5014 
5015   SelectionDAG &DAG = DCI.DAG;
5016 
5017   EVT VT = REMNode.getValueType();
5018   EVT SVT = VT.getScalarType();
5019   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
5020   EVT ShSVT = ShVT.getScalarType();
5021 
5022   // If MUL is unavailable, we cannot proceed in any case.
5023   if (!isOperationLegalOrCustom(ISD::MUL, VT))
5024     return SDValue();
5025 
5026   bool ComparingWithAllZeros = true;
5027   bool AllComparisonsWithNonZerosAreTautological = true;
5028   bool HadTautologicalLanes = false;
5029   bool AllLanesAreTautological = true;
5030   bool HadEvenDivisor = false;
5031   bool AllDivisorsArePowerOfTwo = true;
5032   bool HadTautologicalInvertedLanes = false;
5033   SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts;
5034 
5035   auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
5036     // Division by 0 is UB. Leave it to be constant-folded elsewhere.
5037     if (CDiv->isNullValue())
5038       return false;
5039 
5040     const APInt &D = CDiv->getAPIntValue();
5041     const APInt &Cmp = CCmp->getAPIntValue();
5042 
5043     ComparingWithAllZeros &= Cmp.isNullValue();
5044 
    // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5046     // if C2 is not less than C1, the comparison is always false.
5047     // But we will only be able to produce the comparison that will give the
5048     // opposive tautological answer. So this lane would need to be fixed up.
5049     bool TautologicalInvertedLane = D.ule(Cmp);
5050     HadTautologicalInvertedLanes |= TautologicalInvertedLane;
5051 
5052     // If all lanes are tautological (either all divisors are ones, or divisor
5053     // is not greater than the constant we are comparing with),
5054     // we will prefer to avoid the fold.
5055     bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
5056     HadTautologicalLanes |= TautologicalLane;
5057     AllLanesAreTautological &= TautologicalLane;
5058 
5059     // If we are comparing with non-zero, we need'll need  to subtract said
5060     // comparison value from the LHS. But there is no point in doing that if
5061     // every lane where we are comparing with non-zero is tautological..
5062     if (!Cmp.isNullValue())
5063       AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
5064 
5065     // Decompose D into D0 * 2^K
5066     unsigned K = D.countTrailingZeros();
5067     assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
5068     APInt D0 = D.lshr(K);
5069 
5070     // D is even if it has trailing zeros.
5071     HadEvenDivisor |= (K != 0);
5072     // D is a power-of-two if D0 is one.
5073     // If all divisors are power-of-two, we will prefer to avoid the fold.
5074     AllDivisorsArePowerOfTwo &= D0.isOneValue();
5075 
5076     // P = inv(D0, 2^W)
5077     // 2^W requires W + 1 bits, so we have to extend and then truncate.
5078     unsigned W = D.getBitWidth();
5079     APInt P = D0.zext(W + 1)
5080                   .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
5081                   .trunc(W);
5082     assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
5083     assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
5084 
5085     // Q = floor((2^W - 1) u/ D)
5086     // R = ((2^W - 1) u% D)
5087     APInt Q, R;
5088     APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);
5089 
5090     // If we are comparing with zero, then that comparison constant is okay,
5091     // else it may need to be one less than that.
5092     if (Cmp.ugt(R))
5093       Q -= 1;
5094 
5095     assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
5096            "We are expecting that K is always less than all-ones for ShSVT");
5097 
5098     // If the lane is tautological the result can be constant-folded.
5099     if (TautologicalLane) {
5100       // Set P and K amount to a bogus values so we can try to splat them.
5101       P = 0;
5102       K = -1;
5103       // And ensure that comparison constant is tautological,
5104       // it will always compare true/false.
5105       Q = -1;
5106     }
5107 
5108     PAmts.push_back(DAG.getConstant(P, DL, SVT));
5109     KAmts.push_back(
5110         DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
5111     QAmts.push_back(DAG.getConstant(Q, DL, SVT));
5112     return true;
5113   };
5114 
5115   SDValue N = REMNode.getOperand(0);
5116   SDValue D = REMNode.getOperand(1);
5117 
5118   // Collect the values from each element.
5119   if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
5120     return SDValue();
5121 
5122   // If all lanes are tautological, the result can be constant-folded.
5123   if (AllLanesAreTautological)
5124     return SDValue();
5125 
5126   // If this is a urem by a powers-of-two, avoid the fold since it can be
5127   // best implemented as a bit test.
5128   if (AllDivisorsArePowerOfTwo)
5129     return SDValue();
5130 
5131   SDValue PVal, KVal, QVal;
5132   if (VT.isVector()) {
5133     if (HadTautologicalLanes) {
5134       // Try to turn PAmts into a splat, since we don't care about the values
5135       // that are currently '0'. If we can't, just keep '0'`s.
5136       turnVectorIntoSplatVector(PAmts, isNullConstant);
5137       // Try to turn KAmts into a splat, since we don't care about the values
5138       // that are currently '-1'. If we can't, change them to '0'`s.
5139       turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
5140                                 DAG.getConstant(0, DL, ShSVT));
5141     }
5142 
5143     PVal = DAG.getBuildVector(VT, DL, PAmts);
5144     KVal = DAG.getBuildVector(ShVT, DL, KAmts);
5145     QVal = DAG.getBuildVector(VT, DL, QAmts);
5146   } else {
5147     PVal = PAmts[0];
5148     KVal = KAmts[0];
5149     QVal = QAmts[0];
5150   }
5151 
5152   if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
5153     if (!isOperationLegalOrCustom(ISD::SUB, VT))
5154       return SDValue(); // FIXME: Could/should use `ISD::ADD`?
5155     assert(CompTargetNode.getValueType() == N.getValueType() &&
5156            "Expecting that the types on LHS and RHS of comparisons match.");
5157     N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
5158   }
5159 
5160   // (mul N, P)
5161   SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
5162   Created.push_back(Op0.getNode());
5163 
5164   // Rotate right only if any divisor was even. We avoid rotates for all-odd
5165   // divisors as a performance improvement, since rotating by 0 is a no-op.
5166   if (HadEvenDivisor) {
5167     // We need ROTR to do this.
5168     if (!isOperationLegalOrCustom(ISD::ROTR, VT))
5169       return SDValue();
5170     SDNodeFlags Flags;
5171     Flags.setExact(true);
5172     // UREM: (rotr (mul N, P), K)
5173     Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
5174     Created.push_back(Op0.getNode());
5175   }
5176 
5177   // UREM: (setule/setugt (rotr (mul N, P), K), Q)
5178   SDValue NewCC =
5179       DAG.getSetCC(DL, SETCCVT, Op0, QVal,
5180                    ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
5181   if (!HadTautologicalInvertedLanes)
5182     return NewCC;
5183 
5184   // If any lanes previously compared always-false, the NewCC will give
5185   // always-true result for them, so we need to fixup those lanes.
5186   // Or the other way around for inequality predicate.
5187   assert(VT.isVector() && "Can/should only get here for vectors.");
5188   Created.push_back(NewCC.getNode());
5189 
5190   // x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5191   // if C2 is not less than C1, the comparison is always false.
5192   // But we have produced the comparison that will give the
5193   // opposive tautological answer. So these lanes would need to be fixed up.
5194   SDValue TautologicalInvertedChannels =
5195       DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
5196   Created.push_back(TautologicalInvertedChannels.getNode());
5197 
5198   if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
5199     // If we have a vector select, let's replace the comparison results in the
5200     // affected lanes with the correct tautological result.
5201     SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
5202                                               DL, SETCCVT, SETCCVT);
5203     return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
5204                        Replacement, NewCC);
5205   }
5206 
5207   // Else, we can just invert the comparison result in the appropriate lanes.
5208   if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
5209     return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
5210                        TautologicalInvertedChannels);
5211 
5212   return SDValue(); // Don't know how to lower.
5213 }
5214 
5215 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
5216 /// where the divisor is constant and the comparison target is zero,
5217 /// return a DAG expression that will generate the same comparison result
5218 /// using only multiplications, additions and shifts/rotations.
5219 /// Ref: "Hacker's Delight" 10-17.
5220 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode,
5221                                         SDValue CompTargetNode,
5222                                         ISD::CondCode Cond,
5223                                         DAGCombinerInfo &DCI,
5224                                         const SDLoc &DL) const {
5225   SmallVector<SDNode *, 7> Built;
5226   if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
5227                                          DCI, DL, Built)) {
5228     assert(Built.size() <= 7 && "Max size prediction failed.");
5229     for (SDNode *N : Built)
5230       DCI.AddToWorklist(N);
5231     return Folded;
5232   }
5233 
5234   return SDValue();
5235 }
5236 
// Try to express (seteq/ne (srem N, D), 0) without an actual division, using
// only multiplication, addition and rotation on constants derived from D.
// On success, returns the replacement node and appends every intermediate
// node to \p Created; on failure, returns an empty SDValue.
SDValue
TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                                  SDValue CompTargetNode, ISD::CondCode Cond,
                                  DAGCombinerInfo &DCI, const SDLoc &DL,
                                  SmallVectorImpl<SDNode *> &Created) const {
  // Fold:
  //   (seteq/ne (srem N, D), 0)
  // To:
  //   (setule/ugt (rotr (add (mul N, P), A), K), Q)
  //
  // - D must be constant, with D = D0 * 2^K where D0 is odd
  // - P is the multiplicative inverse of D0 modulo 2^W
  // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k)))
  // - Q = floor((2 * A) / (2^K))
  // where W is the width of the common type of N and D.
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Only applicable for (in)equality comparisons.");

  SelectionDAG &DAG = DCI.DAG;

  EVT VT = REMNode.getValueType();
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
  EVT ShSVT = ShVT.getScalarType();

  // If MUL is unavailable, we cannot proceed in any case.
  if (!isOperationLegalOrCustom(ISD::MUL, VT))
    return SDValue();

  // TODO: Could support comparing with non-zero too.
  ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode);
  if (!CompTarget || !CompTarget->isNullValue())
    return SDValue();

  bool HadIntMinDivisor = false;
  bool HadOneDivisor = false;
  bool AllDivisorsAreOnes = true;
  bool HadEvenDivisor = false;
  bool NeedToApplyOffset = false;
  bool AllDivisorsArePowerOfTwo = true;
  // Per-lane constants for the rewritten expression: multiplier P, offset A,
  // rotate amount K, and comparison bound Q.
  SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts;

  // Computes the fold constants for a single divisor lane; returns false to
  // abort the whole fold.
  auto BuildSREMPattern = [&](ConstantSDNode *C) {
    // Division by 0 is UB. Leave it to be constant-folded elsewhere.
    if (C->isNullValue())
      return false;

    // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine.

    // WARNING: this fold is only valid for positive divisors!
    APInt D = C->getAPIntValue();
    if (D.isNegative())
      D.negate(); //  `rem %X, -C` is equivalent to `rem %X, C`

    // INT_MIN stays negative after negation; such lanes get special handling.
    HadIntMinDivisor |= D.isMinSignedValue();

    // If all divisors are ones, we will prefer to avoid the fold.
    HadOneDivisor |= D.isOneValue();
    AllDivisorsAreOnes &= D.isOneValue();

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    if (!D.isMinSignedValue()) {
      // D is even if it has trailing zeros; unless it's INT_MIN, in which case
      // we don't care about this lane in this fold, we'll special-handle it.
      HadEvenDivisor |= (K != 0);
    }

    // D is a power-of-two if D0 is one. This includes INT_MIN.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOneValue();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");

    // A = floor((2^(W - 1) - 1) / D0) & -2^K
    APInt A = APInt::getSignedMaxValue(W).udiv(D0);
    A.clearLowBits(K);

    if (!D.isMinSignedValue()) {
      // If divisor INT_MIN, then we don't care about this lane in this fold,
      // we'll special-handle it.
      NeedToApplyOffset |= A != 0;
    }

    // Q = floor((2 * A) / (2^K))
    APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));

    // A and K must be representable in SVT/ShSVT without aliasing the '-1'
    // sentinel used below for tautological lanes.
    assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) &&
           "We are expecting that A is always less than all-ones for SVT");
    assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the divisor is 1 the result can be constant-folded. Likewise, we
    // don't care about INT_MIN lanes, those can be set to undef if appropriate.
    if (D.isOneValue()) {
      // Set P, A and K to bogus values so we can try to splat them.
      P = 0;
      A = -1;
      K = -1;

      // x ?% 1 == 0  <-->  true  <-->  x u<= -1
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    AAmts.push_back(DAG.getConstant(A, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
    return SDValue();

  // If this is a srem by a one, avoid the fold since it can be constant-folded.
  if (AllDivisorsAreOnes)
    return SDValue();

  // If this is a srem by a powers-of-two (including INT_MIN), avoid the fold
  // since it can be best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, AVal, KVal, QVal;
  if (VT.isVector()) {
    if (HadOneDivisor) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0'`s.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn AAmts into a splat, since we don't care about the
      // values that are currently '-1'. If we can't, change them to '0'`s.
      turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, SVT));
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0'`s.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    AVal = DAG.getBuildVector(VT, DL, AAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else {
    // Scalar case: exactly one set of constants was collected.
    PVal = PAmts[0];
    AVal = AAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  if (NeedToApplyOffset) {
    // We need ADD to do this.
    if (!isOperationLegalOrCustom(ISD::ADD, VT))
      return SDValue();

    // (add (mul N, P), A)
    Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal);
    Created.push_back(Op0.getNode());
  }

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    SDNodeFlags Flags;
    Flags.setExact(true);
    // SREM: (rotr (add (mul N, P), A), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
    Created.push_back(Op0.getNode());
  }

  // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
  SDValue Fold =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));

  // If we didn't have lanes with INT_MIN divisor, then we're done.
  if (!HadIntMinDivisor)
    return Fold;

  // That fold is only valid for positive divisors. Which effectively means,
  // it is invalid for INT_MIN divisors. So if we have such a lane,
  // we must fix-up results for said lanes.
  assert(VT.isVector() && "Can/should only get here for vectors.");

  // The fix-up below needs all of these operations; bail out if any is
  // unavailable rather than emit a partially-lowerable expression.
  if (!isOperationLegalOrCustom(ISD::SETEQ, VT) ||
      !isOperationLegalOrCustom(ISD::AND, VT) ||
      !isOperationLegalOrCustom(Cond, VT) ||
      !isOperationLegalOrCustom(ISD::VSELECT, VT))
    return SDValue();

  Created.push_back(Fold.getNode());

  SDValue IntMin = DAG.getConstant(
      APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue IntMax = DAG.getConstant(
      APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue Zero =
      DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT);

  // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded.
  SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
  Created.push_back(DivisorIsIntMin.getNode());

  // (N s% INT_MIN) ==/!= 0  <-->  (N & INT_MAX) ==/!= 0
  SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
  Created.push_back(Masked.getNode());
  SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
  Created.push_back(MaskedIsZero.getNode());

  // To produce final result we need to blend 2 vectors: 'SetCC' and
  // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick
  // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is
  // constant-folded, select can get lowered to a shuffle with constant mask.
  SDValue Blended =
      DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold);

  return Blended;
}
5477 
5478 bool TargetLowering::
5479 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
5480   if (!isa<ConstantSDNode>(Op.getOperand(0))) {
5481     DAG.getContext()->emitError("argument to '__builtin_return_address' must "
5482                                 "be a constant integer");
5483     return true;
5484   }
5485 
5486   return false;
5487 }
5488 
// Report whether negating \p Op is "free". Returns 2 when Op is itself an
// FNEG (stripping it is free even with multiple uses), 1 when a cheap negated
// form exists, and 0 otherwise. The cases handled here must stay in sync with
// getNegatedExpression() below.
char TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
                                        bool LegalOperations, bool ForCodeSize,
                                        unsigned Depth) const {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG)
    return 2;

  // Don't allow anything with multiple uses unless we know it is free.
  EVT VT = Op.getValueType();
  const SDNodeFlags Flags = Op->getFlags();
  const TargetOptions &Options = DAG.getTarget().Options;
  if (!Op.hasOneUse() && !(Op.getOpcode() == ISD::FP_EXTEND &&
                           isFPExtFree(VT, Op.getOperand(0).getValueType())))
    return 0;

  // Don't recurse exponentially.
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return 0;

  switch (Op.getOpcode()) {
  case ISD::ConstantFP: {
    if (!LegalOperations)
      return 1;

    // Don't invert constant FP values after legalization unless the target says
    // the negated constant is legal.
    return isOperationLegal(ISD::ConstantFP, VT) ||
           isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT,
                        ForCodeSize);
  }
  case ISD::BUILD_VECTOR: {
    // Only permit BUILD_VECTOR of constants.
    if (llvm::any_of(Op->op_values(), [&](SDValue N) {
          return !N.isUndef() && !isa<ConstantFPSDNode>(N);
        }))
      return 0;
    if (!LegalOperations)
      return 1;
    if (isOperationLegal(ISD::ConstantFP, VT) &&
        isOperationLegal(ISD::BUILD_VECTOR, VT))
      return 1;
    // Otherwise, every (non-undef) negated element must be a legal immediate.
    return llvm::all_of(Op->op_values(), [&](SDValue N) {
      return N.isUndef() ||
             isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
                          ForCodeSize);
    });
  }
  case ISD::FADD:
    // Rewriting fadd as fsub flips which zero is produced for (+0)+(-0),
    // so NSZ is required.
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      return 0;

    // After operation legalization, it might not be legal to create new FSUBs.
    if (LegalOperations && !isOperationLegalOrCustom(ISD::FSUB, VT))
      return 0;

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (char V = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                                    ForCodeSize, Depth + 1))
      return V;
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations,
                              ForCodeSize, Depth + 1);
  case ISD::FSUB:
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      return 0;

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return 1;

  case ISD::FMUL:
  case ISD::FDIV:
    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
    if (char V = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                                    ForCodeSize, Depth + 1))
      return V;

    // Ignore X * 2.0 because that is expected to be canonicalized to X + X.
    if (auto *C = isConstOrConstSplatFP(Op.getOperand(1)))
      if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL)
        return 0;

    return isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations,
                              ForCodeSize, Depth + 1);

  case ISD::FMA:
  case ISD::FMAD: {
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      return 0;

    // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
    // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
    char V2 = isNegatibleForFree(Op.getOperand(2), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    if (!V2)
      return 0;

    // One of Op0/Op1 must be cheaply negatible, then select the cheapest.
    char V0 = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    char V1 = isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    // Both the addend and at least one multiplicand must be negatible;
    // report the highest cost class seen among the chosen operands.
    char V01 = std::max(V0, V1);
    return V01 ? std::max(V01, V2) : 0;
  }

  case ISD::FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::FSIN:
    // These commute with negation: negate the (sole value) operand instead.
    return isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                              ForCodeSize, Depth + 1);
  }

  return 0;
}
5604 
// Materialize the negated form of \p Op. Callers must first have checked
// isNegatibleForFree(); the set of opcodes handled here must mirror that
// function exactly (see the assert below).
SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                             bool LegalOperations,
                                             bool ForCodeSize,
                                             unsigned Depth) const {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG)
    return Op.getOperand(0);

  assert(Depth <= SelectionDAG::MaxRecursionDepth &&
         "getNegatedExpression doesn't match isNegatibleForFree");
  const SDNodeFlags Flags = Op->getFlags();

  switch (Op.getOpcode()) {
  case ISD::ConstantFP: {
    // Negate the constant directly by flipping its sign bit.
    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
    V.changeSign();
    return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType());
  }
  case ISD::BUILD_VECTOR: {
    // Negate each constant element; undef elements pass through unchanged.
    SmallVector<SDValue, 4> Ops;
    for (SDValue C : Op->op_values()) {
      if (C.isUndef()) {
        Ops.push_back(C);
        continue;
      }
      APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF();
      V.changeSign();
      Ops.push_back(DAG.getConstantFP(V, SDLoc(Op), C.getValueType()));
    }
    return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Ops);
  }
  case ISD::FADD:
    assert((DAG.getTarget().Options.NoSignedZerosFPMath ||
            Flags.hasNoSignedZeros()) &&
           "Expected NSZ fp-flag");

    // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
    if (isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, ForCodeSize,
                           Depth + 1))
      return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                         getNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, ForCodeSize,
                                              Depth + 1),
                         Op.getOperand(1), Flags);
    // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       getNegatedExpression(Op.getOperand(1), DAG,
                                            LegalOperations, ForCodeSize,
                                            Depth + 1),
                       Op.getOperand(0), Flags);
  case ISD::FSUB:
    // fold (fneg (fsub 0, B)) -> B
    if (ConstantFPSDNode *N0CFP =
            isConstOrConstSplatFP(Op.getOperand(0), /*AllowUndefs*/ true))
      if (N0CFP->isZero())
        return Op.getOperand(1);

    // fold (fneg (fsub A, B)) -> (fsub B, A)
    return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(0), Flags);

  case ISD::FMUL:
  case ISD::FDIV:
    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
    if (isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, ForCodeSize,
                           Depth + 1))
      return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                         getNegatedExpression(Op.getOperand(0), DAG,
                                              LegalOperations, ForCodeSize,
                                              Depth + 1),
                         Op.getOperand(1), Flags);

    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
    return DAG.getNode(
        Op.getOpcode(), SDLoc(Op), Op.getValueType(), Op.getOperand(0),
        getNegatedExpression(Op.getOperand(1), DAG, LegalOperations,
                             ForCodeSize, Depth + 1),
        Flags);

  case ISD::FMA:
  case ISD::FMAD: {
    assert((DAG.getTarget().Options.NoSignedZerosFPMath ||
            Flags.hasNoSignedZeros()) &&
           "Expected NSZ fp-flag");

    // The addend is always negated.
    SDValue Neg2 = getNegatedExpression(Op.getOperand(2), DAG, LegalOperations,
                                        ForCodeSize, Depth + 1);

    char V0 = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    char V1 = isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations,
                                 ForCodeSize, Depth + 1);
    // TODO: This is a hack. It is possible that costs have changed between now
    //       and the initial calls to isNegatibleForFree(). That is because we
    //       are rewriting the expression, and that may change the number of
    //       uses (and therefore the cost) of values. If the negation costs are
    //       equal, only negate this value if it is a constant. Otherwise, try
    //       operand 1. A better fix would eliminate uses as a cost factor or
    //       track the change in uses as we rewrite the expression.
    if (V0 > V1 || (V0 == V1 && isa<ConstantFPSDNode>(Op.getOperand(0)))) {
      // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
      SDValue Neg0 = getNegatedExpression(
          Op.getOperand(0), DAG, LegalOperations, ForCodeSize, Depth + 1);
      return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Neg0,
                         Op.getOperand(1), Neg2, Flags);
    }

    // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
    SDValue Neg1 = getNegatedExpression(Op.getOperand(1), DAG, LegalOperations,
                                        ForCodeSize, Depth + 1);
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       Op.getOperand(0), Neg1, Neg2, Flags);
  }

  case ISD::FP_EXTEND:
  case ISD::FSIN:
    // These commute with negation: negate the operand instead.
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                       getNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, ForCodeSize,
                                            Depth + 1));
  case ISD::FP_ROUND:
    // FP_ROUND also carries a truncation-flag operand, which is preserved.
    return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
                       getNegatedExpression(Op.getOperand(0), DAG,
                                            LegalOperations, ForCodeSize,
                                            Depth + 1),
                       Op.getOperand(1));
  }

  llvm_unreachable("Unknown code");
}
5735 
5736 //===----------------------------------------------------------------------===//
5737 // Legalization Utilities
5738 //===----------------------------------------------------------------------===//
5739 
// Expand a multiply of type VT into operations on the half-width type HiLoVT.
//
// Opcode is ISD::MUL, ISD::UMUL_LOHI or ISD::SMUL_LOHI. On success, the
// half-width pieces of the product are appended to Result — two values
// {Lo, Hi} for MUL, four values (the halves of the double-width product,
// lowest first) for the *MUL_LOHI opcodes — and true is returned. Returns
// false if the target provides none of the required half-width multiply
// operations; note that Result may already be partially populated when a
// later step fails.
//
// LL/LH/RL/RH may optionally supply pre-split halves of LHS/RHS; they must be
// either all null or all set (asserted below). Null halves are derived here
// via TRUNCATE and SRL when those operations are available.
bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl,
                                    SDValue LHS, SDValue RHS,
                                    SmallVectorImpl<SDValue> &Result,
                                    EVT HiLoVT, SelectionDAG &DAG,
                                    MulExpansionKind Kind, SDValue LL,
                                    SDValue LH, SDValue RL, SDValue RH) const {
  assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
         Opcode == ISD::SMUL_LOHI);

  // Determine which half-width "multiply producing the high part" forms we
  // may emit. MulExpansionKind::Always bypasses the legality check (used when
  // expansion must succeed regardless of what is natively legal).
  bool HasMULHS = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
  bool HasMULHU = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
  bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
  bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);

  // Without any half-width high-part multiply there is nothing we can do.
  if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
    return false;

  unsigned OuterBitSize = VT.getScalarSizeInBits();
  unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();
  unsigned LHSSB = DAG.ComputeNumSignBits(LHS);
  unsigned RHSSB = DAG.ComputeNumSignBits(RHS);

  // LL, LH, RL, and RH must be either all NULL or all set to a value.
  assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
         (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));

  // Helper that multiplies two half-width values L and R and returns both
  // halves of the product in Lo/Hi, preferring a single *MUL_LOHI node and
  // falling back to a MUL + MULH* pair. Returns false if neither form is
  // available for the requested signedness.
  SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
  auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
                          bool Signed) -> bool {
    if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
      Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R);
      Hi = SDValue(Lo.getNode(), 1);
      return true;
    }
    if ((Signed && HasMULHS) || (!Signed && HasMULHU)) {
      Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R);
      Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R);
      return true;
    }
    return false;
  };

  SDValue Lo, Hi;

  // Derive the low halves by truncation if the caller did not supply them.
  if (!LL.getNode() && !RL.getNode() &&
      isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
    LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS);
    RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS);
  }

  if (!LL.getNode())
    return false;

  APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
  if (DAG.MaskedValueIsZero(LHS, HighMask) &&
      DAG.MaskedValueIsZero(RHS, HighMask)) {
    // The inputs are both zero-extended. A single unsigned half-width
    // multiply yields the entire product; for the LOHI opcodes the two upper
    // result halves are zero.
    if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) {
      Result.push_back(Lo);
      Result.push_back(Hi);
      if (Opcode != ISD::MUL) {
        SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
        Result.push_back(Zero);
        Result.push_back(Zero);
      }
      return true;
    }
  }

  if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize &&
      RHSSB > InnerBitSize) {
    // The input values are both sign-extended (more sign bits than the inner
    // width), so a single signed half-width multiply yields the low VT bits.
    // TODO non-MUL case?
    if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) {
      Result.push_back(Lo);
      Result.push_back(Hi);
      return true;
    }
  }

  // Amount to shift right by to extract the upper half of a VT value.
  unsigned ShiftAmount = OuterBitSize - InnerBitSize;
  EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout());
  if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) {
    // FIXME getShiftAmountTy does not always return a sensible result when VT
    // is an illegal type, and so the type may be too small to fit the shift
    // amount. Override it with i32. The shift will have to be legalized.
    ShiftAmountTy = MVT::i32;
  }
  SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy);

  // Derive the high halves via shift + truncate if not supplied.
  if (!LH.getNode() && !RH.getNode() &&
      isOperationLegalOrCustom(ISD::SRL, VT) &&
      isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
    LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift);
    LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
    RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift);
    RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
  }

  if (!LH.getNode())
    return false;

  // LL * RL produces the lowest half of the product directly.
  if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false))
    return false;

  Result.push_back(Lo);

  if (Opcode == ISD::MUL) {
    // For plain MUL only the low VT bits matter: the high half of the result
    // is Hi + LL*RH + LH*RL (the LH*RH term lands entirely above VT).
    RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
    LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
    Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
    Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
    Result.push_back(Hi);
    return true;
  }

  // Compute the full width result.
  // Merge combines two half-width values into a full-width one:
  //   Merge(Lo, Hi) = zext(Lo) | (zext(Hi) << InnerBitSize).
  auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue {
    Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
    Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
    Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
    return DAG.getNode(ISD::OR, dl, VT, Lo, Hi);
  };

  // Accumulate the middle partial products (LL*RH and LH*RL) on top of the
  // high half of LL*RL.
  SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
  if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false))
    return false;

  // This is effectively the add part of a multiply-add of half-sized operands,
  // so it cannot overflow.
  Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));

  if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false))
    return false;

  SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
  EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // The second middle-product addition can carry out. Prefer the glue-based
  // ADDC/ADDE pair when the target supports it; otherwise use the boolean
  // carry form ADDCARRY.
  bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) &&
                  isOperationLegalOrCustom(ISD::ADDE, VT));
  if (UseGlue)
    Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next,
                       Merge(Lo, Hi));
  else
    Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next,
                       Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType));

  // Second quarter of the result is the low half of the accumulated middle
  // sum; carry the rest (plus the carry bit) into the upper product.
  SDValue Carry = Next.getValue(1);
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);

  // LH * RH contributes the top half; for SMUL_LOHI it is a signed multiply.
  if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI))
    return false;

  // Propagate the carry from the middle additions into the top product.
  if (UseGlue)
    Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero,
                     Carry);
  else
    Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi,
                     Zero, Carry);

  Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));

  if (Opcode == ISD::SMUL_LOHI) {
    // Signed correction: the halves were combined with unsigned arithmetic,
    // so when a high half is negative the opposite operand's low half was
    // effectively over-counted — subtract it back out of the upper part.
    SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
                                  DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL));
    Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT);

    NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
                          DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL));
    Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT);
  }

  // Split the remaining VT-wide value into the top two result quarters.
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  return true;
}
5922 
5923 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
5924                                SelectionDAG &DAG, MulExpansionKind Kind,
5925                                SDValue LL, SDValue LH, SDValue RL,
5926                                SDValue RH) const {
5927   SmallVector<SDValue, 2> Result;
5928   bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), N,
5929                            N->getOperand(0), N->getOperand(1), Result, HiLoVT,
5930                            DAG, Kind, LL, LH, RL, RH);
5931   if (Ok) {
5932     assert(Result.size() == 2);
5933     Lo = Result[0];
5934     Hi = Result[1];
5935   }
5936   return Ok;
5937 }
5938 
5939 bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result,
5940                                        SelectionDAG &DAG) const {
5941   EVT VT = Node->getValueType(0);
5942 
5943   if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) ||
5944                         !isOperationLegalOrCustom(ISD::SRL, VT) ||
5945                         !isOperationLegalOrCustom(ISD::SUB, VT) ||
5946                         !isOperationLegalOrCustomOrPromote(ISD::OR, VT)))
5947     return false;
5948 
5949   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
5950   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
5951   SDValue X = Node->getOperand(0);
5952   SDValue Y = Node->getOperand(1);
5953   SDValue Z = Node->getOperand(2);
5954 
5955   unsigned EltSizeInBits = VT.getScalarSizeInBits();
5956   bool IsFSHL = Node->getOpcode() == ISD::FSHL;
5957   SDLoc DL(SDValue(Node, 0));
5958 
5959   EVT ShVT = Z.getValueType();
5960   SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
5961   SDValue Zero = DAG.getConstant(0, DL, ShVT);
5962 
5963   SDValue ShAmt;
5964   if (isPowerOf2_32(EltSizeInBits)) {
5965     SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, ShVT);
5966     ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask);
5967   } else {
5968     ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC);
5969   }
5970 
5971   SDValue InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt);
5972   SDValue ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt);
5973   SDValue ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt);
5974   SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
5975 
5976   // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
5977   // and that is undefined. We must compare and select to avoid UB.
5978   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ShVT);
5979 
5980   // For fshl, 0-shift returns the 1st arg (X).
5981   // For fshr, 0-shift returns the 2nd arg (Y).
5982   SDValue IsZeroShift = DAG.getSetCC(DL, CCVT, ShAmt, Zero, ISD::SETEQ);
5983   Result = DAG.getSelect(DL, VT, IsZeroShift, IsFSHL ? X : Y, Or);
5984   return true;
5985 }
5986 
5987 // TODO: Merge with expandFunnelShift.
5988 bool TargetLowering::expandROT(SDNode *Node, SDValue &Result,
5989                                SelectionDAG &DAG) const {
5990   EVT VT = Node->getValueType(0);
5991   unsigned EltSizeInBits = VT.getScalarSizeInBits();
5992   bool IsLeft = Node->getOpcode() == ISD::ROTL;
5993   SDValue Op0 = Node->getOperand(0);
5994   SDValue Op1 = Node->getOperand(1);
5995   SDLoc DL(SDValue(Node, 0));
5996 
5997   EVT ShVT = Op1.getValueType();
5998   SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
5999 
6000   // If a rotate in the other direction is legal, use it.
6001   unsigned RevRot = IsLeft ? ISD::ROTR : ISD::ROTL;
6002   if (isOperationLegal(RevRot, VT)) {
6003     SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, Op1);
6004     Result = DAG.getNode(RevRot, DL, VT, Op0, Sub);
6005     return true;
6006   }
6007 
6008   if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) ||
6009                         !isOperationLegalOrCustom(ISD::SRL, VT) ||
6010                         !isOperationLegalOrCustom(ISD::SUB, VT) ||
6011                         !isOperationLegalOrCustomOrPromote(ISD::OR, VT) ||
6012                         !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
6013     return false;
6014 
6015   // Otherwise,
6016   //   (rotl x, c) -> (or (shl x, (and c, w-1)), (srl x, (and w-c, w-1)))
6017   //   (rotr x, c) -> (or (srl x, (and c, w-1)), (shl x, (and w-c, w-1)))
6018   //
6019   assert(isPowerOf2_32(EltSizeInBits) && EltSizeInBits > 1 &&
6020          "Expecting the type bitwidth to be a power of 2");
6021   unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL;
6022   unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL;
6023   SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT);
6024   SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, Op1);
6025   SDValue And0 = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC);
6026   SDValue And1 = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC);
6027   Result = DAG.getNode(ISD::OR, DL, VT, DAG.getNode(ShOpc, DL, VT, Op0, And0),
6028                        DAG.getNode(HsOpc, DL, VT, Op0, And1));
6029   return true;
6030 }
6031 
// Expand FP_TO_SINT by decomposing the IEEE-754 bit pattern with integer
// operations. Only the f32 -> i64 case is handled; returns false otherwise,
// including for all strict-FP nodes (the expansion would lose required trap
// behavior). On success stores the converted value in Result.
bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
                                      SelectionDAG &DAG) const {
  // Strict nodes carry the chain in operand 0; the FP source follows it.
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);
  SDLoc dl(SDValue(Node, 0));

  // FIXME: Only f32 to i64 conversions are supported.
  if (SrcVT != MVT::f32 || DstVT != MVT::i64)
    return false;

  if (Node->isStrictFPOpcode())
    // When a NaN is converted to an integer a trap is allowed. We can't
    // use this expansion here because it would eliminate that trap. Other
    // traps are also allowed and cannot be eliminated. See
    // IEEE 754-2008 sec 5.8.
    return false;

  // Expand f32 -> i64 conversion
  // This algorithm comes from compiler-rt's implementation of fixsfdi:
  // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
  unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
  EVT IntVT = SrcVT.changeTypeToInteger();
  EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());

  // f32 layout constants: 8 exponent bits above a 23-bit mantissa, with an
  // exponent bias of 127 and an implicit leading mantissa bit (0x00800000).
  SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
  SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
  SDValue Bias = DAG.getConstant(127, dl, IntVT);
  SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
  SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
  SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);

  // Reinterpret the float as its raw integer bits.
  SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);

  // Extract the biased exponent field and remove the bias.
  SDValue ExponentBits = DAG.getNode(
      ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
      DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
  SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);

  // Arithmetic-shift the sign bit across the whole word: Sign is all-ones for
  // negative inputs, zero otherwise.
  SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
                             DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
                             DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
  Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);

  // Recover the full significand by restoring the implicit leading one.
  SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
                          DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
                          DAG.getConstant(0x00800000, dl, IntVT));

  R = DAG.getZExtOrTrunc(R, dl, DstVT);

  // Scale the significand by 2^(Exponent - 23): shift left when the exponent
  // exceeds the mantissa width, right (truncating the fraction) otherwise.
  R = DAG.getSelectCC(
      dl, Exponent, ExponentLoBit,
      DAG.getNode(ISD::SHL, dl, DstVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
                      dl, IntShVT)),
      DAG.getNode(ISD::SRL, dl, DstVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
                      dl, IntShVT)),
      ISD::SETGT);

  // Apply the sign: (R ^ Sign) - Sign negates R when Sign is all-ones.
  SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
                            DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign);

  // A negative unbiased exponent means |Src| < 1, which truncates to zero.
  Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
                           DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
  return true;
}
6102 
// Expand [STRICT_]FP_TO_UINT in terms of [STRICT_]FP_TO_SINT. On success
// stores the converted value in Result — and, for strict nodes, the new chain
// in Chain — and returns true. Returns false for vector types lacking the
// required signed conversion / XOR support.
bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
                                      SDValue &Chain,
                                      SelectionDAG &DAG) const {
  SDLoc dl(SDValue(Node, 0));
  // Strict nodes carry the chain in operand 0; the FP source follows it.
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);

  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);
  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
  EVT DstSetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);

  // Only expand vector types if we have the appropriate vector bit operations.
  unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
                                                   ISD::FP_TO_SINT;
  if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
                           !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
    return false;

  // If the maximum float value is smaller then the signed integer range,
  // the destination signmask can't be represented by the float, so we can
  // just use FP_TO_SINT directly.
  const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
  APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits()));
  APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
  if (APFloat::opOverflow &
      APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
    if (Node->isStrictFPOpcode()) {
      Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
                           { Node->getOperand(0), Src });
      Chain = Result.getValue(1);
    } else
      Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
    return true;
  }

  // Cst is the FP value of the destination signmask (2^(DstBits-1));
  // Sel is true when Src is strictly below it, i.e. FP_TO_SINT is safe.
  SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
  SDValue Sel;

  if (Node->isStrictFPOpcode())
    // Strict mode: use a signaling comparison chained after the incoming
    // chain so exception ordering is preserved.
    Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
                       Node->getOperand(0), /*IsSignaling*/ true);
  else
    Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT);

  bool Strict = Node->isStrictFPOpcode() ||
                shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false);

  if (Strict) {
    // Expand based on maximum range of FP_TO_SINT, if the value exceeds the
    // signmask then offset (the result of which should be fully representable).
    // Sel = Src < 0x8000000000000000
    // FltOfs = select Sel, 0, 0x8000000000000000
    // IntOfs = select Sel, 0, 0x8000000000000000
    // Result = fp_to_sint(Src - FltOfs) ^ IntOfs

    // TODO: Should any fast-math-flags be set for the FSUB?
    SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel,
                                   DAG.getConstantFP(0.0, dl, SrcVT), Cst);
    Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
    SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel,
                                   DAG.getConstant(0, dl, DstVT),
                                   DAG.getConstant(SignMask, dl, DstVT));
    SDValue SInt;
    if (Node->isStrictFPOpcode()) {
      // Thread the chain: FSUB feeds FP_TO_SINT, whose chain becomes the
      // node's outgoing chain.
      SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other },
                                { Node->getOperand(0), Src, FltOfs });
      SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
                         { Val.getValue(1), Val });
      Chain = SInt.getValue(1);
    } else {
      SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs);
      SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val);
    }
    // XOR with the signmask undoes the offset when it was applied.
    Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
  } else {
    // Expand based on maximum range of FP_TO_SINT:
    // True = fp_to_sint(Src)
    // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000)
    // Result = select (Src < 0x8000000000000000), True, False

    SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
    // TODO: Should any fast-math-flags be set for the FSUB?
    SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT,
                                DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst));
    // XOR with the signmask is equivalent to adding it here.
    False = DAG.getNode(ISD::XOR, dl, DstVT, False,
                        DAG.getConstant(SignMask, dl, DstVT));
    Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
    Result = DAG.getSelect(dl, DstVT, Sel, True, False);
  }
  return true;
}
6197 
// Expand [STRICT_]UINT_TO_FP of an i64 source using signed conversions and FP
// arithmetic. Only i64 -> f32 and i64 -> f64 (scalar or vector element) are
// handled; returns false otherwise. On success stores the converted value in
// Result — and, for strict nodes, the new chain in Chain.
bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
                                      SDValue &Chain,
                                      SelectionDAG &DAG) const {
  // Strict nodes carry the chain in operand 0; the source follows it.
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);

  if (SrcVT.getScalarType() != MVT::i64)
    return false;

  SDLoc dl(SDValue(Node, 0));
  EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout());

  if (DstVT.getScalarType() == MVT::f32) {
    // Only expand vector types if we have the appropriate vector bit
    // operations.
    if (SrcVT.isVector() &&
        (!isOperationLegalOrCustom(ISD::SRL, SrcVT) ||
         !isOperationLegalOrCustom(ISD::FADD, DstVT) ||
         !isOperationLegalOrCustom(ISD::SINT_TO_FP, SrcVT) ||
         !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) ||
         !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT)))
      return false;

    // For unsigned conversions, convert them to signed conversions using the
    // algorithm from the x86_64 __floatundisf in compiler_rt.

    // TODO: This really should be implemented using a branch rather than a
    // select.  We happen to get lucky and machinesink does the right
    // thing most of the time.  This would be a good candidate for a
    // pseudo-op, or, even better, for whole-function isel.
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);

    // SETLT on the signed interpretation is true exactly when the top bit of
    // Src is set, i.e. the value does not fit in a signed i64.
    SDValue SignBitTest = DAG.getSetCC(
        dl, SetCCVT, Src, DAG.getConstant(0, dl, SrcVT), ISD::SETLT);

    // Slow path input: halve the value, keeping the low bit OR'd in so the
    // final doubling rounds the same way the direct conversion would.
    SDValue ShiftConst = DAG.getConstant(1, dl, ShiftVT);
    SDValue Shr = DAG.getNode(ISD::SRL, dl, SrcVT, Src, ShiftConst);
    SDValue AndConst = DAG.getConstant(1, dl, SrcVT);
    SDValue And = DAG.getNode(ISD::AND, dl, SrcVT, Src, AndConst);
    SDValue Or = DAG.getNode(ISD::OR, dl, SrcVT, And, Shr);

    SDValue Slow, Fast;
    if (Node->isStrictFPOpcode()) {
      // In strict mode, we must avoid spurious exceptions, and therefore
      // must make sure to only emit a single STRICT_SINT_TO_FP.
      SDValue InCvt = DAG.getSelect(dl, SrcVT, SignBitTest, Or, Src);
      Fast = DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, { DstVT, MVT::Other },
                         { Node->getOperand(0), InCvt });
      // Double the halved value to recover the original magnitude.
      Slow = DAG.getNode(ISD::STRICT_FADD, dl, { DstVT, MVT::Other },
                         { Fast.getValue(1), Fast, Fast });
      Chain = Slow.getValue(1);
      // The STRICT_SINT_TO_FP inherits the exception mode from the
      // incoming STRICT_UINT_TO_FP node; the STRICT_FADD node can
      // never raise any exception.
      SDNodeFlags Flags;
      Flags.setNoFPExcept(Node->getFlags().hasNoFPExcept());
      Fast->setFlags(Flags);
      Flags.setNoFPExcept(true);
      Slow->setFlags(Flags);
    } else {
      // Non-strict: convert both ways and pick by the sign-bit test below.
      SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Or);
      Slow = DAG.getNode(ISD::FADD, dl, DstVT, SignCvt, SignCvt);
      Fast = DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
    }

    Result = DAG.getSelect(dl, DstVT, SignBitTest, Slow, Fast);
    return true;
  }

  if (DstVT.getScalarType() == MVT::f64) {
    // Only expand vector types if we have the appropriate vector bit
    // operations.
    if (SrcVT.isVector() &&
        (!isOperationLegalOrCustom(ISD::SRL, SrcVT) ||
         !isOperationLegalOrCustom(ISD::FADD, DstVT) ||
         !isOperationLegalOrCustom(ISD::FSUB, DstVT) ||
         !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) ||
         !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT)))
      return false;

    // Implementation of unsigned i64 to f64 following the algorithm in
    // __floatundidf in compiler_rt. This implementation has the advantage
    // of performing rounding correctly, both in the default rounding mode
    // and in all alternate rounding modes.
    // The magic integer constants are the f64 bit patterns of 2^52 and 2^84;
    // OR-ing a 32-bit half into their mantissa fields yields 2^52 + lo and
    // 2^84 + hi without any rounding.
    SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT);
    SDValue TwoP84PlusTwoP52 = DAG.getConstantFP(
        BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT);
    SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT);
    SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT);
    SDValue HiShift = DAG.getConstant(32, dl, ShiftVT);

    // Split the i64 into 32-bit halves and embed each in an f64 mantissa.
    SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask);
    SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift);
    SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52);
    SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84);
    SDValue LoFlt = DAG.getBitcast(DstVT, LoOr);
    SDValue HiFlt = DAG.getBitcast(DstVT, HiOr);
    // (hi_f - (2^84 + 2^52)) + lo_f removes both biases; the final FADD is
    // the only operation that can round.
    if (Node->isStrictFPOpcode()) {
      SDValue HiSub =
          DAG.getNode(ISD::STRICT_FSUB, dl, {DstVT, MVT::Other},
                      {Node->getOperand(0), HiFlt, TwoP84PlusTwoP52});
      Result = DAG.getNode(ISD::STRICT_FADD, dl, {DstVT, MVT::Other},
                           {HiSub.getValue(1), LoFlt, HiSub});
      Chain = Result.getValue(1);
    } else {
      SDValue HiSub =
          DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52);
      Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub);
    }
    return true;
  }

  return false;
}
6315 
6316 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
6317                                               SelectionDAG &DAG) const {
6318   SDLoc dl(Node);
6319   unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ?
6320     ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
6321   EVT VT = Node->getValueType(0);
6322   if (isOperationLegalOrCustom(NewOp, VT)) {
6323     SDValue Quiet0 = Node->getOperand(0);
6324     SDValue Quiet1 = Node->getOperand(1);
6325 
6326     if (!Node->getFlags().hasNoNaNs()) {
6327       // Insert canonicalizes if it's possible we need to quiet to get correct
6328       // sNaN behavior.
6329       if (!DAG.isKnownNeverSNaN(Quiet0)) {
6330         Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0,
6331                              Node->getFlags());
6332       }
6333       if (!DAG.isKnownNeverSNaN(Quiet1)) {
6334         Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1,
6335                              Node->getFlags());
6336       }
6337     }
6338 
6339     return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
6340   }
6341 
6342   // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that
6343   // instead if there are no NaNs.
6344   if (Node->getFlags().hasNoNaNs()) {
6345     unsigned IEEE2018Op =
6346         Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM;
6347     if (isOperationLegalOrCustom(IEEE2018Op, VT)) {
6348       return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
6349                          Node->getOperand(1), Node->getFlags());
6350     }
6351   }
6352 
6353   // If none of the above worked, but there are no NaNs, then expand to
6354   // a compare/select sequence.  This is required for correctness since
6355   // InstCombine might have canonicalized a fcmp+select sequence to a
6356   // FMINNUM/FMAXNUM node.  If we were to fall through to the default
6357   // expansion to libcall, we might introduce a link-time dependency
6358   // on libm into a file that originally did not have one.
6359   if (Node->getFlags().hasNoNaNs()) {
6360     ISD::CondCode Pred =
6361         Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT;
6362     SDValue Op1 = Node->getOperand(0);
6363     SDValue Op2 = Node->getOperand(1);
6364     SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred);
6365     // Copy FMF flags, but always set the no-signed-zeros flag
6366     // as this is implied by the FMINNUM/FMAXNUM semantics.
6367     SDNodeFlags Flags = Node->getFlags();
6368     Flags.setNoSignedZeros(true);
6369     SelCC->setFlags(Flags);
6370     return SelCC;
6371   }
6372 
6373   return SDValue();
6374 }
6375 
6376 bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result,
6377                                  SelectionDAG &DAG) const {
6378   SDLoc dl(Node);
6379   EVT VT = Node->getValueType(0);
6380   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6381   SDValue Op = Node->getOperand(0);
6382   unsigned Len = VT.getScalarSizeInBits();
6383   assert(VT.isInteger() && "CTPOP not implemented for this type.");
6384 
6385   // TODO: Add support for irregular type lengths.
6386   if (!(Len <= 128 && Len % 8 == 0))
6387     return false;
6388 
6389   // Only expand vector types if we have the appropriate vector bit operations.
6390   if (VT.isVector() && (!isOperationLegalOrCustom(ISD::ADD, VT) ||
6391                         !isOperationLegalOrCustom(ISD::SUB, VT) ||
6392                         !isOperationLegalOrCustom(ISD::SRL, VT) ||
6393                         (Len != 8 && !isOperationLegalOrCustom(ISD::MUL, VT)) ||
6394                         !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
6395     return false;
6396 
6397   // This is the "best" algorithm from
6398   // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
6399   SDValue Mask55 =
6400       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT);
6401   SDValue Mask33 =
6402       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT);
6403   SDValue Mask0F =
6404       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT);
6405   SDValue Mask01 =
6406       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);
6407 
6408   // v = v - ((v >> 1) & 0x55555555...)
6409   Op = DAG.getNode(ISD::SUB, dl, VT, Op,
6410                    DAG.getNode(ISD::AND, dl, VT,
6411                                DAG.getNode(ISD::SRL, dl, VT, Op,
6412                                            DAG.getConstant(1, dl, ShVT)),
6413                                Mask55));
6414   // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
6415   Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33),
6416                    DAG.getNode(ISD::AND, dl, VT,
6417                                DAG.getNode(ISD::SRL, dl, VT, Op,
6418                                            DAG.getConstant(2, dl, ShVT)),
6419                                Mask33));
6420   // v = (v + (v >> 4)) & 0x0F0F0F0F...
6421   Op = DAG.getNode(ISD::AND, dl, VT,
6422                    DAG.getNode(ISD::ADD, dl, VT, Op,
6423                                DAG.getNode(ISD::SRL, dl, VT, Op,
6424                                            DAG.getConstant(4, dl, ShVT))),
6425                    Mask0F);
6426   // v = (v * 0x01010101...) >> (Len - 8)
6427   if (Len > 8)
6428     Op =
6429         DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
6430                     DAG.getConstant(Len - 8, dl, ShVT));
6431 
6432   Result = Op;
6433   return true;
6434 }
6435 
6436 bool TargetLowering::expandCTLZ(SDNode *Node, SDValue &Result,
6437                                 SelectionDAG &DAG) const {
6438   SDLoc dl(Node);
6439   EVT VT = Node->getValueType(0);
6440   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6441   SDValue Op = Node->getOperand(0);
6442   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6443 
6444   // If the non-ZERO_UNDEF version is supported we can use that instead.
6445   if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF &&
6446       isOperationLegalOrCustom(ISD::CTLZ, VT)) {
6447     Result = DAG.getNode(ISD::CTLZ, dl, VT, Op);
6448     return true;
6449   }
6450 
6451   // If the ZERO_UNDEF version is supported use that and handle the zero case.
6452   if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) {
6453     EVT SetCCVT =
6454         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
6455     SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op);
6456     SDValue Zero = DAG.getConstant(0, dl, VT);
6457     SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ);
6458     Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero,
6459                          DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ);
6460     return true;
6461   }
6462 
6463   // Only expand vector types if we have the appropriate vector bit operations.
6464   if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) ||
6465                         !isOperationLegalOrCustom(ISD::CTPOP, VT) ||
6466                         !isOperationLegalOrCustom(ISD::SRL, VT) ||
6467                         !isOperationLegalOrCustomOrPromote(ISD::OR, VT)))
6468     return false;
6469 
6470   // for now, we do this:
6471   // x = x | (x >> 1);
6472   // x = x | (x >> 2);
6473   // ...
6474   // x = x | (x >>16);
6475   // x = x | (x >>32); // for 64-bit input
6476   // return popcount(~x);
6477   //
6478   // Ref: "Hacker's Delight" by Henry Warren
6479   for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) {
6480     SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT);
6481     Op = DAG.getNode(ISD::OR, dl, VT, Op,
6482                      DAG.getNode(ISD::SRL, dl, VT, Op, Tmp));
6483   }
6484   Op = DAG.getNOT(dl, Op, VT);
6485   Result = DAG.getNode(ISD::CTPOP, dl, VT, Op);
6486   return true;
6487 }
6488 
6489 bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result,
6490                                 SelectionDAG &DAG) const {
6491   SDLoc dl(Node);
6492   EVT VT = Node->getValueType(0);
6493   SDValue Op = Node->getOperand(0);
6494   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6495 
6496   // If the non-ZERO_UNDEF version is supported we can use that instead.
6497   if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF &&
6498       isOperationLegalOrCustom(ISD::CTTZ, VT)) {
6499     Result = DAG.getNode(ISD::CTTZ, dl, VT, Op);
6500     return true;
6501   }
6502 
6503   // If the ZERO_UNDEF version is supported use that and handle the zero case.
6504   if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) {
6505     EVT SetCCVT =
6506         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
6507     SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op);
6508     SDValue Zero = DAG.getConstant(0, dl, VT);
6509     SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ);
6510     Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero,
6511                          DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ);
6512     return true;
6513   }
6514 
6515   // Only expand vector types if we have the appropriate vector bit operations.
6516   if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) ||
6517                         (!isOperationLegalOrCustom(ISD::CTPOP, VT) &&
6518                          !isOperationLegalOrCustom(ISD::CTLZ, VT)) ||
6519                         !isOperationLegalOrCustom(ISD::SUB, VT) ||
6520                         !isOperationLegalOrCustomOrPromote(ISD::AND, VT) ||
6521                         !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
6522     return false;
6523 
6524   // for now, we use: { return popcount(~x & (x - 1)); }
6525   // unless the target has ctlz but not ctpop, in which case we use:
6526   // { return 32 - nlz(~x & (x-1)); }
6527   // Ref: "Hacker's Delight" by Henry Warren
6528   SDValue Tmp = DAG.getNode(
6529       ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT),
6530       DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT)));
6531 
6532   // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
6533   if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) {
6534     Result =
6535         DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT),
6536                     DAG.getNode(ISD::CTLZ, dl, VT, Tmp));
6537     return true;
6538   }
6539 
6540   Result = DAG.getNode(ISD::CTPOP, dl, VT, Tmp);
6541   return true;
6542 }
6543 
6544 bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
6545                                SelectionDAG &DAG) const {
6546   SDLoc dl(N);
6547   EVT VT = N->getValueType(0);
6548   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6549   SDValue Op = N->getOperand(0);
6550 
6551   // Only expand vector types if we have the appropriate vector operations.
6552   if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SRA, VT) ||
6553                         !isOperationLegalOrCustom(ISD::ADD, VT) ||
6554                         !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
6555     return false;
6556 
6557   SDValue Shift =
6558       DAG.getNode(ISD::SRA, dl, VT, Op,
6559                   DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT));
6560   SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift);
6561   Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift);
6562   return true;
6563 }
6564 
6565 std::pair<SDValue, SDValue>
6566 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
6567                                     SelectionDAG &DAG) const {
6568   SDLoc SL(LD);
6569   SDValue Chain = LD->getChain();
6570   SDValue BasePTR = LD->getBasePtr();
6571   EVT SrcVT = LD->getMemoryVT();
6572   ISD::LoadExtType ExtType = LD->getExtensionType();
6573 
6574   unsigned NumElem = SrcVT.getVectorNumElements();
6575 
6576   EVT SrcEltVT = SrcVT.getScalarType();
6577   EVT DstEltVT = LD->getValueType(0).getScalarType();
6578 
6579   unsigned Stride = SrcEltVT.getSizeInBits() / 8;
6580   assert(SrcEltVT.isByteSized());
6581 
6582   SmallVector<SDValue, 8> Vals;
6583   SmallVector<SDValue, 8> LoadChains;
6584 
6585   for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
6586     SDValue ScalarLoad =
6587         DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
6588                        LD->getPointerInfo().getWithOffset(Idx * Stride),
6589                        SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride),
6590                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
6591 
6592     BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, Stride);
6593 
6594     Vals.push_back(ScalarLoad.getValue(0));
6595     LoadChains.push_back(ScalarLoad.getValue(1));
6596   }
6597 
6598   SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
6599   SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals);
6600 
6601   return std::make_pair(Value, NewChain);
6602 }
6603 
SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  // Lower a vector store into one truncating scalar store per element. If
  // the element type is not byte-sized, the elements are instead bit-packed
  // into a single integer (in memory layout order) and written with one
  // store. Returns the output chain.
  SDLoc SL(ST);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  EVT StVT = ST->getMemoryVT();

  // The type of the data we want to save
  EVT RegVT = Value.getValueType();
  EVT RegSclVT = RegVT.getScalarType();

  // The type of data as saved in memory.
  EVT MemSclVT = StVT.getScalarType();

  EVT IdxVT = getVectorIdxTy(DAG.getDataLayout());
  unsigned NumElem = StVT.getVectorNumElements();

  // A vector must always be stored in memory as-is, i.e. without any padding
  // between the elements, since various code depend on it, e.g. in the
  // handling of a bitcast of a vector type to int, which may be done with a
  // vector store followed by an integer load. A vector that does not have
  // elements that are byte-sized must therefore be stored as an integer
  // built out of the extracted vector elements.
  if (!MemSclVT.isByteSized()) {
    unsigned NumBits = StVT.getSizeInBits();
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);

    // Accumulator for the bit-packed elements; starts all-zero.
    SDValue CurrVal = DAG.getConstant(0, SL, IntVT);

    for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
      // Extract element Idx, truncate it to the in-memory scalar width, and
      // zero-extend into the packed integer type.
      SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
                                DAG.getConstant(Idx, SL, IdxVT));
      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt);
      SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc);
      // On big-endian layouts element 0 belongs in the most significant
      // position, so mirror the lane index when computing the shift.
      unsigned ShiftIntoIdx =
          (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
      SDValue ShiftAmount =
          DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT);
      SDValue ShiftedElt =
          DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount);
      CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt);
    }

    // Store the packed integer, reusing the original store's memory info.
    return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
                        ST->getAlignment(), ST->getMemOperand()->getFlags(),
                        ST->getAAInfo());
  }

  // Store Stride in bytes
  unsigned Stride = MemSclVT.getSizeInBits() / 8;
  assert(Stride && "Zero stride!");
  // Extract each of the elements from the original vector and save them into
  // memory individually.
  SmallVector<SDValue, 8> Stores;
  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
                              DAG.getConstant(Idx, SL, IdxVT));

    SDValue Ptr = DAG.getObjectPtrOffset(SL, BasePtr, Idx * Stride);

    // This scalar TruncStore may be illegal, but we legalize it later.
    SDValue Store = DAG.getTruncStore(
        Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
        MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride),
        ST->getMemOperand()->getFlags(), ST->getAAInfo());

    Stores.push_back(Store);
  }

  // The scalar stores are independent; merge their chains.
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
}
6677 
std::pair<SDValue, SDValue>
TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
  // Expand a load whose alignment the target cannot handle natively.
  // Returns {loaded value, output chain}. Three strategies are tried:
  //   1. FP/vector type with a legal same-width integer type: one misaligned
  //      integer load plus a bitcast (or scalarize when integer loads of
  //      that width are unsupported).
  //   2. FP/vector type otherwise: copy the bytes to an aligned stack slot
  //      with register-width integer loads/stores, then reload from there.
  //   3. Integer type: split into two half-width loads combined with
  //      shift/or.
  assert(LD->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed loads not implemented!");
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  SDLoc dl(LD);
  auto &MF = DAG.getMachineFunction();

  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
      if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
          LoadedVT.isVector()) {
        // Scalarize the load and let the individual components be handled.
        return scalarizeVectorLoad(LD, DAG);
      }

      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
                                    LD->getMemOperand());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (LoadedVT != VT)
        // Extending load: widen after the bitcast (FP_EXTEND for FP types).
        Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
                             ISD::ANY_EXTEND, dl, VT, Result);

      return std::make_pair(Result, newLoad.getValue(1));
    }

    // Copy the value to a (aligned) stack slot using (unaligned) integer
    // loads and stores, then do a (aligned) load from the stack slot.
    MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getStoreSize();
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    // Number of register-width chunks needed to cover the loaded bytes
    // (rounded up; the final chunk may be partial).
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
    auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex();
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    EVT PtrVT = Ptr.getValueType();
    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(
          RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
          MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(),
          LD->getAAInfo());
      // Follow the load with a store to the stack slot.  Remember the store.
      Stores.push_back(DAG.getStore(
          Load.getValue(1), dl, Load, StackPtr,
          MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)));
      // Increment the pointers.
      Offset += RegBytes;

      Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
      StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
    }

    // The last copy may be partial.  Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load =
        DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                       LD->getPointerInfo().getWithOffset(Offset), MemVT,
                       MinAlign(LD->getAlignment(), Offset),
                       LD->getMemOperand()->getFlags(), LD->getAAInfo());
    // Follow the load with a store to the stack slot.  Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(
        Load.getValue(1), dl, Load, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo::getFixedStack(MF, FrameIndex, 0),
                          LoadedVT);

    // Callers expect a MERGE_VALUES node.
    return std::make_pair(Load, TF);
  }

  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one.  This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts.  The low half is always zero-extended; the
  // high half carries the original load's extension kind.  Which half sits
  // at the lower address depends on endianness.
  SDValue Lo, Hi;
  if (DAG.getDataLayout().isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());

    Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, MinAlign(Alignment, IncrementSize),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());

    Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, MinAlign(Alignment, IncrementSize),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
  }

  // aggregate the two parts: result = (Hi << NumBits) | Lo
  SDValue ShiftAmount =
      DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
                                                    DAG.getDataLayout()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                             Hi.getValue(1));

  return std::make_pair(Result, TF);
}
6829 
SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  // Expand a store whose alignment the target cannot handle natively.
  // Returns the output chain. Mirrors expandUnalignedLoad's strategies:
  //   1. FP/vector value with a legal same-width integer type: bitcast and
  //      do one misaligned integer store (or scalarize when integer stores
  //      of that width are unsupported).
  //   2. FP/vector value otherwise: store to an aligned stack slot, then
  //      copy out with register-width integer loads/stores.
  //   3. Integer value: split into two half-width truncating stores.
  assert(ST->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed stores not implemented!");
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  auto &MF = DAG.getMachineFunction();
  EVT StoreMemVT = ST->getMemoryVT();

  SDLoc dl(ST);
  if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (isTypeLegal(intVT)) {
      if (!isOperationLegalOrCustom(ISD::STORE, intVT) &&
          StoreMemVT.isVector()) {
        // Scalarize the store and let the individual components be handled.
        SDValue Result = scalarizeVectorStore(ST, DAG);
        return Result;
      }
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                            Alignment, ST->getMemOperand()->getFlags());
      return Result;
    }
    // Do a (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    MVT RegVT = getRegisterType(
        *DAG.getContext(),
        EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits()));
    EVT PtrVT = Ptr.getValueType();
    unsigned StoredBytes = StoreMemVT.getStoreSize();
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    // Number of register-width chunks needed to cover the stored bytes
    // (rounded up; the final chunk may be partial).
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT);
    auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(
        Chain, dl, Val, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT);

    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load = DAG.getLoad(
          RegVT, dl, Store, StackPtr,
          MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset));
      // Store it to the final location.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                    MinAlign(ST->getAlignment(), Offset),
                                    ST->getMemOperand()->getFlags()));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
      Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
    }

    // The last store may be partial.  Do a truncating store.  On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT LoadMemVT =
        EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(
        ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT);

    Stores.push_back(
        DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                          ST->getPointerInfo().getWithOffset(Offset), LoadMemVT,
                          MinAlign(ST->getAlignment(), Offset),
                          ST->getMemOperand()->getFlags(), ST->getAAInfo()));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    return Result;
  }

  assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT
  EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
  SDValue ShiftAmount = DAG.getConstant(
      NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts; which half goes at the lower address depends on
  // endianness.
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl,
                             DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
                             Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
                             ST->getMemOperand()->getFlags());

  Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(
      Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
      ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
      ST->getMemOperand()->getFlags(), ST->getAAInfo());

  SDValue Result =
      DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
  return Result;
}
6955 
6956 SDValue
6957 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
6958                                        const SDLoc &DL, EVT DataVT,
6959                                        SelectionDAG &DAG,
6960                                        bool IsCompressedMemory) const {
6961   SDValue Increment;
6962   EVT AddrVT = Addr.getValueType();
6963   EVT MaskVT = Mask.getValueType();
6964   assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() &&
6965          "Incompatible types of Data and Mask");
6966   if (IsCompressedMemory) {
6967     // Incrementing the pointer according to number of '1's in the mask.
6968     EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
6969     SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
6970     if (MaskIntVT.getSizeInBits() < 32) {
6971       MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
6972       MaskIntVT = MVT::i32;
6973     }
6974 
6975     // Count '1's with POPCNT.
6976     Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
6977     Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
6978     // Scale is an element size in bytes.
6979     SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
6980                                     AddrVT);
6981     Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
6982   } else
6983     Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
6984 
6985   return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
6986 }
6987 
6988 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG,
6989                                        SDValue Idx,
6990                                        EVT VecVT,
6991                                        const SDLoc &dl) {
6992   if (isa<ConstantSDNode>(Idx))
6993     return Idx;
6994 
6995   EVT IdxVT = Idx.getValueType();
6996   unsigned NElts = VecVT.getVectorNumElements();
6997   if (isPowerOf2_32(NElts)) {
6998     APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
6999                                      Log2_32(NElts));
7000     return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
7001                        DAG.getConstant(Imm, dl, IdxVT));
7002   }
7003 
7004   return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
7005                      DAG.getConstant(NElts - 1, dl, IdxVT));
7006 }
7007 
7008 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
7009                                                 SDValue VecPtr, EVT VecVT,
7010                                                 SDValue Index) const {
7011   SDLoc dl(Index);
7012   // Make sure the index type is big enough to compute in.
7013   Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
7014 
7015   EVT EltVT = VecVT.getVectorElementType();
7016 
7017   // Calculate the element offset and add it to the pointer.
7018   unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
7019   assert(EltSize * 8 == EltVT.getSizeInBits() &&
7020          "Converting bits to bytes lost precision");
7021 
7022   Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);
7023 
7024   EVT IdxVT = Index.getValueType();
7025 
7026   Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
7027                       DAG.getConstant(EltSize, dl, IdxVT));
7028   return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
7029 }
7030 
7031 //===----------------------------------------------------------------------===//
7032 // Implementation of Emulated TLS Model
7033 //===----------------------------------------------------------------------===//
7034 
SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                                SelectionDAG &DAG) const {
  // Access to address of TLS variable xyz is lowered to a function call:
  //   __emutls_get_address( address of global variable named "__emutls_v.xyz" )
  // Returns the call's result, i.e. the address of the TLS variable.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
  SDLoc dl(GA);

  ArgListTy Args;
  ArgListEntry Entry;
  // Look up the "__emutls_v.<name>" control variable in the module; it must
  // already have been created for this TLS global.
  std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
  Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
  StringRef EmuTlsVarName(NameString);
  GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
  assert(EmuTlsVar && "Cannot find EmuTlsVar ");
  Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
  Entry.Ty = VoidPtrType;
  Args.push_back(Entry);

  SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);

  // Build and lower the call, chained to the function entry.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
  CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
  // At last for X86 targets, maybe good for other targets too?
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setAdjustsStack(true); // Is this only for X86 target?
  MFI.setHasCalls(true);

  assert((GA->getOffset() == 0) &&
         "Emulated TLS must have zero offset in GlobalAddressSDNode");
  return CallResult.first;
}
7071 
7072 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
7073                                                 SelectionDAG &DAG) const {
7074   assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node.");
7075   if (!isCtlzFast())
7076     return SDValue();
7077   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
7078   SDLoc dl(Op);
7079   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
7080     if (C->isNullValue() && CC == ISD::SETEQ) {
7081       EVT VT = Op.getOperand(0).getValueType();
7082       SDValue Zext = Op.getOperand(0);
7083       if (VT.bitsLT(MVT::i32)) {
7084         VT = MVT::i32;
7085         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
7086       }
7087       unsigned Log2b = Log2_32(VT.getSizeInBits());
7088       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
7089       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
7090                                 DAG.getConstant(Log2b, dl, MVT::i32));
7091       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
7092     }
7093   }
7094   return SDValue();
7095 }
7096 
SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
  // Expand the saturating add/sub nodes (SADDSAT/UADDSAT/SSUBSAT/USUBSAT).
  // Prefers cheap umax/umin forms for the unsigned cases; otherwise builds
  // the corresponding overflow op and selects the saturated value.
  unsigned Opcode = Node->getOpcode();
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  EVT VT = LHS.getValueType();
  SDLoc dl(Node);

  assert(VT == RHS.getValueType() && "Expected operands to be the same type");
  assert(VT.isInteger() && "Expected operands to be integers");

  // usub.sat(a, b) -> umax(a, b) - b
  if (Opcode == ISD::USUBSAT && isOperationLegalOrCustom(ISD::UMAX, VT)) {
    SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS);
    return DAG.getNode(ISD::SUB, dl, VT, Max, RHS);
  }

  // uadd.sat(a, b) -> umin(a, ~b) + b  (clamp a so the add cannot wrap).
  if (Opcode == ISD::UADDSAT && isOperationLegalOrCustom(ISD::UMIN, VT)) {
    SDValue InvRHS = DAG.getNOT(dl, RHS, VT);
    SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS);
    return DAG.getNode(ISD::ADD, dl, VT, Min, RHS);
  }

  // Map the saturating opcode to its overflow-reporting counterpart.
  unsigned OverflowOp;
  switch (Opcode) {
  case ISD::SADDSAT:
    OverflowOp = ISD::SADDO;
    break;
  case ISD::UADDSAT:
    OverflowOp = ISD::UADDO;
    break;
  case ISD::SSUBSAT:
    OverflowOp = ISD::SSUBO;
    break;
  case ISD::USUBSAT:
    OverflowOp = ISD::USUBO;
    break;
  default:
    llvm_unreachable("Expected method to receive signed or unsigned saturation "
                     "addition or subtraction node.");
  }

  unsigned BitWidth = LHS.getScalarValueSizeInBits();
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  // Result value 0 is the raw sum/difference; value 1 is the overflow flag.
  SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT),
                               LHS, RHS);
  SDValue SumDiff = Result.getValue(0);
  SDValue Overflow = Result.getValue(1);
  SDValue Zero = DAG.getConstant(0, dl, VT);
  SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);

  if (Opcode == ISD::UADDSAT) {
    if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) {
      // With all-ones booleans the overflow flag sign-extends straight into
      // the saturation mask: (LHS + RHS) | OverflowMask
      SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT);
      return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask);
    }
    // Overflow ? 0xffff.... : (LHS + RHS)
    return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff);
  } else if (Opcode == ISD::USUBSAT) {
    if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) {
      // (LHS - RHS) & ~OverflowMask
      SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT);
      SDValue Not = DAG.getNOT(dl, OverflowMask, VT);
      return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not);
    }
    // Overflow ? 0 : (LHS - RHS)
    return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff);
  } else {
    // Signed overflow: a negative wrapped result means the true value was
    // too large (saturate to max); a non-negative one means it was too
    // small (saturate to min).
    // SatMax -> Overflow && SumDiff < 0
    // SatMin -> Overflow && SumDiff >= 0
    APInt MinVal = APInt::getSignedMinValue(BitWidth);
    APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
    SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
    SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
    SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT);
    Result = DAG.getSelect(dl, VT, SumNeg, SatMax, SatMin);
    return DAG.getSelect(dl, VT, Overflow, Result, SumDiff);
  }
}
7176 
/// Expand the fixed-point multiplication nodes [US]MULFIX and [US]MULFIXSAT.
/// Both operands carry 'Scale' fractional bits, so the double-width product
/// carries 2*Scale fractional bits; the expansion computes the product as a
/// Hi/Lo pair and funnel-shifts it right by Scale, clamping to the type's
/// min/max values for the saturating variants. Returns an empty SDValue for
/// vector types with no usable wide multiply, so the caller can try another
/// strategy.
SDValue
TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
  assert((Node->getOpcode() == ISD::SMULFIX ||
          Node->getOpcode() == ISD::UMULFIX ||
          Node->getOpcode() == ISD::SMULFIXSAT ||
          Node->getOpcode() == ISD::UMULFIXSAT) &&
         "Expected a fixed point multiplication opcode");

  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  EVT VT = LHS.getValueType();
  // Operand 2 is the number of fractional bits (the scale).
  unsigned Scale = Node->getConstantOperandVal(2);
  bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT ||
                     Node->getOpcode() == ISD::UMULFIXSAT);
  bool Signed = (Node->getOpcode() == ISD::SMULFIX ||
                 Node->getOpcode() == ISD::SMULFIXSAT);
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  unsigned VTSize = VT.getScalarSizeInBits();

  if (!Scale) {
    // [us]mul.fix(a, b, 0) -> mul(a, b)
    if (!Saturating) {
      if (isOperationLegalOrCustom(ISD::MUL, VT))
        return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) {
      // Saturating scale-0 signed case: multiply with overflow detection. On
      // overflow, a negative truncated product means the true product was too
      // large (saturate to max); otherwise too small (saturate to min).
      SDValue Result =
          DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);
      SDValue Zero = DAG.getConstant(0, dl, VT);

      APInt MinVal = APInt::getSignedMinValue(VTSize);
      APInt MaxVal = APInt::getSignedMaxValue(VTSize);
      SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Product, Zero, ISD::SETLT);
      Result = DAG.getSelect(dl, VT, ProdNeg, SatMax, SatMin);
      return DAG.getSelect(dl, VT, Overflow, Result, Product);
    } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) {
      // Saturating scale-0 unsigned case: any overflow saturates to all-ones.
      SDValue Result =
          DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);

      APInt MaxVal = APInt::getMaxValue(VTSize);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      return DAG.getSelect(dl, VT, Overflow, SatMax, Product);
    }
  }

  assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) &&
         "Expected scale to be less than the number of bits if signed or at "
         "most the number of bits if unsigned.");
  assert(LHS.getValueType() == RHS.getValueType() &&
         "Expected both operands to be the same type");

  // Get the upper and lower bits of the result.
  SDValue Lo, Hi;
  unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
  unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
  if (isOperationLegalOrCustom(LoHiOp, VT)) {
    SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
    Lo = Result.getValue(0);
    Hi = Result.getValue(1);
  } else if (isOperationLegalOrCustom(HiOp, VT)) {
    Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
  } else if (VT.isVector()) {
    // No wide multiply form for this vector type; let the caller fall back
    // (e.g. by unrolling).
    return SDValue();
  } else {
    report_fatal_error("Unable to expand fixed point multiplication.");
  }

  if (Scale == VTSize)
    // Result is just the top half since we'd be shifting by the width of the
    // operand. Overflow impossible so this works for both UMULFIX and
    // UMULFIXSAT.
    return Hi;

  // The result will need to be shifted right by the scale since both operands
  // are scaled. The result is given to us in 2 halves, so we only want part of
  // both in the result.
  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
  SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo,
                               DAG.getConstant(Scale, dl, ShiftTy));
  if (!Saturating)
    return Result;

  if (!Signed) {
    // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the
    // widened multiplication) aren't all zeroes.

    // Saturate to max if ((Hi >> Scale) != 0),
    // which is the same as if (Hi > ((1 << Scale) - 1))
    APInt MaxVal = APInt::getMaxValue(VTSize);
    SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale),
                                      dl, VT);
    Result = DAG.getSelectCC(dl, Hi, LowMask,
                             DAG.getConstant(MaxVal, dl, VT), Result,
                             ISD::SETUGT);

    return Result;
  }

  // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the
  // widened multiplication) aren't all ones or all zeroes.

  SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT);
  SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT);

  if (Scale == 0) {
    // With no fractional bits the whole wide product must fit in the low
    // half: overflow iff Hi differs from the sign-extension of Lo.
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo,
                               DAG.getConstant(VTSize - 1, dl, ShiftTy));
    SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE);
    // Saturated to SatMin if wide product is negative, and SatMax if wide
    // product is positive ...
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
                                               ISD::SETLT);
    // ... but only if we overflowed.
    return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
  }

  //  We handled Scale==0 above so all the bits to examine is in Hi.

  // Saturate to max if ((Hi >> (Scale - 1)) > 0),
  // which is the same as if (Hi > (1 << (Scale - 1)) - 1)
  SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
                                    dl, VT);
  Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
  // Saturate to min if (Hi >> (Scale - 1)) < -1),
  // which is the same as if (HI < (-1 << (Scale - 1))
  SDValue HighMask =
      DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
                      dl, VT);
  Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
  return Result;
}
7316 
7317 SDValue
7318 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
7319                                     SDValue LHS, SDValue RHS,
7320                                     unsigned Scale, SelectionDAG &DAG) const {
7321   assert((Opcode == ISD::SDIVFIX ||
7322           Opcode == ISD::UDIVFIX) &&
7323          "Expected a fixed point division opcode");
7324 
7325   EVT VT = LHS.getValueType();
7326   bool Signed = Opcode == ISD::SDIVFIX;
7327   EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
7328 
7329   // If there is enough room in the type to upscale the LHS or downscale the
7330   // RHS before the division, we can perform it in this type without having to
7331   // resize. For signed operations, the LHS headroom is the number of
7332   // redundant sign bits, and for unsigned ones it is the number of zeroes.
7333   // The headroom for the RHS is the number of trailing zeroes.
7334   unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
7335                             : DAG.computeKnownBits(LHS).countMinLeadingZeros();
7336   unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();
7337 
7338   if (LHSLead + RHSTrail < Scale)
7339     return SDValue();
7340 
7341   unsigned LHSShift = std::min(LHSLead, Scale);
7342   unsigned RHSShift = Scale - LHSShift;
7343 
7344   // At this point, we know that if we shift the LHS up by LHSShift and the
7345   // RHS down by RHSShift, we can emit a regular division with a final scaling
7346   // factor of Scale.
7347 
7348   EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
7349   if (LHSShift)
7350     LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
7351                       DAG.getConstant(LHSShift, dl, ShiftTy));
7352   if (RHSShift)
7353     RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS,
7354                       DAG.getConstant(RHSShift, dl, ShiftTy));
7355 
7356   SDValue Quot;
7357   if (Signed) {
7358     // For signed operations, if the resulting quotient is negative and the
7359     // remainder is nonzero, subtract 1 from the quotient to round towards
7360     // negative infinity.
7361     SDValue Rem;
7362     // FIXME: Ideally we would always produce an SDIVREM here, but if the
7363     // type isn't legal, SDIVREM cannot be expanded. There is no reason why
7364     // we couldn't just form a libcall, but the type legalizer doesn't do it.
7365     if (isTypeLegal(VT) &&
7366         isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
7367       Quot = DAG.getNode(ISD::SDIVREM, dl,
7368                          DAG.getVTList(VT, VT),
7369                          LHS, RHS);
7370       Rem = Quot.getValue(1);
7371       Quot = Quot.getValue(0);
7372     } else {
7373       Quot = DAG.getNode(ISD::SDIV, dl, VT,
7374                          LHS, RHS);
7375       Rem = DAG.getNode(ISD::SREM, dl, VT,
7376                         LHS, RHS);
7377     }
7378     SDValue Zero = DAG.getConstant(0, dl, VT);
7379     SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
7380     SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
7381     SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
7382     SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
7383     SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
7384                                DAG.getConstant(1, dl, VT));
7385     Quot = DAG.getSelect(dl, VT,
7386                          DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
7387                          Sub1, Quot);
7388   } else
7389     Quot = DAG.getNode(ISD::UDIV, dl, VT,
7390                        LHS, RHS);
7391 
7392   // TODO: Saturation.
7393 
7394   return Quot;
7395 }
7396 
7397 void TargetLowering::expandUADDSUBO(
7398     SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
7399   SDLoc dl(Node);
7400   SDValue LHS = Node->getOperand(0);
7401   SDValue RHS = Node->getOperand(1);
7402   bool IsAdd = Node->getOpcode() == ISD::UADDO;
7403 
7404   // If ADD/SUBCARRY is legal, use that instead.
7405   unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY;
7406   if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
7407     SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
7408     SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
7409                                     { LHS, RHS, CarryIn });
7410     Result = SDValue(NodeCarry.getNode(), 0);
7411     Overflow = SDValue(NodeCarry.getNode(), 1);
7412     return;
7413   }
7414 
7415   Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
7416                             LHS.getValueType(), LHS, RHS);
7417 
7418   EVT ResultType = Node->getValueType(1);
7419   EVT SetCCType = getSetCCResultType(
7420       DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
7421   ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
7422   SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
7423   Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
7424 }
7425 
7426 void TargetLowering::expandSADDSUBO(
7427     SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
7428   SDLoc dl(Node);
7429   SDValue LHS = Node->getOperand(0);
7430   SDValue RHS = Node->getOperand(1);
7431   bool IsAdd = Node->getOpcode() == ISD::SADDO;
7432 
7433   Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
7434                             LHS.getValueType(), LHS, RHS);
7435 
7436   EVT ResultType = Node->getValueType(1);
7437   EVT OType = getSetCCResultType(
7438       DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
7439 
7440   // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
7441   unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT;
7442   if (isOperationLegalOrCustom(OpcSat, LHS.getValueType())) {
7443     SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
7444     SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
7445     Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
7446     return;
7447   }
7448 
7449   SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());
7450 
7451   // For an addition, the result should be less than one of the operands (LHS)
7452   // if and only if the other operand (RHS) is negative, otherwise there will
7453   // be overflow.
7454   // For a subtraction, the result should be less than one of the operands
7455   // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
7456   // otherwise there will be overflow.
7457   SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
7458   SDValue ConditionRHS =
7459       DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);
7460 
7461   Overflow = DAG.getBoolExtOrTrunc(
7462       DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
7463       ResultType, ResultType);
7464 }
7465 
/// Expand [SU]MULO into a full multiplication plus an overflow check, setting
/// Result to the low half of the product and Overflow to a boolean that is
/// true iff the product does not fit in the result type. Returns false when
/// no expansion is possible (vector types with no usable wide multiply).
bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
                                SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool isSigned = Node->getOpcode() == ISD::SMULO;

  // For power-of-two multiplications we can use a simpler shift expansion.
  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
      Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
      // Overflow occurred iff shifting the result back down fails to
      // round-trip to the original LHS.
      Overflow = DAG.getSetCC(dl, SetCCVT,
          DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
                      dl, VT, Result, ShiftAmt),
          LHS, ISD::SETNE);
      return true;
    }
  }

  // General case: compute the double-width product and inspect its top half.
  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
  if (VT.isVector())
    WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
                              VT.getVectorNumElements());

  SDValue BottomHalf;
  SDValue TopHalf;
  // Opcode triples indexed by signedness:
  // { high-half multiply, lo/hi pair multiply, widening extension }.
  static const unsigned Ops[2][3] =
      { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
        { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
  if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
    BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
  } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
    BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                             RHS);
    TopHalf = BottomHalf.getValue(1);
  } else if (isTypeLegal(WideVT)) {
    // Multiply in the wide type and split the product into halves.
    LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
    RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
    BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
    SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl,
        getShiftAmountTy(WideVT, DAG.getDataLayout()));
    TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
                          DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
  } else {
    if (VT.isVector())
      return false;

    // We can fall back to a libcall with an illegal type for the MUL if we
    // have a libcall big enough.
    // Also, we can fall back to a division in some cases, but that's a big
    // performance hit in the general case.
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    if (WideVT == MVT::i16)
      LC = RTLIB::MUL_I16;
    else if (WideVT == MVT::i32)
      LC = RTLIB::MUL_I32;
    else if (WideVT == MVT::i64)
      LC = RTLIB::MUL_I64;
    else if (WideVT == MVT::i128)
      LC = RTLIB::MUL_I128;
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

    SDValue HiLHS;
    SDValue HiRHS;
    if (isSigned) {
      // The high part is obtained by SRA'ing all but one of the bits of low
      // part.
      unsigned LoSize = VT.getSizeInBits();
      HiLHS =
          DAG.getNode(ISD::SRA, dl, VT, LHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
      HiRHS =
          DAG.getNode(ISD::SRA, dl, VT, RHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
    } else {
        HiLHS = DAG.getConstant(0, dl, VT);
        HiRHS = DAG.getConstant(0, dl, VT);
    }

    // Here we're passing the 2 arguments explicitly as 4 arguments that are
    // pre-lowered to the correct types. This all depends upon WideVT not
    // being a legal type for the architecture and thus has to be split to
    // two arguments.
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // Same as above.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    // Signed overflow iff the top half differs from the sign-extension of
    // the bottom half.
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    // Unsigned overflow iff the top half is nonzero.
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
                            DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.getSizeInBits() < Overflow.getValueSizeInBits())
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}
7609 
7610 SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
7611   SDLoc dl(Node);
7612   bool NoNaN = Node->getFlags().hasNoNaNs();
7613   unsigned BaseOpcode = 0;
7614   switch (Node->getOpcode()) {
7615   default: llvm_unreachable("Expected VECREDUCE opcode");
7616   case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
7617   case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
7618   case ISD::VECREDUCE_ADD:  BaseOpcode = ISD::ADD; break;
7619   case ISD::VECREDUCE_MUL:  BaseOpcode = ISD::MUL; break;
7620   case ISD::VECREDUCE_AND:  BaseOpcode = ISD::AND; break;
7621   case ISD::VECREDUCE_OR:   BaseOpcode = ISD::OR; break;
7622   case ISD::VECREDUCE_XOR:  BaseOpcode = ISD::XOR; break;
7623   case ISD::VECREDUCE_SMAX: BaseOpcode = ISD::SMAX; break;
7624   case ISD::VECREDUCE_SMIN: BaseOpcode = ISD::SMIN; break;
7625   case ISD::VECREDUCE_UMAX: BaseOpcode = ISD::UMAX; break;
7626   case ISD::VECREDUCE_UMIN: BaseOpcode = ISD::UMIN; break;
7627   case ISD::VECREDUCE_FMAX:
7628     BaseOpcode = NoNaN ? ISD::FMAXNUM : ISD::FMAXIMUM;
7629     break;
7630   case ISD::VECREDUCE_FMIN:
7631     BaseOpcode = NoNaN ? ISD::FMINNUM : ISD::FMINIMUM;
7632     break;
7633   }
7634 
7635   SDValue Op = Node->getOperand(0);
7636   EVT VT = Op.getValueType();
7637 
7638   // Try to use a shuffle reduction for power of two vectors.
7639   if (VT.isPow2VectorType()) {
7640     while (VT.getVectorNumElements() > 1) {
7641       EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
7642       if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
7643         break;
7644 
7645       SDValue Lo, Hi;
7646       std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
7647       Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
7648       VT = HalfVT;
7649     }
7650   }
7651 
7652   EVT EltVT = VT.getVectorElementType();
7653   unsigned NumElts = VT.getVectorNumElements();
7654 
7655   SmallVector<SDValue, 8> Ops;
7656   DAG.ExtractVectorElements(Op, Ops, 0, NumElts);
7657 
7658   SDValue Res = Ops[0];
7659   for (unsigned i = 1; i < NumElts; i++)
7660     Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
7661 
7662   // Result type may be wider than element type.
7663   if (EltVT != Node->getValueType(0))
7664     Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
7665   return Res;
7666 }
7667