1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Target/TargetLowering.h"
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/CodeGen/Analysis.h"
18 #include "llvm/CodeGen/CallingConvLower.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineJumpTableInfo.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/GlobalVariable.h"
27 #include "llvm/IR/LLVMContext.h"
28 #include "llvm/MC/MCAsmInfo.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/MathExtras.h"
32 #include "llvm/Target/TargetLoweringObjectFile.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetRegisterInfo.h"
35 #include "llvm/Target/TargetSubtargetInfo.h"
36 #include <cctype>
37 using namespace llvm;
38 
39 /// NOTE: The TargetMachine owns TLOF.
40 TargetLowering::TargetLowering(const TargetMachine &tm)
41   : TargetLoweringBase(tm) {}
42 
43 const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
44   return nullptr;
45 }
46 
/// Check whether a given call node is in tail position within its function. If
/// so, set Chain to the input chain of the tail call.
49 bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
50                                           SDValue &Chain) const {
51   const Function *F = DAG.getMachineFunction().getFunction();
52 
53   // Conservatively require the attributes of the call to match those of
54   // the return. Ignore noalias because it doesn't affect the call sequence.
55   AttributeSet CallerAttrs = F->getAttributes();
56   if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex)
57       .removeAttribute(Attribute::NoAlias).hasAttributes())
58     return false;
59 
60   // It's not safe to eliminate the sign / zero extension of the return value.
61   if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
62       CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
63     return false;
64 
65   // Check if the only use is a function return node.
66   return isUsedByReturnOnly(Node, Chain);
67 }
68 
69 bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
70     const uint32_t *CallerPreservedMask,
71     const SmallVectorImpl<CCValAssign> &ArgLocs,
72     const SmallVectorImpl<SDValue> &OutVals) const {
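  // For illustration: a tail call is only safe here if every argument assigned
  // to a callee-saved register already holds the caller's incoming value for
  // that register. E.g. an argument assigned to a (hypothetical) callee-saved
  // register R4 must be a CopyFromReg of the virtual register that carries the
  // caller's own live-in value of R4.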
73   for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
74     const CCValAssign &ArgLoc = ArgLocs[I];
75     if (!ArgLoc.isRegLoc())
76       continue;
77     unsigned Reg = ArgLoc.getLocReg();
78     // Only look at callee saved registers.
79     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
80       continue;
81     // Check that we pass the value used for the caller.
82     // (We look for a CopyFromReg reading a virtual register that is used
83     //  for the function live-in value of register Reg)
84     SDValue Value = OutVals[I];
85     if (Value->getOpcode() != ISD::CopyFromReg)
86       return false;
87     unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
88     if (MRI.getLiveInPhysReg(ArgReg) != Reg)
89       return false;
90   }
91   return true;
92 }
93 
94 /// \brief Set CallLoweringInfo attribute flags based on a call instruction
95 /// and called function attributes.
96 void TargetLowering::ArgListEntry::setAttributes(ImmutableCallSite *CS,
97                                                  unsigned AttrIdx) {
98   isSExt     = CS->paramHasAttr(AttrIdx, Attribute::SExt);
99   isZExt     = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
100   isInReg    = CS->paramHasAttr(AttrIdx, Attribute::InReg);
101   isSRet     = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
102   isNest     = CS->paramHasAttr(AttrIdx, Attribute::Nest);
103   isByVal    = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
104   isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
105   isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
106   isSwiftSelf = CS->paramHasAttr(AttrIdx, Attribute::SwiftSelf);
107   isSwiftError = CS->paramHasAttr(AttrIdx, Attribute::SwiftError);
108   Alignment  = CS->getParamAlignment(AttrIdx);
109 }
110 
111 /// Generate a libcall taking the given operands as arguments and returning a
112 /// result of type RetVT.
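/// For illustration (names are target/runtime specific): lowering an f128
/// addition could use
///   makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, {LHS, RHS},
///               /*isSigned=*/false, dl)
/// which emits a call to the configured runtime routine (e.g. an
/// __addtf3-style helper) and returns the call's result value and chain.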
113 std::pair<SDValue, SDValue>
114 TargetLowering::makeLibCall(SelectionDAG &DAG,
115                             RTLIB::Libcall LC, EVT RetVT,
116                             ArrayRef<SDValue> Ops,
117                             bool isSigned, SDLoc dl,
118                             bool doesNotReturn,
119                             bool isReturnValueUsed) const {
120   TargetLowering::ArgListTy Args;
121   Args.reserve(Ops.size());
122 
123   TargetLowering::ArgListEntry Entry;
124   for (SDValue Op : Ops) {
125     Entry.Node = Op;
126     Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
127     Entry.isSExt = shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
128     Entry.isZExt = !shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
129     Args.push_back(Entry);
130   }
131 
132   if (LC == RTLIB::UNKNOWN_LIBCALL)
133     report_fatal_error("Unsupported library call operation!");
134   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
135                                          getPointerTy(DAG.getDataLayout()));
136 
137   Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
138   TargetLowering::CallLoweringInfo CLI(DAG);
139   bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
140   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
141     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
142     .setNoReturn(doesNotReturn).setDiscardResult(!isReturnValueUsed)
143     .setSExtResult(signExtend).setZExtResult(!signExtend);
144   return LowerCallTo(CLI);
145 }
146 
147 /// Soften the operands of a comparison. This code is shared among BR_CC,
148 /// SELECT_CC, and SETCC handlers.
149 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
150                                          SDValue &NewLHS, SDValue &NewRHS,
151                                          ISD::CondCode &CCCode,
152                                          SDLoc dl) const {
  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");
155 
156   // Expand into one or more soft-fp libcall(s).
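  // Most conditions map to a single ordered-comparison libcall and a compare
  // of its result against zero; a few need two calls combined with OR. For
  // example (sketch, using typical libgcc/compiler-rt names), SETONE on f64
  // becomes roughly (__ltdf2(L, R) < 0) | (__gtdf2(L, R) > 0).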
157   RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
158   bool ShouldInvertCC = false;
159   switch (CCCode) {
160   case ISD::SETEQ:
161   case ISD::SETOEQ:
162     LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
163           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
164           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
165     break;
166   case ISD::SETNE:
167   case ISD::SETUNE:
168     LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
169           (VT == MVT::f64) ? RTLIB::UNE_F64 :
170           (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
171     break;
172   case ISD::SETGE:
173   case ISD::SETOGE:
174     LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
175           (VT == MVT::f64) ? RTLIB::OGE_F64 :
176           (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
177     break;
178   case ISD::SETLT:
179   case ISD::SETOLT:
180     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
181           (VT == MVT::f64) ? RTLIB::OLT_F64 :
182           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
183     break;
184   case ISD::SETLE:
185   case ISD::SETOLE:
186     LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
187           (VT == MVT::f64) ? RTLIB::OLE_F64 :
188           (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
189     break;
190   case ISD::SETGT:
191   case ISD::SETOGT:
192     LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
193           (VT == MVT::f64) ? RTLIB::OGT_F64 :
194           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
195     break;
196   case ISD::SETUO:
197     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
198           (VT == MVT::f64) ? RTLIB::UO_F64 :
199           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
200     break;
201   case ISD::SETO:
202     LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
203           (VT == MVT::f64) ? RTLIB::O_F64 :
204           (VT == MVT::f128) ? RTLIB::O_F128 : RTLIB::O_PPCF128;
205     break;
206   case ISD::SETONE:
207     // SETONE = SETOLT | SETOGT
208     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
209           (VT == MVT::f64) ? RTLIB::OLT_F64 :
210           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
211     LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
212           (VT == MVT::f64) ? RTLIB::OGT_F64 :
213           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
214     break;
215   case ISD::SETUEQ:
216     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
217           (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
219     LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
220           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
221           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
222     break;
223   default:
224     // Invert CC for unordered comparisons
225     ShouldInvertCC = true;
226     switch (CCCode) {
227     case ISD::SETULT:
228       LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
229             (VT == MVT::f64) ? RTLIB::OGE_F64 :
230             (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
231       break;
232     case ISD::SETULE:
233       LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
234             (VT == MVT::f64) ? RTLIB::OGT_F64 :
235             (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
236       break;
237     case ISD::SETUGT:
238       LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
239             (VT == MVT::f64) ? RTLIB::OLE_F64 :
240             (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
241       break;
242     case ISD::SETUGE:
243       LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
244             (VT == MVT::f64) ? RTLIB::OLT_F64 :
245             (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
246       break;
247     default: llvm_unreachable("Do not know how to soften this setcc!");
248     }
249   }
250 
  // Use the target-specific return type for comparison libcalls.
252   EVT RetVT = getCmpLibcallReturnType();
253   SDValue Ops[2] = {NewLHS, NewRHS};
254   NewLHS = makeLibCall(DAG, LC1, RetVT, Ops, false /*sign irrelevant*/,
255                        dl).first;
256   NewRHS = DAG.getConstant(0, dl, RetVT);
257 
258   CCCode = getCmpLibcallCC(LC1);
259   if (ShouldInvertCC)
260     CCCode = getSetCCInverse(CCCode, /*isInteger=*/true);
261 
262   if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
263     SDValue Tmp = DAG.getNode(
264         ISD::SETCC, dl,
265         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
266         NewLHS, NewRHS, DAG.getCondCode(CCCode));
267     NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, false/*sign irrelevant*/,
268                          dl).first;
269     NewLHS = DAG.getNode(
270         ISD::SETCC, dl,
271         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
272         NewLHS, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
273     NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
274     NewRHS = SDValue();
275   }
276 }
277 
278 /// Return the entry encoding for a jump table in the current function. The
279 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
280 unsigned TargetLowering::getJumpTableEncoding() const {
281   // In non-pic modes, just use the address of a block.
282   if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
283     return MachineJumpTableInfo::EK_BlockAddress;
284 
285   // In PIC mode, if the target supports a GPRel32 directive, use it.
286   if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
287     return MachineJumpTableInfo::EK_GPRel32BlockAddress;
288 
289   // Otherwise, use a label difference.
290   return MachineJumpTableInfo::EK_LabelDifference32;
291 }
292 
293 SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
294                                                  SelectionDAG &DAG) const {
295   // If our PIC model is GP relative, use the global offset table as the base.
296   unsigned JTEncoding = getJumpTableEncoding();
297 
298   if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
299       (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
300     return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));
301 
302   return Table;
303 }
304 
305 /// This returns the relocation base for the given PIC jumptable, the same as
306 /// getPICJumpTableRelocBase, but as an MCExpr.
307 const MCExpr *
308 TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
309                                              unsigned JTI,MCContext &Ctx) const{
310   // The normal PIC reloc base is the label at the start of the jump table.
311   return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
312 }
313 
314 bool
315 TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
316   // Assume that everything is safe in static mode.
317   if (getTargetMachine().getRelocationModel() == Reloc::Static)
318     return true;
319 
320   // In dynamic-no-pic mode, assume that known defined values are safe.
321   if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
322       GA && GA->getGlobal()->isStrongDefinitionForLinker())
323     return true;
324 
325   // Otherwise assume nothing is safe.
326   return false;
327 }
328 
329 //===----------------------------------------------------------------------===//
330 //  Optimization Methods
331 //===----------------------------------------------------------------------===//
332 
333 /// Check to see if the specified operand of the specified instruction is a
334 /// constant integer. If so, check to see if there are any bits set in the
335 /// constant that are not demanded. If so, shrink the constant and return true.
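/// For example, if only the low 8 bits of (X & 0xFFFF) are demanded, the
/// constant is shrunk to 0xFF, producing (X & 0xFF).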
336 bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
337                                                         const APInt &Demanded) {
338   SDLoc dl(Op);
339 
340   // FIXME: ISD::SELECT, ISD::SELECT_CC
341   switch (Op.getOpcode()) {
342   default: break;
343   case ISD::XOR:
344   case ISD::AND:
345   case ISD::OR: {
346     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
347     if (!C) return false;
348 
349     if (Op.getOpcode() == ISD::XOR &&
350         (C->getAPIntValue() | (~Demanded)).isAllOnesValue())
351       return false;
352 
    // If the constant has bits set that are not demanded, clear those bits
    // and use the shrunken constant instead.
354     if (C->getAPIntValue().intersects(~Demanded)) {
355       EVT VT = Op.getValueType();
356       SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
357                                 DAG.getConstant(Demanded &
358                                                 C->getAPIntValue(),
359                                                 dl, VT));
360       return CombineTo(Op, New);
361     }
362 
363     break;
364   }
365   }
366 
367   return false;
368 }
369 
370 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
371 /// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
372 /// generalized for targets with other types of implicit widening casts.
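/// For illustration, a 32-bit add whose upper 24 result bits are never used
/// can be rebuilt as an i8 add of truncated operands that is then extended
/// back to i32, provided the target reports the i32 <-> i8 casts as free.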
373 bool
374 TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
375                                                     unsigned BitWidth,
376                                                     const APInt &Demanded,
377                                                     SDLoc dl) {
378   assert(Op.getNumOperands() == 2 &&
379          "ShrinkDemandedOp only supports binary operators!");
380   assert(Op.getNode()->getNumValues() == 1 &&
381          "ShrinkDemandedOp only supports nodes with one result!");
382 
383   // Early return, as this function cannot handle vector types.
384   if (Op.getValueType().isVector())
385     return false;
386 
387   // Don't do this if the node has another user, which may require the
388   // full value.
389   if (!Op.getNode()->hasOneUse())
390     return false;
391 
392   // Search for the smallest integer type with free casts to and from
393   // Op's type. For expedience, just check power-of-2 integer types.
394   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
395   unsigned DemandedSize = BitWidth - Demanded.countLeadingZeros();
396   unsigned SmallVTBits = DemandedSize;
397   if (!isPowerOf2_32(SmallVTBits))
398     SmallVTBits = NextPowerOf2(SmallVTBits);
399   for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
400     EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
401     if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
402         TLI.isZExtFree(SmallVT, Op.getValueType())) {
403       // We found a type with free casts.
404       SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
405                               DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
406                                           Op.getNode()->getOperand(0)),
407                               DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
408                                           Op.getNode()->getOperand(1)));
409       bool NeedZext = DemandedSize > SmallVTBits;
410       SDValue Z = DAG.getNode(NeedZext ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND,
411                               dl, Op.getValueType(), X);
412       return CombineTo(Op, Z);
413     }
414   }
415   return false;
416 }
417 
418 /// Look at Op. At this point, we know that only the DemandedMask bits of the
419 /// result of Op are ever used downstream. If we can use this information to
420 /// simplify Op, create a new simplified DAG node and return true, returning the
421 /// original and new nodes in Old and New. Otherwise, analyze the expression and
422 /// return a mask of KnownOne and KnownZero bits for the expression (used to
423 /// simplify the caller).  The KnownZero/One bits may only be accurate for those
424 /// bits in the DemandedMask.
425 bool TargetLowering::SimplifyDemandedBits(SDValue Op,
426                                           const APInt &DemandedMask,
427                                           APInt &KnownZero,
428                                           APInt &KnownOne,
429                                           TargetLoweringOpt &TLO,
430                                           unsigned Depth) const {
431   unsigned BitWidth = DemandedMask.getBitWidth();
432   assert(Op.getValueType().getScalarType().getSizeInBits() == BitWidth &&
433          "Mask size mismatches value type size!");
434   APInt NewMask = DemandedMask;
435   SDLoc dl(Op);
436   auto &DL = TLO.DAG.getDataLayout();
437 
438   // Don't know anything.
439   KnownZero = KnownOne = APInt(BitWidth, 0);
440 
441   // Other users may use these bits.
442   if (!Op.getNode()->hasOneUse()) {
443     if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
446       TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
447       return false;
448     }
449     // If this is the root being simplified, allow it to have multiple uses,
450     // just set the NewMask to all bits.
451     NewMask = APInt::getAllOnesValue(BitWidth);
452   } else if (DemandedMask == 0) {
453     // Not demanding any bits from Op.
454     if (!Op.isUndef())
455       return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
456     return false;
457   } else if (Depth == 6) {        // Limit search depth.
458     return false;
459   }
460 
461   APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
462   switch (Op.getOpcode()) {
463   case ISD::Constant:
464     // We know all of the bits for a constant!
465     KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
466     KnownZero = ~KnownOne;
467     return false;   // Don't fall through, will infinitely loop.
468   case ISD::AND:
469     // If the RHS is a constant, check to see if the LHS would be zero without
470     // using the bits from the RHS.  Below, we use knowledge about the RHS to
471     // simplify the LHS, here we're using information from the LHS to simplify
472     // the RHS.
473     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
474       APInt LHSZero, LHSOne;
475       // Do not increment Depth here; that can cause an infinite loop.
476       TLO.DAG.computeKnownBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
477       // If the LHS already has zeros where RHSC does, this and is dead.
478       if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
479         return TLO.CombineTo(Op, Op.getOperand(0));
480       // If any of the set bits in the RHS are known zero on the LHS, shrink
481       // the constant.
482       if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
483         return true;
484     }
485 
486     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
487                              KnownOne, TLO, Depth+1))
488       return true;
489     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
490     if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
491                              KnownZero2, KnownOne2, TLO, Depth+1))
492       return true;
493     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
494 
495     // If all of the demanded bits are known one on one side, return the other.
496     // These bits cannot contribute to the result of the 'and'.
497     if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
498       return TLO.CombineTo(Op, Op.getOperand(0));
499     if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
500       return TLO.CombineTo(Op, Op.getOperand(1));
501     // If all of the demanded bits in the inputs are known zeros, return zero.
502     if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
503       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, Op.getValueType()));
504     // If the RHS is a constant, see if we can simplify it.
505     if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
506       return true;
507     // If the operation can be done in a smaller type, do so.
508     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
509       return true;
510 
511     // Output known-1 bits are only known if set in both the LHS & RHS.
512     KnownOne &= KnownOne2;
    // Output known-0 bits are clear if zero in either the LHS or the RHS.
514     KnownZero |= KnownZero2;
515     break;
516   case ISD::OR:
517     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
518                              KnownOne, TLO, Depth+1))
519       return true;
520     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
521     if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
522                              KnownZero2, KnownOne2, TLO, Depth+1))
523       return true;
524     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
525 
526     // If all of the demanded bits are known zero on one side, return the other.
527     // These bits cannot contribute to the result of the 'or'.
528     if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
529       return TLO.CombineTo(Op, Op.getOperand(0));
530     if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
531       return TLO.CombineTo(Op, Op.getOperand(1));
532     // If all of the potentially set bits on one side are known to be set on
533     // the other side, just use the 'other' side.
534     if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
535       return TLO.CombineTo(Op, Op.getOperand(0));
536     if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
537       return TLO.CombineTo(Op, Op.getOperand(1));
538     // If the RHS is a constant, see if we can simplify it.
539     if (TLO.ShrinkDemandedConstant(Op, NewMask))
540       return true;
541     // If the operation can be done in a smaller type, do so.
542     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
543       return true;
544 
545     // Output known-0 bits are only known if clear in both the LHS & RHS.
546     KnownZero &= KnownZero2;
    // Output known-1 bits are set if set in either the LHS or the RHS.
548     KnownOne |= KnownOne2;
549     break;
550   case ISD::XOR:
551     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
552                              KnownOne, TLO, Depth+1))
553       return true;
554     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
555     if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
556                              KnownOne2, TLO, Depth+1))
557       return true;
558     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
559 
560     // If all of the demanded bits are known zero on one side, return the other.
561     // These bits cannot contribute to the result of the 'xor'.
562     if ((KnownZero & NewMask) == NewMask)
563       return TLO.CombineTo(Op, Op.getOperand(0));
564     if ((KnownZero2 & NewMask) == NewMask)
565       return TLO.CombineTo(Op, Op.getOperand(1));
566     // If the operation can be done in a smaller type, do so.
567     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
568       return true;
569 
570     // If all of the unknown bits are known to be zero on one side or the other
571     // (but not both) turn this into an *inclusive* or.
572     //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
573     if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
574       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
575                                                Op.getOperand(0),
576                                                Op.getOperand(1)));
577 
578     // Output known-0 bits are known if clear or set in both the LHS & RHS.
579     KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 bits are set if set in only one of the LHS and RHS.
581     KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
582 
583     // If all of the demanded bits on one side are known, and all of the set
584     // bits on that side are also known to be set on the other side, turn this
585     // into an AND, as we know the bits will be cleared.
586     //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
587     // NB: it is okay if more bits are known than are requested
588     if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
589       if (KnownOne == KnownOne2) { // set bits are the same on both sides
590         EVT VT = Op.getValueType();
591         SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, dl, VT);
592         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
593                                                  Op.getOperand(0), ANDC));
594       }
595     }
596 
    // If the RHS is a constant, see if we can simplify it.
    // For XOR, we prefer to force bits to 1 if that makes the constant -1;
    // if we can't force bits, try to shrink the constant instead.
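    // For example, if every demanded bit of the constant is already one, the
    // undemanded bits can be set as well, widening the constant to all-ones
    // and turning the xor into a canonical (not X).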
600     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
601       APInt Expanded = C->getAPIntValue() | (~NewMask);
602       // if we can expand it to have all bits set, do it
603       if (Expanded.isAllOnesValue()) {
604         if (Expanded != C->getAPIntValue()) {
605           EVT VT = Op.getValueType();
606           SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
607                                         TLO.DAG.getConstant(Expanded, dl, VT));
608           return TLO.CombineTo(Op, New);
609         }
610         // if it already has all the bits set, nothing to change
611         // but don't shrink either!
612       } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
613         return true;
614       }
615     }
616 
617     KnownZero = KnownZeroOut;
618     KnownOne  = KnownOneOut;
619     break;
620   case ISD::SELECT:
621     if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
622                              KnownOne, TLO, Depth+1))
623       return true;
624     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
625                              KnownOne2, TLO, Depth+1))
626       return true;
627     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
628     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
629 
630     // If the operands are constants, see if we can simplify them.
631     if (TLO.ShrinkDemandedConstant(Op, NewMask))
632       return true;
633 
634     // Only known if known in both the LHS and RHS.
635     KnownOne &= KnownOne2;
636     KnownZero &= KnownZero2;
637     break;
638   case ISD::SELECT_CC:
639     if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
640                              KnownOne, TLO, Depth+1))
641       return true;
642     if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
643                              KnownOne2, TLO, Depth+1))
644       return true;
645     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
646     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
647 
648     // If the operands are constants, see if we can simplify them.
649     if (TLO.ShrinkDemandedConstant(Op, NewMask))
650       return true;
651 
652     // Only known if known in both the LHS and RHS.
653     KnownOne &= KnownOne2;
654     KnownZero &= KnownZero2;
655     break;
656   case ISD::SHL:
657     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
658       unsigned ShAmt = SA->getZExtValue();
659       SDValue InOp = Op.getOperand(0);
660 
661       // If the shift count is an invalid immediate, don't do anything.
662       if (ShAmt >= BitWidth)
663         break;
664 
665       // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
666       // single shift.  We can do this if the bottom bits (which are shifted
667       // out) are never demanded.
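      // For example, when none of the low 5 result bits are demanded,
      //   ((X >>u 3) << 5)  ->  (X << 2)
      // since both agree on every bit position that is actually used.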
668       if (InOp.getOpcode() == ISD::SRL &&
669           isa<ConstantSDNode>(InOp.getOperand(1))) {
670         if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
671           unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
672           unsigned Opc = ISD::SHL;
673           int Diff = ShAmt-C1;
674           if (Diff < 0) {
675             Diff = -Diff;
676             Opc = ISD::SRL;
677           }
678 
679           SDValue NewSA =
680             TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
681           EVT VT = Op.getValueType();
682           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
683                                                    InOp.getOperand(0), NewSA));
684         }
685       }
686 
687       if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
688                                KnownZero, KnownOne, TLO, Depth+1))
689         return true;
690 
691       // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
692       // are not demanded. This will likely allow the anyext to be folded away.
693       if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
694         SDValue InnerOp = InOp.getNode()->getOperand(0);
695         EVT InnerVT = InnerOp.getValueType();
696         unsigned InnerBits = InnerVT.getSizeInBits();
697         if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
698             isTypeDesirableForOp(ISD::SHL, InnerVT)) {
699           EVT ShTy = getShiftAmountTy(InnerVT, DL);
700           if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
701             ShTy = InnerVT;
702           SDValue NarrowShl =
703             TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
704                             TLO.DAG.getConstant(ShAmt, dl, ShTy));
705           return
706             TLO.CombineTo(Op,
707                           TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(),
708                                           NarrowShl));
709         }
710         // Repeat the SHL optimization above in cases where an extension
711         // intervenes: (shl (anyext (shr x, c1)), c2) to
712         // (shl (anyext x), c2-c1).  This requires that the bottom c1 bits
713         // aren't demanded (as above) and that the shifted upper c1 bits of
714         // x aren't demanded.
715         if (InOp.hasOneUse() &&
716             InnerOp.getOpcode() == ISD::SRL &&
717             InnerOp.hasOneUse() &&
718             isa<ConstantSDNode>(InnerOp.getOperand(1))) {
719           uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
720             ->getZExtValue();
721           if (InnerShAmt < ShAmt &&
722               InnerShAmt < InnerBits &&
723               NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 &&
724               NewMask.trunc(ShAmt) == 0) {
725             SDValue NewSA =
726               TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
727                                   Op.getOperand(1).getValueType());
728             EVT VT = Op.getValueType();
729             SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
730                                              InnerOp.getOperand(0));
731             return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT,
732                                                      NewExt, NewSA));
733           }
734         }
735       }
736 
737       KnownZero <<= SA->getZExtValue();
738       KnownOne  <<= SA->getZExtValue();
739       // low bits known zero.
740       KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue());
741     }
742     break;
743   case ISD::SRL:
744     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
745       EVT VT = Op.getValueType();
746       unsigned ShAmt = SA->getZExtValue();
747       unsigned VTSize = VT.getSizeInBits();
748       SDValue InOp = Op.getOperand(0);
749 
750       // If the shift count is an invalid immediate, don't do anything.
751       if (ShAmt >= BitWidth)
752         break;
753 
754       APInt InDemandedMask = (NewMask << ShAmt);
755 
756       // If the shift is exact, then it does demand the low bits (and knows that
757       // they are zero).
758       if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
759         InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);
760 
761       // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
762       // single shift.  We can do this if the top bits (which are shifted out)
763       // are never demanded.
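      // For example, when none of the high 5 result bits are demanded,
      //   ((X << 3) >>u 5)  ->  (X >>u 2)
      // since both agree on every bit position that is actually used.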
764       if (InOp.getOpcode() == ISD::SHL &&
765           isa<ConstantSDNode>(InOp.getOperand(1))) {
766         if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
767           unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
768           unsigned Opc = ISD::SRL;
769           int Diff = ShAmt-C1;
770           if (Diff < 0) {
771             Diff = -Diff;
772             Opc = ISD::SHL;
773           }
774 
775           SDValue NewSA =
776             TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
777           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
778                                                    InOp.getOperand(0), NewSA));
779         }
780       }
781 
782       // Compute the new bits that are at the top now.
783       if (SimplifyDemandedBits(InOp, InDemandedMask,
784                                KnownZero, KnownOne, TLO, Depth+1))
785         return true;
786       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
787       KnownZero = KnownZero.lshr(ShAmt);
788       KnownOne  = KnownOne.lshr(ShAmt);
789 
790       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
791       KnownZero |= HighBits;  // High bits known zero.
792     }
793     break;
794   case ISD::SRA:
795     // If this is an arithmetic shift right and only the low-bit is set, we can
796     // always convert this into a logical shr, even if the shift amount is
797     // variable.  The low bit of the shift cannot be an input sign bit unless
798     // the shift amount is >= the size of the datatype, which is undefined.
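    // For example, (X >>s Y) and (X >>u Y) compute the same bit 0 for every
    // in-range shift amount Y.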
799     if (NewMask == 1)
800       return TLO.CombineTo(Op,
801                            TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
802                                            Op.getOperand(0), Op.getOperand(1)));
803 
804     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
805       EVT VT = Op.getValueType();
806       unsigned ShAmt = SA->getZExtValue();
807 
808       // If the shift count is an invalid immediate, don't do anything.
809       if (ShAmt >= BitWidth)
810         break;
811 
812       APInt InDemandedMask = (NewMask << ShAmt);
813 
814       // If the shift is exact, then it does demand the low bits (and knows that
815       // they are zero).
816       if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
817         InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);
818 
819       // If any of the demanded bits are produced by the sign extension, we also
820       // demand the input sign bit.
821       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
822       if (HighBits.intersects(NewMask))
823         InDemandedMask |= APInt::getSignBit(VT.getScalarType().getSizeInBits());
824 
825       if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
826                                KnownZero, KnownOne, TLO, Depth+1))
827         return true;
828       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
829       KnownZero = KnownZero.lshr(ShAmt);
830       KnownOne  = KnownOne.lshr(ShAmt);
831 
832       // Handle the sign bit, adjusted to where it is now in the mask.
833       APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
834 
835       // If the input sign bit is known to be zero, or if none of the top bits
836       // are demanded, turn this into an unsigned shift right.
837       if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
838         SDNodeFlags Flags;
839         Flags.setExact(cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact());
840         return TLO.CombineTo(Op,
841                              TLO.DAG.getNode(ISD::SRL, dl, VT, Op.getOperand(0),
842                                              Op.getOperand(1), &Flags));
843       }
844 
845       int Log2 = NewMask.exactLogBase2();
846       if (Log2 >= 0) {
847         // The bit must come from the sign.
848         SDValue NewSA =
849           TLO.DAG.getConstant(BitWidth - 1 - Log2, dl,
850                               Op.getOperand(1).getValueType());
851         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
852                                                  Op.getOperand(0), NewSA));
853       }
854 
855       if (KnownOne.intersects(SignBit))
856         // New bits are known one.
857         KnownOne |= HighBits;
858     }
859     break;
860   case ISD::SIGN_EXTEND_INREG: {
861     EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
862 
863     APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
864     // If we only care about the highest bit, don't bother shifting right.
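    // For example, if only the sign bit of an i32 result is demanded,
    //   (sign_extend_inreg X, i8)  ->  (shl X, 24)
    // since bit 7 of X ends up in bit 31 either way.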
865     if (MsbMask == NewMask) {
866       unsigned ShAmt = ExVT.getScalarType().getSizeInBits();
867       SDValue InOp = Op.getOperand(0);
868       unsigned VTBits = Op->getValueType(0).getScalarType().getSizeInBits();
869       bool AlreadySignExtended =
870         TLO.DAG.ComputeNumSignBits(InOp) >= VTBits-ShAmt+1;
      // However, if the input is already sign-extended, we expect the sign
      // extension to be dropped altogether later, so do not simplify.
873       if (!AlreadySignExtended) {
874         // Compute the correct shift amount type, which must be getShiftAmountTy
875         // for scalar types after legalization.
876         EVT ShiftAmtTy = Op.getValueType();
877         if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
878           ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
879 
880         SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, dl,
881                                                ShiftAmtTy);
882         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
883                                                  Op.getValueType(), InOp,
884                                                  ShiftAmt));
885       }
886     }
887 
888     // Sign extension.  Compute the demanded bits in the result that are not
889     // present in the input.
890     APInt NewBits =
891       APInt::getHighBitsSet(BitWidth,
892                             BitWidth - ExVT.getScalarType().getSizeInBits());
893 
894     // If none of the extended bits are demanded, eliminate the sextinreg.
895     if ((NewBits & NewMask) == 0)
896       return TLO.CombineTo(Op, Op.getOperand(0));
897 
898     APInt InSignBit =
899       APInt::getSignBit(ExVT.getScalarType().getSizeInBits()).zext(BitWidth);
900     APInt InputDemandedBits =
901       APInt::getLowBitsSet(BitWidth,
902                            ExVT.getScalarType().getSizeInBits()) &
903       NewMask;
904 
905     // Since the sign extended bits are demanded, we know that the sign
906     // bit is demanded.
907     InputDemandedBits |= InSignBit;
908 
909     if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
910                              KnownZero, KnownOne, TLO, Depth+1))
911       return true;
912     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
913 
914     // If the sign bit of the input is known set or clear, then we know the
915     // top bits of the result.
916 
917     // If the input sign bit is known zero, convert this into a zero extension.
918     if (KnownZero.intersects(InSignBit))
919       return TLO.CombineTo(Op,
920                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,ExVT));
921 
922     if (KnownOne.intersects(InSignBit)) {    // Input sign bit known set
923       KnownOne |= NewBits;
924       KnownZero &= ~NewBits;
925     } else {                       // Input sign bit unknown
926       KnownZero &= ~NewBits;
927       KnownOne &= ~NewBits;
928     }
929     break;
930   }
931   case ISD::BUILD_PAIR: {
932     EVT HalfVT = Op.getOperand(0).getValueType();
933     unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
934 
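    // For example, for an i64 pair built from two i32 halves with
    // NewMask = 0xFFFF00000000FFFF, MaskLo is 0x0000FFFF and MaskHi is
    // 0xFFFF0000.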
935     APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
    APInt MaskHi =
        NewMask.getHiBits(HalfBitWidth).lshr(HalfBitWidth).trunc(HalfBitWidth);
937 
938     APInt KnownZeroLo, KnownOneLo;
939     APInt KnownZeroHi, KnownOneHi;
940 
941     if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
942                              KnownOneLo, TLO, Depth + 1))
943       return true;
944 
945     if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
946                              KnownOneHi, TLO, Depth + 1))
947       return true;
948 
949     KnownZero = KnownZeroLo.zext(BitWidth) |
950                 KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);
951 
952     KnownOne = KnownOneLo.zext(BitWidth) |
953                KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
954     break;
955   }
956   case ISD::ZERO_EXTEND: {
957     unsigned OperandBitWidth =
958       Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
959     APInt InMask = NewMask.trunc(OperandBitWidth);
960 
961     // If none of the top bits are demanded, convert this into an any_extend.
962     APInt NewBits =
963       APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
964     if (!NewBits.intersects(NewMask))
965       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
966                                                Op.getValueType(),
967                                                Op.getOperand(0)));
968 
969     if (SimplifyDemandedBits(Op.getOperand(0), InMask,
970                              KnownZero, KnownOne, TLO, Depth+1))
971       return true;
972     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
973     KnownZero = KnownZero.zext(BitWidth);
974     KnownOne = KnownOne.zext(BitWidth);
975     KnownZero |= NewBits;
976     break;
977   }
978   case ISD::SIGN_EXTEND: {
979     EVT InVT = Op.getOperand(0).getValueType();
980     unsigned InBits = InVT.getScalarType().getSizeInBits();
981     APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
982     APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
983     APInt NewBits   = ~InMask & NewMask;
984 
985     // If none of the top bits are demanded, convert this into an any_extend.
986     if (NewBits == 0)
987       return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
988                                               Op.getValueType(),
989                                               Op.getOperand(0)));
990 
991     // Since some of the sign extended bits are demanded, we know that the sign
992     // bit is demanded.
993     APInt InDemandedBits = InMask & NewMask;
994     InDemandedBits |= InSignBit;
995     InDemandedBits = InDemandedBits.trunc(InBits);
996 
997     if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
998                              KnownOne, TLO, Depth+1))
999       return true;
1000     KnownZero = KnownZero.zext(BitWidth);
1001     KnownOne = KnownOne.zext(BitWidth);
1002 
1003     // If the sign bit is known zero, convert this to a zero extend.
1004     if (KnownZero.intersects(InSignBit))
1005       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
1006                                                Op.getValueType(),
1007                                                Op.getOperand(0)));
1008 
1009     // If the sign bit is known one, the top bits match.
1010     if (KnownOne.intersects(InSignBit)) {
1011       KnownOne |= NewBits;
1012       assert((KnownZero & NewBits) == 0);
1013     } else {   // Otherwise, top bits aren't known.
1014       assert((KnownOne & NewBits) == 0);
1015       assert((KnownZero & NewBits) == 0);
1016     }
1017     break;
1018   }
1019   case ISD::ANY_EXTEND: {
1020     unsigned OperandBitWidth =
1021       Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
1022     APInt InMask = NewMask.trunc(OperandBitWidth);
1023     if (SimplifyDemandedBits(Op.getOperand(0), InMask,
1024                              KnownZero, KnownOne, TLO, Depth+1))
1025       return true;
1026     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1027     KnownZero = KnownZero.zext(BitWidth);
1028     KnownOne = KnownOne.zext(BitWidth);
1029     break;
1030   }
1031   case ISD::TRUNCATE: {
1032     // Simplify the input, using demanded bit information, and compute the known
1033     // zero/one bits live out.
1034     unsigned OperandBitWidth =
1035       Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
1036     APInt TruncMask = NewMask.zext(OperandBitWidth);
1037     if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
1038                              KnownZero, KnownOne, TLO, Depth+1))
1039       return true;
1040     KnownZero = KnownZero.trunc(BitWidth);
1041     KnownOne = KnownOne.trunc(BitWidth);
1042 
1043     // If the input is only used by this truncate, see if we can shrink it based
1044     // on the known demanded bits.
1045     if (Op.getOperand(0).getNode()->hasOneUse()) {
1046       SDValue In = Op.getOperand(0);
1047       switch (In.getOpcode()) {
1048       default: break;
1049       case ISD::SRL:
1050         // Shrink SRL by a constant if none of the high bits shifted in are
1051         // demanded.
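        // For example, (i32 truncate (i64 srl X, 16)) can become
        // (i32 srl (i32 truncate X), 16) when the result bits that would come
        // from the high half of X are not demanded.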
1052         if (TLO.LegalTypes() &&
1053             !isTypeDesirableForOp(ISD::SRL, Op.getValueType()))
1054           // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
1055           // undesirable.
1056           break;
1057         ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
1058         if (!ShAmt)
1059           break;
1060         SDValue Shift = In.getOperand(1);
1061         if (TLO.LegalTypes()) {
1062           uint64_t ShVal = ShAmt->getZExtValue();
1063           Shift = TLO.DAG.getConstant(ShVal, dl,
1064                                       getShiftAmountTy(Op.getValueType(), DL));
1065         }
1066 
1067         APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
1068                                                OperandBitWidth - BitWidth);
1069         HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
1070 
1071         if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
1072           // None of the shifted in bits are needed.  Add a truncate of the
1073           // shift input, then shift it.
1074           SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
1075                                              Op.getValueType(),
1076                                              In.getOperand(0));
1077           return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
1078                                                    Op.getValueType(),
1079                                                    NewTrunc,
1080                                                    Shift));
1081         }
1082         break;
1083       }
1084     }
1085 
1086     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1087     break;
1088   }
1089   case ISD::AssertZext: {
1090     // AssertZext demands all of the high bits, plus any of the low bits
1091     // demanded by its users.
1092     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1093     APInt InMask = APInt::getLowBitsSet(BitWidth,
1094                                         VT.getSizeInBits());
1095     if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | NewMask,
1096                              KnownZero, KnownOne, TLO, Depth+1))
1097       return true;
1098     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1099 
1100     KnownZero |= ~InMask & NewMask;
1101     break;
1102   }
1103   case ISD::BITCAST:
1104     // If this is an FP->Int bitcast and if the sign bit is the only
1105     // thing demanded, turn this into a FGETSIGN.
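    // For example, when only the sign bit of (i64 bitcast (f64 X)) is
    // demanded, this produces (shl (fgetsign X), 63), zero-extending the
    // FGETSIGN result first if it had to be computed as an i32.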
1106     if (!TLO.LegalOperations() &&
1107         !Op.getValueType().isVector() &&
1108         !Op.getOperand(0).getValueType().isVector() &&
1109         NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) &&
1110         Op.getOperand(0).getValueType().isFloatingPoint()) {
1111       bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
1112       bool i32Legal  = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
1113       if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple() &&
1114            Op.getOperand(0).getValueType() != MVT::f128) {
1115         // Cannot eliminate/lower SHL for f128 yet.
1116         EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32;
1117         // Make a FGETSIGN + SHL to move the sign bit into the appropriate
1118         // place.  We expect the SHL to be eliminated by other optimizations.
1119         SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
1120         unsigned OpVTSizeInBits = Op.getValueType().getSizeInBits();
1121         if (!OpVTLegal && OpVTSizeInBits > 32)
1122           Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign);
1123         unsigned ShVal = Op.getValueType().getSizeInBits()-1;
1124         SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, Op.getValueType());
1125         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
1126                                                  Op.getValueType(),
1127                                                  Sign, ShAmt));
1128       }
1129     }
1130     break;
1131   case ISD::ADD:
1132   case ISD::MUL:
1133   case ISD::SUB: {
1134     // Add, Sub, and Mul don't demand any bits in positions beyond that
1135     // of the highest bit demanded of them.
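    // For example, if only the low 16 bits of an i32 add are demanded, the
    // operands' upper 16 bits cannot influence those bits, so only the low 16
    // bits of each operand are demanded here.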
1136     APInt LoMask = APInt::getLowBitsSet(BitWidth,
1137                                         BitWidth - NewMask.countLeadingZeros());
1138     if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2,
1139                              KnownOne2, TLO, Depth+1))
1140       return true;
1141     if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2,
1142                              KnownOne2, TLO, Depth+1))
1143       return true;
1144     // See if the operation should be performed at a smaller bit width.
1145     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
1146       return true;
1147   }
1148   // FALL THROUGH
1149   default:
1150     // Just use computeKnownBits to compute output bits.
1151     TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
1152     break;
1153   }
1154 
1155   // If we know the value of all of the demanded bits, return this as a
1156   // constant.
1157   if ((NewMask & (KnownZero|KnownOne)) == NewMask) {
1158     // Avoid folding to a constant if any OpaqueConstant is involved.
1159     const SDNode *N = Op.getNode();
1160     for (SDNodeIterator I = SDNodeIterator::begin(N),
1161          E = SDNodeIterator::end(N); I != E; ++I) {
1162       SDNode *Op = *I;
1163       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
1164         if (C->isOpaque())
1165           return false;
1166     }
1167     return TLO.CombineTo(Op,
1168                          TLO.DAG.getConstant(KnownOne, dl, Op.getValueType()));
1169   }
1170 
1171   return false;
1172 }
1173 
1174 /// Determine which of the bits specified in Mask are known to be either zero or
1175 /// one and return them in the KnownZero/KnownOne bitsets.
1176 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1177                                                    APInt &KnownZero,
1178                                                    APInt &KnownOne,
1179                                                    const SelectionDAG &DAG,
1180                                                    unsigned Depth) const {
1181   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1182           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1183           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1184           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1185          "Should use MaskedValueIsZero if you don't know whether Op"
1186          " is a target node!");
1187   KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
1188 }
1189 
1190 /// This method can be implemented by targets that want to expose additional
1191 /// information about sign bits to the DAG Combiner.
1192 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
1193                                                          const SelectionDAG &,
1194                                                          unsigned Depth) const {
1195   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1196           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1197           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1198           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1199          "Should use ComputeNumSignBits if you don't know whether Op"
1200          " is a target node!");
1201   return 1;
1202 }
1203 
1204 /// Test if the given value is known to have exactly one bit set. This differs
1205 /// from computeKnownBits in that it doesn't need to determine which bit is set.
1206 static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
1207   // A left-shift of a constant one will have exactly one bit set, because
1208   // shifting the bit off the end is undefined.
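  // For example, (shl 1, X) has exactly one bit set for every defined X.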
1209   if (Val.getOpcode() == ISD::SHL)
1210     if (ConstantSDNode *C =
1211          dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0)))
1212       if (C->getAPIntValue() == 1)
1213         return true;
1214 
1215   // Similarly, a right-shift of a constant sign-bit will have exactly
1216   // one bit set.
1217   if (Val.getOpcode() == ISD::SRL)
1218     if (ConstantSDNode *C =
1219          dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0)))
1220       if (C->getAPIntValue().isSignBit())
1221         return true;
1222 
1223   // More could be done here, though the above checks are enough
1224   // to handle some common cases.
1225 
1226   // Fall back to computeKnownBits to catch other known cases.
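  // For example, for an 8-bit value, KnownZero == 0xf7 and KnownOne == 0x08
  // prove that bit 3 is set and all other bits are clear, i.e. exactly one
  // bit is set.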
1227   EVT OpVT = Val.getValueType();
1228   unsigned BitWidth = OpVT.getScalarType().getSizeInBits();
1229   APInt KnownZero, KnownOne;
1230   DAG.computeKnownBits(Val, KnownZero, KnownOne);
1231   return (KnownZero.countPopulation() == BitWidth - 1) &&
1232          (KnownOne.countPopulation() == 1);
1233 }
1234 
1235 bool TargetLowering::isConstTrueVal(const SDNode *N) const {
1236   if (!N)
1237     return false;
1238 
1239   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1240   if (!CN) {
1241     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1242     if (!BV)
1243       return false;
1244 
1245     BitVector UndefElements;
1246     CN = BV->getConstantSplatNode(&UndefElements);
1247     // Only interested in constant splats, and we don't try to handle undef
1248     // elements in identifying boolean constants.
    if (!CN || !UndefElements.none())
1250       return false;
1251   }
1252 
1253   switch (getBooleanContents(N->getValueType(0))) {
1254   case UndefinedBooleanContent:
1255     return CN->getAPIntValue()[0];
1256   case ZeroOrOneBooleanContent:
1257     return CN->isOne();
1258   case ZeroOrNegativeOneBooleanContent:
1259     return CN->isAllOnesValue();
1260   }
1261 
1262   llvm_unreachable("Invalid boolean contents");
1263 }
1264 
1265 bool TargetLowering::isConstFalseVal(const SDNode *N) const {
1266   if (!N)
1267     return false;
1268 
1269   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1270   if (!CN) {
1271     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1272     if (!BV)
1273       return false;
1274 
1275     BitVector UndefElements;
1276     CN = BV->getConstantSplatNode(&UndefElements);
1277     // Only interested in constant splats, and we don't try to handle undef
1278     // elements in identifying boolean constants.
    if (!CN || !UndefElements.none())
1280       return false;
1281   }
1282 
1283   if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
1284     return !CN->getAPIntValue()[0];
1285 
1286   return CN->isNullValue();
1287 }
1288 
1289 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
1290                                        bool SExt) const {
1291   if (VT == MVT::i1)
1292     return N->isOne();
1293 
1294   TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
1295   switch (Cnt) {
1296   case TargetLowering::ZeroOrOneBooleanContent:
1297     // An extended value of 1 is always true, unless its original type is i1,
1298     // in which case it will be sign extended to -1.
1299     return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
1300   case TargetLowering::UndefinedBooleanContent:
1301   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1302     return N->isAllOnesValue() && SExt;
1303   }
1304   llvm_unreachable("Unexpected enumeration.");
1305 }
1306 
1307 /// Try to simplify a setcc built with the specified operands and cc. If it is
1308 /// unable to simplify it, return a null SDValue.
1309 SDValue
1310 TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
1311                               ISD::CondCode Cond, bool foldBooleans,
1312                               DAGCombinerInfo &DCI, SDLoc dl) const {
1313   SelectionDAG &DAG = DCI.DAG;
1314 
1315   // These setcc operations always fold.
1316   switch (Cond) {
1317   default: break;
1318   case ISD::SETFALSE:
1319   case ISD::SETFALSE2: return DAG.getConstant(0, dl, VT);
1320   case ISD::SETTRUE:
1321   case ISD::SETTRUE2: {
1322     TargetLowering::BooleanContent Cnt =
1323         getBooleanContents(N0->getValueType(0));
1324     return DAG.getConstant(
1325         Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1326         VT);
1327   }
1328   }
1329 
1330   // Ensure that the constant occurs on the RHS, and fold constant
1331   // comparisons.
1332   ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
1333   if (isa<ConstantSDNode>(N0.getNode()) &&
1334       (DCI.isBeforeLegalizeOps() ||
1335        isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
1336     return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
1337 
1338   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1339     const APInt &C1 = N1C->getAPIntValue();
1340 
1341     // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
1342     // equality comparison, then we're just comparing whether X itself is
1343     // zero.
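    // For example, on i32 the ctlz result lies in [0, 32], and 32 (the
    // result for x == 0) is the only value in that range with bit 5 set, so
    // the shifted result is 1 exactly when x == 0.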
1344     if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
1345         N0.getOperand(0).getOpcode() == ISD::CTLZ &&
1346         N0.getOperand(1).getOpcode() == ISD::Constant) {
1347       const APInt &ShAmt
1348         = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1349       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1350           ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
1351         if ((C1 == 0) == (Cond == ISD::SETEQ)) {
1352           // (srl (ctlz x), 5) == 0  -> X != 0
1353           // (srl (ctlz x), 5) != 1  -> X != 0
1354           Cond = ISD::SETNE;
1355         } else {
1356           // (srl (ctlz x), 5) != 0  -> X == 0
1357           // (srl (ctlz x), 5) == 1  -> X == 0
1358           Cond = ISD::SETEQ;
1359         }
1360         SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
1361         return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
1362                             Zero, Cond);
1363       }
1364     }
1365 
1366     SDValue CTPOP = N0;
1367     // Look through truncs that don't change the value of a ctpop.
1368     if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
1369       CTPOP = N0.getOperand(0);
1370 
1371     if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
1372         (N0 == CTPOP || N0.getValueType().getSizeInBits() >
1373                         Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) {
1374       EVT CTVT = CTPOP.getValueType();
1375       SDValue CTOp = CTPOP.getOperand(0);
1376 
1377       // (ctpop x) u< 2 -> (x & x-1) == 0
1378       // (ctpop x) u> 1 -> (x & x-1) != 0
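      // This works because x & (x-1) clears the lowest set bit, so it is
      // zero iff x has at most one bit set. E.g. x == 0b1010 gives
      // 0b1010 & 0b1001 == 0b1000, which is nonzero since ctpop(x) == 2.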
1379       if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
1380         SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
1381                                   DAG.getConstant(1, dl, CTVT));
1382         SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
1383         ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
1384         return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
1385       }
1386 
1387       // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
1388     }
1389 
1390     // (zext x) == C --> x == (trunc C)
1391     // (sext x) == C --> x == (trunc C)
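    // For example, (seteq (zext i8 X to i32), 200) only depends on the low
    // 8 bits, so it can become an i8 compare of X against 200 once we know
    // the constant fits in the narrower type.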
1392     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1393         DCI.isBeforeLegalize() && N0->hasOneUse()) {
1394       unsigned MinBits = N0.getValueSizeInBits();
1395       SDValue PreExt;
1396       bool Signed = false;
1397       if (N0->getOpcode() == ISD::ZERO_EXTEND) {
1398         // ZExt
1399         MinBits = N0->getOperand(0).getValueSizeInBits();
1400         PreExt = N0->getOperand(0);
1401       } else if (N0->getOpcode() == ISD::AND) {
1402         // DAGCombine turns costly ZExts into ANDs
1403         if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
1404           if ((C->getAPIntValue()+1).isPowerOf2()) {
1405             MinBits = C->getAPIntValue().countTrailingOnes();
1406             PreExt = N0->getOperand(0);
1407           }
1408       } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
1409         // SExt
1410         MinBits = N0->getOperand(0).getValueSizeInBits();
1411         PreExt = N0->getOperand(0);
1412         Signed = true;
1413       } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
1414         // ZEXTLOAD / SEXTLOAD
1415         if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
1416           MinBits = LN0->getMemoryVT().getSizeInBits();
1417           PreExt = N0;
1418         } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
1419           Signed = true;
1420           MinBits = LN0->getMemoryVT().getSizeInBits();
1421           PreExt = N0;
1422         }
1423       }
1424 
1425       // Figure out how many bits we need to preserve this constant.
1426       unsigned ReqdBits = Signed ?
1427         C1.getBitWidth() - C1.getNumSignBits() + 1 :
1428         C1.getActiveBits();
1429 
1430       // Make sure we're not losing bits from the constant.
1431       if (MinBits > 0 &&
1432           MinBits < C1.getBitWidth() &&
1433           MinBits >= ReqdBits) {
1434         EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
1435         if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
1436           // Will get folded away.
1437           SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
1438           SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
1439           return DAG.getSetCC(dl, VT, Trunc, C, Cond);
1440         }
1441 
1442         // If truncating the setcc operands is not desirable, we can still
1443         // simplify the expression in some cases:
1444         // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc)
1445         // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc))
1446         // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc))
1447         // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc)
1448         // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc))
1449         // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc)
1450         SDValue TopSetCC = N0->getOperand(0);
1451         unsigned N0Opc = N0->getOpcode();
1452         bool SExt = (N0Opc == ISD::SIGN_EXTEND);
1453         if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
1454             TopSetCC.getOpcode() == ISD::SETCC &&
1455             (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
1456             (isConstFalseVal(N1C) ||
1457              isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
1458 
1459           bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
1460                          (!N1C->isNullValue() && Cond == ISD::SETNE);
1461 
1462           if (!Inverse)
1463             return TopSetCC;
1464 
1465           ISD::CondCode InvCond = ISD::getSetCCInverse(
1466               cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
1467               TopSetCC.getOperand(0).getValueType().isInteger());
1468           return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
1469                                       TopSetCC.getOperand(1),
1470                                       InvCond);
1471 
1472         }
1473       }
1474     }
1475 
1476     // If the LHS is '(and load, const)', the RHS is 0,
1477     // the test is for equality or unsigned, and all 1 bits of the const are
1478     // in the same partial word, see if we can shorten the load.
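    // For example, on a little-endian target,
    //   ((load i32 p) & 0xff0000) == 0
    // only inspects the byte at p+2, so the load can be narrowed to an i8
    // load from offset 2 masked with 0xff.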
1479     if (DCI.isBeforeLegalize() &&
1480         !ISD::isSignedIntSetCC(Cond) &&
1481         N0.getOpcode() == ISD::AND && C1 == 0 &&
1482         N0.getNode()->hasOneUse() &&
1483         isa<LoadSDNode>(N0.getOperand(0)) &&
1484         N0.getOperand(0).getNode()->hasOneUse() &&
1485         isa<ConstantSDNode>(N0.getOperand(1))) {
1486       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
1487       APInt bestMask;
1488       unsigned bestWidth = 0, bestOffset = 0;
1489       if (!Lod->isVolatile() && Lod->isUnindexed()) {
1490         unsigned origWidth = N0.getValueType().getSizeInBits();
1491         unsigned maskWidth = origWidth;
1492         // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
1493         // 8 bits, but have to be careful...
1494         if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
1495           origWidth = Lod->getMemoryVT().getSizeInBits();
1496         const APInt &Mask =
1497           cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1498         for (unsigned width = origWidth / 2; width>=8; width /= 2) {
1499           APInt newMask = APInt::getLowBitsSet(maskWidth, width);
1500           for (unsigned offset=0; offset<origWidth/width; offset++) {
1501             if ((newMask & Mask) == Mask) {
1502               if (!DAG.getDataLayout().isLittleEndian())
1503                 bestOffset = (origWidth/width - offset - 1) * (width/8);
1504               else
1505                 bestOffset = (uint64_t)offset * (width/8);
1506               bestMask = Mask.lshr(offset * (width/8) * 8);
1507               bestWidth = width;
1508               break;
1509             }
1510             newMask = newMask << width;
1511           }
1512         }
1513       }
1514       if (bestWidth) {
1515         EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
1516         if (newVT.isRound()) {
1517           EVT PtrType = Lod->getOperand(1).getValueType();
1518           SDValue Ptr = Lod->getBasePtr();
1519           if (bestOffset != 0)
1520             Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
1521                               DAG.getConstant(bestOffset, dl, PtrType));
1522           unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
1523           SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
1524                                 Lod->getPointerInfo().getWithOffset(bestOffset),
1525                                         false, false, false, NewAlign);
1526           return DAG.getSetCC(dl, VT,
1527                               DAG.getNode(ISD::AND, dl, newVT, NewLoad,
1528                                       DAG.getConstant(bestMask.trunc(bestWidth),
1529                                                       dl, newVT)),
1530                               DAG.getConstant(0LL, dl, newVT), Cond);
1531         }
1532       }
1533     }
1534 
1535     // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
1536     if (N0.getOpcode() == ISD::ZERO_EXTEND) {
1537       unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();
1538 
1539       // If the comparison constant has bits in the upper part, the
1540       // zero-extended value could never match.
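      // For example, (zext i8 X to i32) can never equal 0x100, so seteq
      // folds to false and setne to true; the unsigned orderings are
      // likewise decided by the constant's high bits alone.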
1541       if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
1542                                               C1.getBitWidth() - InSize))) {
1543         switch (Cond) {
1544         case ISD::SETUGT:
1545         case ISD::SETUGE:
1546         case ISD::SETEQ: return DAG.getConstant(0, dl, VT);
1547         case ISD::SETULT:
1548         case ISD::SETULE:
1549         case ISD::SETNE: return DAG.getConstant(1, dl, VT);
1550         case ISD::SETGT:
1551         case ISD::SETGE:
1552           // True if the sign bit of C1 is set.
1553           return DAG.getConstant(C1.isNegative(), dl, VT);
1554         case ISD::SETLT:
1555         case ISD::SETLE:
1556           // True if the sign bit of C1 isn't set.
1557           return DAG.getConstant(C1.isNonNegative(), dl, VT);
1558         default:
1559           break;
1560         }
1561       }
1562 
1563       // Otherwise, we can perform the comparison with the low bits.
1564       switch (Cond) {
1565       case ISD::SETEQ:
1566       case ISD::SETNE:
1567       case ISD::SETUGT:
1568       case ISD::SETUGE:
1569       case ISD::SETULT:
1570       case ISD::SETULE: {
1571         EVT newVT = N0.getOperand(0).getValueType();
1572         if (DCI.isBeforeLegalizeOps() ||
1573             (isOperationLegal(ISD::SETCC, newVT) &&
1574              getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
1575           EVT NewSetCCVT =
1576               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), newVT);
1577           SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
1578 
1579           SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
1580                                           NewConst, Cond);
1581           return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
1582         }
1583         break;
1584       }
1585       default:
        break;   // TODO: Be more careful with signed comparisons.
1587       }
1588     } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1589                (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1590       EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
1591       unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
1592       EVT ExtDstTy = N0.getValueType();
1593       unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
1594 
1595       // If the constant doesn't fit into the number of bits for the source of
1596       // the sign extension, it is impossible for both sides to be equal.
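      // For example, a sign_extend_inreg from i8 only produces values in
      // [-128, 127], so an equality comparison against 300 is always false
      // (and setne is always true).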
1597       if (C1.getMinSignedBits() > ExtSrcTyBits)
1598         return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
1599 
1600       SDValue ZextOp;
1601       EVT Op0Ty = N0.getOperand(0).getValueType();
1602       if (Op0Ty == ExtSrcTy) {
1603         ZextOp = N0.getOperand(0);
1604       } else {
1605         APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
1606         ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
1607                               DAG.getConstant(Imm, dl, Op0Ty));
1608       }
1609       if (!DCI.isCalledByLegalizer())
1610         DCI.AddToWorklist(ZextOp.getNode());
1611       // Otherwise, make this a use of a zext.
1612       return DAG.getSetCC(dl, VT, ZextOp,
1613                           DAG.getConstant(C1 & APInt::getLowBitsSet(
1614                                                               ExtDstTyBits,
1615                                                               ExtSrcTyBits),
1616                                           dl, ExtDstTy),
1617                           Cond);
1618     } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
1619                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1620       // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
1621       if (N0.getOpcode() == ISD::SETCC &&
1622           isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
1623         bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1);
1624         if (TrueWhenTrue)
1625           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
1626         // Invert the condition.
1627         ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
1628         CC = ISD::getSetCCInverse(CC,
1629                                   N0.getOperand(0).getValueType().isInteger());
1630         if (DCI.isBeforeLegalizeOps() ||
1631             isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
1632           return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
1633       }
1634 
1635       if ((N0.getOpcode() == ISD::XOR ||
1636            (N0.getOpcode() == ISD::AND &&
1637             N0.getOperand(0).getOpcode() == ISD::XOR &&
1638             N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
1639           isa<ConstantSDNode>(N0.getOperand(1)) &&
1640           cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
1641         // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
1642         // can only do this if the top bits are known zero.
1643         unsigned BitWidth = N0.getValueSizeInBits();
1644         if (DAG.MaskedValueIsZero(N0,
1645                                   APInt::getHighBitsSet(BitWidth,
1646                                                         BitWidth-1))) {
1647           // Okay, get the un-inverted input value.
1648           SDValue Val;
1649           if (N0.getOpcode() == ISD::XOR)
1650             Val = N0.getOperand(0);
1651           else {
1652             assert(N0.getOpcode() == ISD::AND &&
1653                     N0.getOperand(0).getOpcode() == ISD::XOR);
1654             // ((X^1)&1)^1 -> X & 1
1655             Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
1656                               N0.getOperand(0).getOperand(0),
1657                               N0.getOperand(1));
1658           }
1659 
1660           return DAG.getSetCC(dl, VT, Val, N1,
1661                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1662         }
1663       } else if (N1C->getAPIntValue() == 1 &&
1664                  (VT == MVT::i1 ||
1665                   getBooleanContents(N0->getValueType(0)) ==
1666                       ZeroOrOneBooleanContent)) {
1667         SDValue Op0 = N0;
1668         if (Op0.getOpcode() == ISD::TRUNCATE)
1669           Op0 = Op0.getOperand(0);
1670 
1671         if ((Op0.getOpcode() == ISD::XOR) &&
1672             Op0.getOperand(0).getOpcode() == ISD::SETCC &&
1673             Op0.getOperand(1).getOpcode() == ISD::SETCC) {
1674           // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
1675           Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
1676           return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1),
1677                               Cond);
1678         }
1679         if (Op0.getOpcode() == ISD::AND &&
1680             isa<ConstantSDNode>(Op0.getOperand(1)) &&
1681             cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) {
1682           // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
1683           if (Op0.getValueType().bitsGT(VT))
1684             Op0 = DAG.getNode(ISD::AND, dl, VT,
1685                           DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
1686                           DAG.getConstant(1, dl, VT));
1687           else if (Op0.getValueType().bitsLT(VT))
1688             Op0 = DAG.getNode(ISD::AND, dl, VT,
1689                         DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
1690                         DAG.getConstant(1, dl, VT));
1691 
1692           return DAG.getSetCC(dl, VT, Op0,
1693                               DAG.getConstant(0, dl, Op0.getValueType()),
1694                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1695         }
1696         if (Op0.getOpcode() == ISD::AssertZext &&
1697             cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
1698           return DAG.getSetCC(dl, VT, Op0,
1699                               DAG.getConstant(0, dl, Op0.getValueType()),
1700                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1701       }
1702     }
1703 
1704     APInt MinVal, MaxVal;
1705     unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
1706     if (ISD::isSignedIntSetCC(Cond)) {
1707       MinVal = APInt::getSignedMinValue(OperandBitSize);
1708       MaxVal = APInt::getSignedMaxValue(OperandBitSize);
1709     } else {
1710       MinVal = APInt::getMinValue(OperandBitSize);
1711       MaxVal = APInt::getMaxValue(OperandBitSize);
1712     }
1713 
1714     // Canonicalize GE/LE comparisons to use GT/LT comparisons.
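    // For example, X >=s 5 becomes X >s 4. The C1 == MIN case is handled
    // first, so subtracting one from the constant cannot wrap.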
1715     if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
1716       if (C1 == MinVal) return DAG.getConstant(1, dl, VT);  // X >= MIN --> true
1717       // X >= C0 --> X > (C0 - 1)
1718       APInt C = C1 - 1;
1719       ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
1720       if ((DCI.isBeforeLegalizeOps() ||
1721            isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1722           (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1723                                 isLegalICmpImmediate(C.getSExtValue())))) {
1724         return DAG.getSetCC(dl, VT, N0,
1725                             DAG.getConstant(C, dl, N1.getValueType()),
1726                             NewCC);
1727       }
1728     }
1729 
1730     if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
1731       if (C1 == MaxVal) return DAG.getConstant(1, dl, VT);  // X <= MAX --> true
1732       // X <= C0 --> X < (C0 + 1)
1733       APInt C = C1 + 1;
1734       ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
1735       if ((DCI.isBeforeLegalizeOps() ||
1736            isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1737           (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1738                                 isLegalICmpImmediate(C.getSExtValue())))) {
1739         return DAG.getSetCC(dl, VT, N0,
1740                             DAG.getConstant(C, dl, N1.getValueType()),
1741                             NewCC);
1742       }
1743     }
1744 
1745     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
1746       return DAG.getConstant(0, dl, VT);      // X < MIN --> false
1747     if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
1748       return DAG.getConstant(1, dl, VT);      // X >= MIN --> true
1749     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
1750       return DAG.getConstant(0, dl, VT);      // X > MAX --> false
1751     if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
1752       return DAG.getConstant(1, dl, VT);      // X <= MAX --> true
1753 
1754     // Canonicalize setgt X, Min --> setne X, Min
1755     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
1756       return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1757     // Canonicalize setlt X, Max --> setne X, Max
1758     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
1759       return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1760 
1761     // If we have setult X, 1, turn it into seteq X, 0
1762     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
1763       return DAG.getSetCC(dl, VT, N0,
1764                           DAG.getConstant(MinVal, dl, N0.getValueType()),
1765                           ISD::SETEQ);
1766     // If we have setugt X, Max-1, turn it into seteq X, Max
1767     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
1768       return DAG.getSetCC(dl, VT, N0,
1769                           DAG.getConstant(MaxVal, dl, N0.getValueType()),
1770                           ISD::SETEQ);
1771 
1772     // If we have "setcc X, C0", check to see if we can shrink the immediate
1773     // by changing cc.
1774 
1775     // SETUGT X, SINTMAX  -> SETLT X, 0
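    // For example, on i32 "X u> 0x7fffffff" holds exactly when the sign bit
    // of X is set, which is just "X <s 0" and needs no large immediate.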
1776     if (Cond == ISD::SETUGT &&
1777         C1 == APInt::getSignedMaxValue(OperandBitSize))
1778       return DAG.getSetCC(dl, VT, N0,
1779                           DAG.getConstant(0, dl, N1.getValueType()),
1780                           ISD::SETLT);
1781 
1782     // SETULT X, SINTMIN  -> SETGT X, -1
1783     if (Cond == ISD::SETULT &&
1784         C1 == APInt::getSignedMinValue(OperandBitSize)) {
1785       SDValue ConstMinusOne =
1786           DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
1787                           N1.getValueType());
1788       return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
1789     }
1790 
1791     // Fold bit comparisons when we can.
1792     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1793         (VT == N0.getValueType() ||
1794          (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
1795         N0.getOpcode() == ISD::AND) {
1796       auto &DL = DAG.getDataLayout();
1797       if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1798         EVT ShiftTy = DCI.isBeforeLegalize()
1799                           ? getPointerTy(DL)
1800                           : getShiftAmountTy(N0.getValueType(), DL);
1801         if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
1802           // Perform the xform if the AND RHS is a single bit.
1803           if (AndRHS->getAPIntValue().isPowerOf2()) {
1804             return DAG.getNode(ISD::TRUNCATE, dl, VT,
1805                               DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1806                    DAG.getConstant(AndRHS->getAPIntValue().logBase2(), dl,
1807                                    ShiftTy)));
1808           }
1809         } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
1810           // (X & 8) == 8  -->  (X & 8) >> 3
1811           // Perform the xform if C1 is a single bit.
1812           if (C1.isPowerOf2()) {
1813             return DAG.getNode(ISD::TRUNCATE, dl, VT,
1814                                DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1815                                       DAG.getConstant(C1.logBase2(), dl,
1816                                                       ShiftTy)));
1817           }
1818         }
1819       }
1820     }
1821 
1822     if (C1.getMinSignedBits() <= 64 &&
1823         !isLegalICmpImmediate(C1.getSExtValue())) {
1824       // (X & -256) == 256 -> (X >> 8) == 1
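      // For i32 the mask -256 selects bits [31:8] of X, so the compare holds
      // exactly when X u>> 8 == 1, and the shifted constant is more likely
      // to be a legal compare immediate.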
1825       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1826           N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
1827         if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1828           const APInt &AndRHSC = AndRHS->getAPIntValue();
1829           if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
1830             unsigned ShiftBits = AndRHSC.countTrailingZeros();
1831             auto &DL = DAG.getDataLayout();
1832             EVT ShiftTy = DCI.isBeforeLegalize()
1833                               ? getPointerTy(DL)
1834                               : getShiftAmountTy(N0.getValueType(), DL);
1835             EVT CmpTy = N0.getValueType();
1836             SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
1837                                         DAG.getConstant(ShiftBits, dl,
1838                                                         ShiftTy));
1839             SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, CmpTy);
1840             return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
1841           }
1842         }
1843       } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
1844                  Cond == ISD::SETULE || Cond == ISD::SETUGT) {
1845         bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
1846         // X <  0x100000000 -> (X >> 32) <  1
1847         // X >= 0x100000000 -> (X >> 32) >= 1
1848         // X <= 0x0ffffffff -> (X >> 32) <  1
1849         // X >  0x0ffffffff -> (X >> 32) >= 1
1850         unsigned ShiftBits;
1851         APInt NewC = C1;
1852         ISD::CondCode NewCond = Cond;
1853         if (AdjOne) {
1854           ShiftBits = C1.countTrailingOnes();
1855           NewC = NewC + 1;
1856           NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1857         } else {
1858           ShiftBits = C1.countTrailingZeros();
1859         }
1860         NewC = NewC.lshr(ShiftBits);
1861         if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
1862           isLegalICmpImmediate(NewC.getSExtValue())) {
1863           auto &DL = DAG.getDataLayout();
1864           EVT ShiftTy = DCI.isBeforeLegalize()
1865                             ? getPointerTy(DL)
1866                             : getShiftAmountTy(N0.getValueType(), DL);
1867           EVT CmpTy = N0.getValueType();
1868           SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
1869                                       DAG.getConstant(ShiftBits, dl, ShiftTy));
1870           SDValue CmpRHS = DAG.getConstant(NewC, dl, CmpTy);
1871           return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
1872         }
1873       }
1874     }
1875   }
1876 
1877   if (isa<ConstantFPSDNode>(N0.getNode())) {
1878     // Constant fold or commute setcc.
1879     SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
1880     if (O.getNode()) return O;
1881   } else if (auto *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1882     // If the RHS of an FP comparison is a constant, simplify it away in
1883     // some cases.
1884     if (CFP->getValueAPF().isNaN()) {
1885       // If an operand is known to be a nan, we can fold it.
1886       switch (ISD::getUnorderedFlavor(Cond)) {
1887       default: llvm_unreachable("Unknown flavor!");
1888       case 0:  // Known false.
1889         return DAG.getConstant(0, dl, VT);
1890       case 1:  // Known true.
1891         return DAG.getConstant(1, dl, VT);
1892       case 2:  // Undefined.
1893         return DAG.getUNDEF(VT);
1894       }
1895     }
1896 
1897     // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
1898     // constant if knowing that the operand is non-nan is enough.  We prefer to
1899     // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
1900     // materialize 0.0.
1901     if (Cond == ISD::SETO || Cond == ISD::SETUO)
1902       return DAG.getSetCC(dl, VT, N0, N0, Cond);
1903 
1904     // If the condition is not legal, see if we can find an equivalent one
1905     // which is legal.
1906     if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
1907       // If the comparison was an awkward floating-point == or != and one of
1908       // the comparison operands is infinity or negative infinity, convert the
1909       // condition to a less-awkward <= or >=.
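      // For example, "X ==o +Inf" is equivalent to "X >=o +Inf" because no
      // value compares ordered-greater-than +Inf, and the >= form may map
      // onto a legal condition code.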
1910       if (CFP->getValueAPF().isInfinity()) {
1911         if (CFP->getValueAPF().isNegative()) {
1912           if (Cond == ISD::SETOEQ &&
1913               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
1914             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
1915           if (Cond == ISD::SETUEQ &&
1916               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
1917             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
1918           if (Cond == ISD::SETUNE &&
1919               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
1920             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
1921           if (Cond == ISD::SETONE &&
1922               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
1923             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
1924         } else {
1925           if (Cond == ISD::SETOEQ &&
1926               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
1927             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
1928           if (Cond == ISD::SETUEQ &&
1929               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
1930             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
1931           if (Cond == ISD::SETUNE &&
1932               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
1933             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
1934           if (Cond == ISD::SETONE &&
1935               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
1936             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
1937         }
1938       }
1939     }
1940   }
1941 
1942   if (N0 == N1) {
1943     // The sext(setcc()) => setcc() optimization relies on the appropriate
1944     // constant being emitted.
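    // For example, under ZeroOrNegativeOneBooleanContent a true result must
    // be emitted as all-ones (-1) rather than 1, so that a later sign
    // extension of the boolean keeps its value.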
1945     uint64_t EqVal = 0;
1946     switch (getBooleanContents(N0.getValueType())) {
1947     case UndefinedBooleanContent:
1948     case ZeroOrOneBooleanContent:
1949       EqVal = ISD::isTrueWhenEqual(Cond);
1950       break;
1951     case ZeroOrNegativeOneBooleanContent:
1952       EqVal = ISD::isTrueWhenEqual(Cond) ? -1 : 0;
1953       break;
1954     }
1955 
1956     // We can always fold X == X for integer setcc's.
1957     if (N0.getValueType().isInteger()) {
1958       return DAG.getConstant(EqVal, dl, VT);
1959     }
1960     unsigned UOF = ISD::getUnorderedFlavor(Cond);
1961     if (UOF == 2)   // FP operators that are undefined on NaNs.
1962       return DAG.getConstant(EqVal, dl, VT);
1963     if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
1964       return DAG.getConstant(EqVal, dl, VT);
1965     // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
1966     // if it is not already.
1967     ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
1968     if (NewCond != Cond && (DCI.isBeforeLegalizeOps() ||
1969           getCondCodeAction(NewCond, N0.getSimpleValueType()) == Legal))
1970       return DAG.getSetCC(dl, VT, N0, N1, NewCond);
1971   }
1972 
1973   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1974       N0.getValueType().isInteger()) {
1975     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
1976         N0.getOpcode() == ISD::XOR) {
1977       // Simplify (X+Y) == (X+Z) -->  Y == Z
1978       if (N0.getOpcode() == N1.getOpcode()) {
1979         if (N0.getOperand(0) == N1.getOperand(0))
1980           return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
1981         if (N0.getOperand(1) == N1.getOperand(1))
1982           return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
1983         if (DAG.isCommutativeBinOp(N0.getOpcode())) {
1984           // If X op Y == Y op X, try other combinations.
1985           if (N0.getOperand(0) == N1.getOperand(1))
1986             return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
1987                                 Cond);
1988           if (N0.getOperand(1) == N1.getOperand(0))
1989             return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
1990                                 Cond);
1991         }
1992       }
1993 
1994       // If RHS is a legal immediate value for a compare instruction, we need
1995       // to be careful about increasing register pressure needlessly.
1996       bool LegalRHSImm = false;
1997 
1998       if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
1999         if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2000           // Turn (X+C1) == C2 --> X == C2-C1
2001           if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
2002             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2003                                 DAG.getConstant(RHSC->getAPIntValue()-
2004                                                 LHSR->getAPIntValue(),
2005                                 dl, N0.getValueType()), Cond);
2006           }
2007 
2008           // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
2009           if (N0.getOpcode() == ISD::XOR)
2010             // If we know that all of the inverted bits are zero, don't bother
2011             // performing the inversion.
2012             if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
2013               return
2014                 DAG.getSetCC(dl, VT, N0.getOperand(0),
2015                              DAG.getConstant(LHSR->getAPIntValue() ^
2016                                                RHSC->getAPIntValue(),
2017                                              dl, N0.getValueType()),
2018                              Cond);
2019         }
2020 
2021         // Turn (C1-X) == C2 --> X == C1-C2
2022         if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
2023           if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
2024             return
2025               DAG.getSetCC(dl, VT, N0.getOperand(1),
2026                            DAG.getConstant(SUBC->getAPIntValue() -
2027                                              RHSC->getAPIntValue(),
2028                                            dl, N0.getValueType()),
2029                            Cond);
2030           }
2031         }
2032 
2033         // Could RHSC fold directly into a compare?
2034         if (RHSC->getValueType(0).getSizeInBits() <= 64)
2035           LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
2036       }
2037 
2038       // Simplify (X+Z) == X -->  Z == 0
2039       // Don't do this if X is an immediate that can fold into a cmp
2040       // instruction and X+Z has other uses. It could be an induction variable
2041       // chain, and the transform would increase register pressure.
2042       if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
2043         if (N0.getOperand(0) == N1)
2044           return DAG.getSetCC(dl, VT, N0.getOperand(1),
2045                               DAG.getConstant(0, dl, N0.getValueType()), Cond);
2046         if (N0.getOperand(1) == N1) {
2047           if (DAG.isCommutativeBinOp(N0.getOpcode()))
2048             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2049                                 DAG.getConstant(0, dl, N0.getValueType()),
2050                                 Cond);
2051           if (N0.getNode()->hasOneUse()) {
2052             assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
2053             auto &DL = DAG.getDataLayout();
2054             // (Z-X) == X  --> Z == X<<1
2055             SDValue SH = DAG.getNode(
2056                 ISD::SHL, dl, N1.getValueType(), N1,
2057                 DAG.getConstant(1, dl,
2058                                 getShiftAmountTy(N1.getValueType(), DL)));
2059             if (!DCI.isCalledByLegalizer())
2060               DCI.AddToWorklist(SH.getNode());
2061             return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
2062           }
2063         }
2064       }
2065     }
2066 
2067     if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
2068         N1.getOpcode() == ISD::XOR) {
2069       // Simplify  X == (X+Z) -->  Z == 0
2070       if (N1.getOperand(0) == N0)
2071         return DAG.getSetCC(dl, VT, N1.getOperand(1),
2072                         DAG.getConstant(0, dl, N1.getValueType()), Cond);
2073       if (N1.getOperand(1) == N0) {
2074         if (DAG.isCommutativeBinOp(N1.getOpcode()))
2075           return DAG.getSetCC(dl, VT, N1.getOperand(0),
2076                           DAG.getConstant(0, dl, N1.getValueType()), Cond);
2077         if (N1.getNode()->hasOneUse()) {
2078           assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
2079           auto &DL = DAG.getDataLayout();
2080           // X == (Z-X)  --> X<<1 == Z
2081           SDValue SH = DAG.getNode(
2082               ISD::SHL, dl, N1.getValueType(), N0,
2083               DAG.getConstant(1, dl, getShiftAmountTy(N0.getValueType(), DL)));
2084           if (!DCI.isCalledByLegalizer())
2085             DCI.AddToWorklist(SH.getNode());
2086           return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
2087         }
2088       }
2089     }
2090 
2091     // Simplify x&y == y to x&y != 0 if y has exactly one bit set.
2092     // Note that where y is variable and is known to have at most
2093     // one bit set (for example, if it is z&1) we cannot do this;
2094     // the expressions are not equivalent when y==0.
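    // For example, with y == 0b0100, x&y is either 0 or y, so (x&y) == y
    // and (x&y) != 0 agree; with y == 0 the first form is always true while
    // the second is always false.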
2095     if (N0.getOpcode() == ISD::AND)
2096       if (N0.getOperand(0) == N1 || N0.getOperand(1) == N1) {
2097         if (ValueHasExactlyOneBitSet(N1, DAG)) {
2098           Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
2099           if (DCI.isBeforeLegalizeOps() ||
2100               isCondCodeLegal(Cond, N0.getSimpleValueType())) {
2101             SDValue Zero = DAG.getConstant(0, dl, N1.getValueType());
2102             return DAG.getSetCC(dl, VT, N0, Zero, Cond);
2103           }
2104         }
2105       }
2106     if (N1.getOpcode() == ISD::AND)
2107       if (N1.getOperand(0) == N0 || N1.getOperand(1) == N0) {
2108         if (ValueHasExactlyOneBitSet(N0, DAG)) {
2109           Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
2110           if (DCI.isBeforeLegalizeOps() ||
2111               isCondCodeLegal(Cond, N1.getSimpleValueType())) {
2112             SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
2113             return DAG.getSetCC(dl, VT, N1, Zero, Cond);
2114           }
2115         }
2116       }
2117   }
2118 
2119   // Fold away ALL boolean setcc's.
2120   SDValue Temp;
2121   if (N0.getValueType() == MVT::i1 && foldBooleans) {
2122     switch (Cond) {
2123     default: llvm_unreachable("Unknown integer setcc!");
2124     case ISD::SETEQ:  // X == Y  -> ~(X^Y)
2125       Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
2126       N0 = DAG.getNOT(dl, Temp, MVT::i1);
2127       if (!DCI.isCalledByLegalizer())
2128         DCI.AddToWorklist(Temp.getNode());
2129       break;
2130     case ISD::SETNE:  // X != Y   -->  (X^Y)
2131       N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
2132       break;
2133     case ISD::SETGT:  // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
2134     case ISD::SETULT: // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
2135       Temp = DAG.getNOT(dl, N0, MVT::i1);
2136       N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
2137       if (!DCI.isCalledByLegalizer())
2138         DCI.AddToWorklist(Temp.getNode());
2139       break;
2140     case ISD::SETLT:  // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
2141     case ISD::SETUGT: // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
2142       Temp = DAG.getNOT(dl, N1, MVT::i1);
2143       N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
2144       if (!DCI.isCalledByLegalizer())
2145         DCI.AddToWorklist(Temp.getNode());
2146       break;
2147     case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
2148     case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
2149       Temp = DAG.getNOT(dl, N0, MVT::i1);
2150       N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
2151       if (!DCI.isCalledByLegalizer())
2152         DCI.AddToWorklist(Temp.getNode());
2153       break;
2154     case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
2155     case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
2156       Temp = DAG.getNOT(dl, N1, MVT::i1);
2157       N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
2158       break;
2159     }
2160     if (VT != MVT::i1) {
2161       if (!DCI.isCalledByLegalizer())
2162         DCI.AddToWorklist(N0.getNode());
2163       // FIXME: If running after legalize, we probably can't do this.
2164       N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0);
2165     }
2166     return N0;
2167   }
2168 
2169   // Could not fold it.
2170   return SDValue();
2171 }
2172 
2173 /// Returns true (and the GlobalValue and the offset) if the node is a
2174 /// GlobalAddress + offset.
2175 bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
2176                                     int64_t &Offset) const {
2177   if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
2178     GA = GASD->getGlobal();
2179     Offset += GASD->getOffset();
2180     return true;
2181   }
2182 
2183   if (N->getOpcode() == ISD::ADD) {
2184     SDValue N1 = N->getOperand(0);
2185     SDValue N2 = N->getOperand(1);
2186     if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
2187       if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
2188         Offset += V->getSExtValue();
2189         return true;
2190       }
2191     } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
2192       if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
2193         Offset += V->getSExtValue();
2194         return true;
2195       }
2196     }
2197   }
2198 
2199   return false;
2200 }
2201 
2202 SDValue TargetLowering::PerformDAGCombine(SDNode *N,
2203                                           DAGCombinerInfo &DCI) const {
2204   // Default implementation: no optimization.
2205   return SDValue();
2206 }
2207 
2208 //===----------------------------------------------------------------------===//
2209 //  Inline Assembler Implementation Methods
2210 //===----------------------------------------------------------------------===//
2211 
2212 TargetLowering::ConstraintType
2213 TargetLowering::getConstraintType(StringRef Constraint) const {
2214   unsigned S = Constraint.size();
2215 
2216   if (S == 1) {
2217     switch (Constraint[0]) {
2218     default: break;
2219     case 'r': return C_RegisterClass;
2220     case 'm':    // memory
2221     case 'o':    // offsetable
2222     case 'V':    // not offsetable
2223       return C_Memory;
2224     case 'i':    // Simple Integer or Relocatable Constant
2225     case 'n':    // Simple Integer
2226     case 'E':    // Floating Point Constant
2227     case 'F':    // Floating Point Constant
2228     case 's':    // Relocatable Constant
2229     case 'p':    // Address.
2230     case 'X':    // Allow ANY value.
2231     case 'I':    // Target registers.
2232     case 'J':
2233     case 'K':
2234     case 'L':
2235     case 'M':
2236     case 'N':
2237     case 'O':
2238     case 'P':
2239     case '<':
2240     case '>':
2241       return C_Other;
2242     }
2243   }
2244 
2245   if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
2246     if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
2247       return C_Memory;
2248     return C_Register;
2249   }
2250   return C_Unknown;
2251 }
2252 
2253 /// Try to replace an X constraint, which matches anything, with another that
2254 /// has more specific requirements based on the type of the corresponding
2255 /// operand.
const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
2257   if (ConstraintVT.isInteger())
2258     return "r";
2259   if (ConstraintVT.isFloatingPoint())
2260     return "f";      // works for many targets
2261   return nullptr;
2262 }
2263 
2264 /// Lower the specified operand into the Ops vector.
2265 /// If it is invalid, don't add anything to Ops.
2266 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2267                                                   std::string &Constraint,
2268                                                   std::vector<SDValue> &Ops,
2269                                                   SelectionDAG &DAG) const {
2270 
2271   if (Constraint.length() > 1) return;
2272 
2273   char ConstraintLetter = Constraint[0];
2274   switch (ConstraintLetter) {
2275   default: break;
2276   case 'X':     // Allows any operand; labels (basic block) use this.
2277     if (Op.getOpcode() == ISD::BasicBlock) {
2278       Ops.push_back(Op);
2279       return;
2280     }
2281     // fall through
2282   case 'i':    // Simple Integer or Relocatable Constant
2283   case 'n':    // Simple Integer
2284   case 's': {  // Relocatable Constant
2285     // These operands are interested in values of the form (GV+C), where C may
2286     // be folded in as an offset of GV, or it may be explicitly added.  Also, it
2287     // is possible and fine if either GV or C are missing.
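    // For example, an "i" operand written as "&gv + 4" typically reaches us
    // as (add GlobalAddress:gv, Constant:4) and is lowered below to a
    // single TargetGlobalAddress with offset 4.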
2288     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2289     GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
2290 
2291     // If we have "(add GV, C)", pull out GV/C
2292     if (Op.getOpcode() == ISD::ADD) {
2293       C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2294       GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
2295       if (!C || !GA) {
2296         C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
2297         GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
2298       }
2299       if (!C || !GA) {
2300         C = nullptr;
2301         GA = nullptr;
2302       }
2303     }
2304 
2305     // If we find a valid operand, map to the TargetXXX version so that the
2306     // value itself doesn't get selected.
2307     if (GA) {   // Either &GV   or   &GV+C
2308       if (ConstraintLetter != 'n') {
2309         int64_t Offs = GA->getOffset();
2310         if (C) Offs += C->getZExtValue();
2311         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
2312                                                  C ? SDLoc(C) : SDLoc(),
2313                                                  Op.getValueType(), Offs));
2314       }
2315       return;
2316     }
2317     if (C) {   // just C, no GV.
2318       // Simple constants are not allowed for 's'.
2319       if (ConstraintLetter != 's') {
2320         // gcc prints these as sign extended.  Sign extend value to 64 bits
2321         // now; without this it would get ZExt'd later in
2322         // ScheduleDAGSDNodes::EmitNode, which is very generic.
2323         Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
2324                                             SDLoc(C), MVT::i64));
2325       }
2326       return;
2327     }
2328     break;
2329   }
2330   }
2331 }
2332 
2333 std::pair<unsigned, const TargetRegisterClass *>
2334 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
2335                                              StringRef Constraint,
2336                                              MVT VT) const {
2337   if (Constraint.empty() || Constraint[0] != '{')
2338     return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
2339   assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
2340 
2341   // Remove the braces from around the name.
2342   StringRef RegName(Constraint.data()+1, Constraint.size()-2);
2343 
2344   std::pair<unsigned, const TargetRegisterClass*> R =
2345     std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
2346 
2347   // Figure out which register class contains this reg.
2348   for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
2349        E = RI->regclass_end(); RCI != E; ++RCI) {
2350     const TargetRegisterClass *RC = *RCI;
2351 
2352     // If none of the value types for this register class are valid, we
2353     // can't use it.  For example, 64-bit reg classes on 32-bit targets.
2354     if (!isLegalRC(RC))
2355       continue;
2356 
2357     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
2358          I != E; ++I) {
2359       if (RegName.equals_lower(RI->getRegAsmName(*I))) {
2360         std::pair<unsigned, const TargetRegisterClass*> S =
2361           std::make_pair(*I, RC);
2362 
2363         // If this register class has the requested value type, return it,
2364         // otherwise keep searching and return the first class found
2365         // if no other is found which explicitly has the requested type.
2366         if (RC->hasType(VT))
2367           return S;
2368         else if (!R.second)
2369           R = S;
2370       }
2371     }
2372   }
2373 
2374   return R;
2375 }
2376 
2377 //===----------------------------------------------------------------------===//
2378 // Constraint Selection.
2379 
/// Return true if this is an input operand that is a matching constraint like
2381 /// "4".
2382 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
2383   assert(!ConstraintCode.empty() && "No known constraint!");
2384   return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
2385 }
2386 
2387 /// If this is an input matching constraint, this method returns the output
2388 /// operand it matches.
2389 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
2390   assert(!ConstraintCode.empty() && "No known constraint!");
2391   return atoi(ConstraintCode.c_str());
2392 }
2393 
2394 /// Split up the constraint string from the inline assembly value into the
2395 /// specific constraints and their prefixes, and also tie in the associated
2396 /// operand values.
2397 /// If this returns an empty vector, and if the constraint string itself
2398 /// isn't empty, there was an error parsing.
2399 TargetLowering::AsmOperandInfoVector
2400 TargetLowering::ParseConstraints(const DataLayout &DL,
2401                                  const TargetRegisterInfo *TRI,
2402                                  ImmutableCallSite CS) const {
2403   /// Information about all of the constraints.
2404   AsmOperandInfoVector ConstraintOperands;
2405   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
2406   unsigned maCount = 0; // Largest number of multiple alternative constraints.
2407 
2408   // Do a prepass over the constraints, canonicalizing them, and building up the
2409   // ConstraintOperands list.
2410   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
2411   unsigned ResNo = 0;   // ResNo - The result number of the next output.
2412 
2413   for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2414     ConstraintOperands.emplace_back(std::move(CI));
2415     AsmOperandInfo &OpInfo = ConstraintOperands.back();
2416 
2417     // Update multiple alternative constraint count.
2418     if (OpInfo.multipleAlternatives.size() > maCount)
2419       maCount = OpInfo.multipleAlternatives.size();
2420 
2421     OpInfo.ConstraintVT = MVT::Other;
2422 
2423     // Compute the value type for each operand.
2424     switch (OpInfo.Type) {
2425     case InlineAsm::isOutput:
2426       // Indirect outputs just consume an argument.
2427       if (OpInfo.isIndirect) {
2428         OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
2429         break;
2430       }
2431 
2432       // The return value of the call is this value.  As such, there is no
2433       // corresponding argument.
2434       assert(!CS.getType()->isVoidTy() &&
2435              "Bad inline asm!");
2436       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
2437         OpInfo.ConstraintVT =
2438             getSimpleValueType(DL, STy->getElementType(ResNo));
2439       } else {
2440         assert(ResNo == 0 && "Asm only has one result!");
2441         OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
2442       }
2443       ++ResNo;
2444       break;
2445     case InlineAsm::isInput:
2446       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
2447       break;
2448     case InlineAsm::isClobber:
2449       // Nothing to do.
2450       break;
2451     }
2452 
2453     if (OpInfo.CallOperandVal) {
2454       llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
2455       if (OpInfo.isIndirect) {
2456         llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
2457         if (!PtrTy)
2458           report_fatal_error("Indirect operand for inline asm not a pointer!");
2459         OpTy = PtrTy->getElementType();
2460       }
2461 
      // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
2463       if (StructType *STy = dyn_cast<StructType>(OpTy))
2464         if (STy->getNumElements() == 1)
2465           OpTy = STy->getElementType(0);
2466 
2467       // If OpTy is not a single value, it may be a struct/union that we
2468       // can tile with integers.
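      // For example, a 64-bit aggregate such as { i32, i32 } is given the
      // constraint type i64 below.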
2469       if (!OpTy->isSingleValueType() && OpTy->isSized()) {
2470         unsigned BitSize = DL.getTypeSizeInBits(OpTy);
2471         switch (BitSize) {
2472         default: break;
2473         case 1:
2474         case 8:
2475         case 16:
2476         case 32:
2477         case 64:
2478         case 128:
2479           OpInfo.ConstraintVT =
2480             MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
2481           break;
2482         }
2483       } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
2484         unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
2485         OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
2486       } else {
2487         OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
2488       }
2489     }
2490   }
2491 
2492   // If we have multiple alternative constraints, select the best alternative.
2493   if (!ConstraintOperands.empty()) {
2494     if (maCount) {
2495       unsigned bestMAIndex = 0;
2496       int bestWeight = -1;
      // Weights range from -1 (invalid match) through 0 (so-so match) up to
      // 5 (good match).
2498       int weight = -1;
2499       unsigned maIndex;
2500       // Compute the sums of the weights for each alternative, keeping track
2501       // of the best (highest weight) one so far.
2502       for (maIndex = 0; maIndex < maCount; ++maIndex) {
2503         int weightSum = 0;
2504         for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2505             cIndex != eIndex; ++cIndex) {
2506           AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
2507           if (OpInfo.Type == InlineAsm::isClobber)
2508             continue;
2509 
2510           // If this is an output operand with a matching input operand,
2511           // look up the matching input. If their types mismatch, e.g. one
2512           // is an integer, the other is floating point, or their sizes are
          // different, flag this alternative as impossible to match.
2514           if (OpInfo.hasMatchingInput()) {
2515             AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
2516             if (OpInfo.ConstraintVT != Input.ConstraintVT) {
2517               if ((OpInfo.ConstraintVT.isInteger() !=
2518                    Input.ConstraintVT.isInteger()) ||
2519                   (OpInfo.ConstraintVT.getSizeInBits() !=
2520                    Input.ConstraintVT.getSizeInBits())) {
2521                 weightSum = -1;  // Can't match.
2522                 break;
2523               }
2524             }
2525           }
2526           weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
2527           if (weight == -1) {
2528             weightSum = -1;
2529             break;
2530           }
2531           weightSum += weight;
2532         }
2533         // Update best.
2534         if (weightSum > bestWeight) {
2535           bestWeight = weightSum;
2536           bestMAIndex = maIndex;
2537         }
2538       }
2539 
      // Now select the chosen alternative in each constraint.
2541       for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2542           cIndex != eIndex; ++cIndex) {
2543         AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
2544         if (cInfo.Type == InlineAsm::isClobber)
2545           continue;
2546         cInfo.selectAlternative(bestMAIndex);
2547       }
2548     }
2549   }
2550 
2551   // Check and hook up tied operands, choose constraint code to use.
2552   for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2553       cIndex != eIndex; ++cIndex) {
2554     AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
2555 
2556     // If this is an output operand with a matching input operand, look up the
2557     // matching input. If their types mismatch, e.g. one is an integer, the
2558     // other is floating point, or their sizes are different, flag it as an
2559     // error.
2560     if (OpInfo.hasMatchingInput()) {
2561       AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
2562 
2563       if (OpInfo.ConstraintVT != Input.ConstraintVT) {
2564         std::pair<unsigned, const TargetRegisterClass *> MatchRC =
2565             getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
2566                                          OpInfo.ConstraintVT);
2567         std::pair<unsigned, const TargetRegisterClass *> InputRC =
2568             getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
2569                                          Input.ConstraintVT);
2570         if ((OpInfo.ConstraintVT.isInteger() !=
2571              Input.ConstraintVT.isInteger()) ||
2572             (MatchRC.second != InputRC.second)) {
2573           report_fatal_error("Unsupported asm: input constraint"
2574                              " with a matching output constraint of"
2575                              " incompatible type!");
2576         }
2577       }
2578     }
2579   }
2580 
2581   return ConstraintOperands;
2582 }
2583 
2584 /// Return an integer indicating how general CT is.
2585 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
2586   switch (CT) {
2587   case TargetLowering::C_Other:
2588   case TargetLowering::C_Unknown:
2589     return 0;
2590   case TargetLowering::C_Register:
2591     return 1;
2592   case TargetLowering::C_RegisterClass:
2593     return 2;
2594   case TargetLowering::C_Memory:
2595     return 3;
2596   }
2597   llvm_unreachable("Invalid constraint type");
2598 }
2599 
2600 /// Examine constraint type and operand type and determine a weight value.
2601 /// This object must already have been set up with the operand type
2602 /// and the current alternative constraint selected.
2603 TargetLowering::ConstraintWeight
2604   TargetLowering::getMultipleConstraintMatchWeight(
2605     AsmOperandInfo &info, int maIndex) const {
2606   InlineAsm::ConstraintCodeVector *rCodes;
2607   if (maIndex >= (int)info.multipleAlternatives.size())
2608     rCodes = &info.Codes;
2609   else
2610     rCodes = &info.multipleAlternatives[maIndex].Codes;
2611   ConstraintWeight BestWeight = CW_Invalid;
2612 
2613   // Loop over the options, keeping track of the most general one.
2614   for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
2615     ConstraintWeight weight =
2616       getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
2617     if (weight > BestWeight)
2618       BestWeight = weight;
2619   }
2620 
2621   return BestWeight;
2622 }
2623 
2624 /// Examine constraint type and operand type and determine a weight value.
2625 /// This object must already have been set up with the operand type
2626 /// and the current alternative constraint selected.
2627 TargetLowering::ConstraintWeight
2628   TargetLowering::getSingleConstraintMatchWeight(
2629     AsmOperandInfo &info, const char *constraint) const {
2630   ConstraintWeight weight = CW_Invalid;
2631   Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
2634   if (!CallOperandVal)
2635     return CW_Default;
2636   // Look at the constraint type.
2637   switch (*constraint) {
2638     case 'i': // immediate integer.
2639     case 'n': // immediate integer with a known value.
2640       if (isa<ConstantInt>(CallOperandVal))
2641         weight = CW_Constant;
2642       break;
    case 's': // non-explicit integral immediate.
2644       if (isa<GlobalValue>(CallOperandVal))
2645         weight = CW_Constant;
2646       break;
    case 'E': // immediate float (if host and target formats match).
2648     case 'F': // immediate float.
2649       if (isa<ConstantFP>(CallOperandVal))
2650         weight = CW_Constant;
2651       break;
2652     case '<': // memory operand with autodecrement.
2653     case '>': // memory operand with autoincrement.
2654     case 'm': // memory operand.
2655     case 'o': // offsettable memory operand
2656     case 'V': // non-offsettable memory operand
2657       weight = CW_Memory;
2658       break;
2659     case 'r': // general register.
2660     case 'g': // general register, memory operand or immediate integer.
2661               // note: Clang converts "g" to "imr".
2662       if (CallOperandVal->getType()->isIntegerTy())
2663         weight = CW_Register;
2664       break;
2665     case 'X': // any operand.
2666     default:
2667       weight = CW_Default;
2668       break;
2669   }
2670   return weight;
2671 }
2672 
2673 /// If there are multiple different constraints that we could pick for this
2674 /// operand (e.g. "imr") try to pick the 'best' one.
2675 /// This is somewhat tricky: constraints fall into four classes:
2676 ///    Other         -> immediates and magic values
2677 ///    Register      -> one specific register
2678 ///    RegisterClass -> a group of regs
2679 ///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it.  The problem is
/// that if we have something that could either be in a register or in
/// memory, choosing the register could cause selection of *other* operands
/// to fail: they might only succeed if we pick memory.  Because of this,
/// the heuristic we use is:
2686 ///
2687 ///  1) If there is an 'other' constraint, and if the operand is valid for
2688 ///     that constraint, use it.  This makes us take advantage of 'i'
2689 ///     constraints when available.
2690 ///  2) Otherwise, pick the most general constraint present.  This prefers
2691 ///     'm' over 'r', for example.
2692 ///
2693 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
2694                              const TargetLowering &TLI,
2695                              SDValue Op, SelectionDAG *DAG) {
2696   assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
2697   unsigned BestIdx = 0;
2698   TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
2699   int BestGenerality = -1;
2700 
2701   // Loop over the options, keeping track of the most general one.
2702   for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
2703     TargetLowering::ConstraintType CType =
2704       TLI.getConstraintType(OpInfo.Codes[i]);
2705 
2706     // If this is an 'other' constraint, see if the operand is valid for it.
2707     // For example, on X86 we might have an 'rI' constraint.  If the operand
2708     // is an integer in the range [0..31] we want to use I (saving a load
2709     // of a register), otherwise we must use 'r'.
2710     if (CType == TargetLowering::C_Other && Op.getNode()) {
2711       assert(OpInfo.Codes[i].size() == 1 &&
2712              "Unhandled multi-letter 'other' constraint");
2713       std::vector<SDValue> ResultOps;
2714       TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
2715                                        ResultOps, *DAG);
2716       if (!ResultOps.empty()) {
2717         BestType = CType;
2718         BestIdx = i;
2719         break;
2720       }
2721     }
2722 
2723     // Things with matching constraints can only be registers, per gcc
2724     // documentation.  This mainly affects "g" constraints.
2725     if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
2726       continue;
2727 
2728     // This constraint letter is more general than the previous one, use it.
2729     int Generality = getConstraintGenerality(CType);
2730     if (Generality > BestGenerality) {
2731       BestType = CType;
2732       BestIdx = i;
2733       BestGenerality = Generality;
2734     }
2735   }
2736 
2737   OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
2738   OpInfo.ConstraintType = BestType;
2739 }
2740 
2741 /// Determines the constraint code and constraint type to use for the specific
2742 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2743 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2744                                             SDValue Op,
2745                                             SelectionDAG *DAG) const {
2746   assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
2747 
2748   // Single-letter constraints ('r') are very common.
2749   if (OpInfo.Codes.size() == 1) {
2750     OpInfo.ConstraintCode = OpInfo.Codes[0];
2751     OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2752   } else {
2753     ChooseConstraint(OpInfo, *this, Op, DAG);
2754   }
2755 
2756   // 'X' matches anything.
2757   if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
2758     // Labels and constants are handled elsewhere ('X' is the only thing
2759     // that matches labels).  For Functions, the type here is the type of
2760     // the result, which is not what we want to look at; leave them alone.
2761     Value *v = OpInfo.CallOperandVal;
2762     if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
2763       OpInfo.CallOperandVal = v;
2764       return;
2765     }
2766 
2767     // Otherwise, try to resolve it to something we know about by looking at
2768     // the actual operand type.
2769     if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
2770       OpInfo.ConstraintCode = Repl;
2771       OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2772     }
2773   }
2774 }
2775 
2776 /// \brief Given an exact SDIV by a constant, create a multiplication
2777 /// with the multiplicative inverse of the constant.
2778 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
2779                               SDLoc dl, SelectionDAG &DAG,
2780                               std::vector<SDNode *> &Created) {
2781   assert(d != 0 && "Division by zero!");
2782 
  // If the divisor is even, shift it (and the dividend) right upfront so
  // that its LSB is one.
2784   unsigned ShAmt = d.countTrailingZeros();
2785   if (ShAmt) {
2786     // TODO: For UDIV use SRL instead of SRA.
2787     SDValue Amt =
2788         DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
2789                                                         DAG.getDataLayout()));
2790     SDNodeFlags Flags;
2791     Flags.setExact(true);
2792     Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
2793     Created.push_back(Op1.getNode());
2794     d = d.ashr(ShAmt);
2795   }
2796 
2797   // Calculate the multiplicative inverse, using Newton's method.
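  // Each step of xn <- xn * (2 - d * xn) doubles the number of correct
  // low-order bits.  For example, with d = 7 at a width of 8 bits the
  // inverse is 183, since 7 * 183 = 1281 == 1 (mod 256); an exact 21 / 7
  // then becomes 21 * 183 == 3 (mod 256).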
2798   APInt t, xn = d;
2799   while ((t = d*xn) != 1)
2800     xn *= APInt(d.getBitWidth(), 2) - t;
2801 
2802   SDValue Op2 = DAG.getConstant(xn, dl, Op1.getValueType());
2803   SDValue Mul = DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
2804   Created.push_back(Mul.getNode());
2805   return Mul;
2806 }
2807 
2808 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
2809                                       SelectionDAG &DAG,
2810                                       std::vector<SDNode *> *Created) const {
2811   AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2812   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2813   if (TLI.isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N, 0); // Lower SDIV as SDIV; integer division is cheap here.
2815   return SDValue();
2816 }
2817 
2818 /// \brief Given an ISD::SDIV node expressing a divide by constant,
2819 /// return a DAG expression to select that will generate the same value by
2820 /// multiplying by a magic number.
2821 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
2822 SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
2823                                   SelectionDAG &DAG, bool IsAfterLegalization,
2824                                   std::vector<SDNode *> *Created) const {
2825   assert(Created && "No vector to hold sdiv ops.");
2826 
2827   EVT VT = N->getValueType(0);
2828   SDLoc dl(N);
2829 
2830   // Check to see if we can do this.
2831   // FIXME: We should be more aggressive here.
2832   if (!isTypeLegal(VT))
2833     return SDValue();
2834 
2835   // If the sdiv has an 'exact' bit we can use a simpler lowering.
2836   if (cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact())
2837     return BuildExactSDIV(*this, N->getOperand(0), Divisor, dl, DAG, *Created);
2838 
2839   APInt::ms magics = Divisor.magic();
2840 
2841   // Multiply the numerator (operand 0) by the magic value
2842   // FIXME: We should support doing a MUL in a wider type
2843   SDValue Q;
2844   if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
2845                             isOperationLegalOrCustom(ISD::MULHS, VT))
2846     Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
2847                     DAG.getConstant(magics.m, dl, VT));
2848   else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
2849                                  isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
2850     Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
2851                               N->getOperand(0),
2852                               DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
2853   else
    return SDValue();       // No mulhs or equivalent.
2855   // If d > 0 and m < 0, add the numerator
2856   if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
2857     Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
2858     Created->push_back(Q.getNode());
2859   }
2860   // If d < 0 and m > 0, subtract the numerator.
2861   if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
2862     Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
2863     Created->push_back(Q.getNode());
2864   }
2865   auto &DL = DAG.getDataLayout();
  // Shift right arithmetic if the shift value is nonzero.
2867   if (magics.s > 0) {
2868     Q = DAG.getNode(
2869         ISD::SRA, dl, VT, Q,
2870         DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
2871     Created->push_back(Q.getNode());
2872   }
2873   // Extract the sign bit and add it to the quotient
2874   SDValue T =
2875       DAG.getNode(ISD::SRL, dl, VT, Q,
2876                   DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
2877                                   getShiftAmountTy(Q.getValueType(), DL)));
2878   Created->push_back(T.getNode());
2879   return DAG.getNode(ISD::ADD, dl, VT, Q, T);
2880 }
2881 
2882 /// \brief Given an ISD::UDIV node expressing a divide by constant,
2883 /// return a DAG expression to select that will generate the same value by
2884 /// multiplying by a magic number.
2885 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
2886 SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
2887                                   SelectionDAG &DAG, bool IsAfterLegalization,
2888                                   std::vector<SDNode *> *Created) const {
2889   assert(Created && "No vector to hold udiv ops.");
2890 
2891   EVT VT = N->getValueType(0);
2892   SDLoc dl(N);
2893   auto &DL = DAG.getDataLayout();
2894 
2895   // Check to see if we can do this.
2896   // FIXME: We should be more aggressive here.
2897   if (!isTypeLegal(VT))
2898     return SDValue();
2899 
2900   // FIXME: We should use a narrower constant when the upper
2901   // bits are known to be zero.
2902   APInt::mu magics = Divisor.magicu();
2903 
2904   SDValue Q = N->getOperand(0);
2905 
  // If the divisor is even, we can avoid the expensive fixup by shifting
  // the dividend right upfront.
2908   if (magics.a != 0 && !Divisor[0]) {
2909     unsigned Shift = Divisor.countTrailingZeros();
2910     Q = DAG.getNode(
2911         ISD::SRL, dl, VT, Q,
2912         DAG.getConstant(Shift, dl, getShiftAmountTy(Q.getValueType(), DL)));
2913     Created->push_back(Q.getNode());
2914 
2915     // Get magic number for the shifted divisor.
2916     magics = Divisor.lshr(Shift).magicu(Shift);
2917     assert(magics.a == 0 && "Should use cheap fixup now");
2918   }
2919 
2920   // Multiply the numerator (operand 0) by the magic value
2921   // FIXME: We should support doing a MUL in a wider type
2922   if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
2923                             isOperationLegalOrCustom(ISD::MULHU, VT))
2924     Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, dl, VT));
2925   else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
2926                                  isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
2927     Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
2928                             DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
2929   else
    return SDValue();       // No mulhu or equivalent.
2931 
2932   Created->push_back(Q.getNode());
2933 
2934   if (magics.a == 0) {
2935     assert(magics.s < Divisor.getBitWidth() &&
2936            "We shouldn't generate an undefined shift!");
2937     return DAG.getNode(
2938         ISD::SRL, dl, VT, Q,
2939         DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
2940   } else {
2941     SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
2942     Created->push_back(NPQ.getNode());
2943     NPQ = DAG.getNode(
2944         ISD::SRL, dl, VT, NPQ,
2945         DAG.getConstant(1, dl, getShiftAmountTy(NPQ.getValueType(), DL)));
2946     Created->push_back(NPQ.getNode());
2947     NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
2948     Created->push_back(NPQ.getNode());
2949     return DAG.getNode(
2950         ISD::SRL, dl, VT, NPQ,
2951         DAG.getConstant(magics.s - 1, dl,
2952                         getShiftAmountTy(NPQ.getValueType(), DL)));
2953   }
2954 }
2955 
2956 bool TargetLowering::
2957 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
2958   if (!isa<ConstantSDNode>(Op.getOperand(0))) {
2959     DAG.getContext()->emitError("argument to '__builtin_return_address' must "
2960                                 "be a constant integer");
2961     return true;
2962   }
2963 
2964   return false;
2965 }
2966 
2967 //===----------------------------------------------------------------------===//
2968 // Legalization Utilities
2969 //===----------------------------------------------------------------------===//
2970 
2971 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
2972                                SelectionDAG &DAG, SDValue LL, SDValue LH,
2973                                SDValue RL, SDValue RH) const {
2974   EVT VT = N->getValueType(0);
2975   SDLoc dl(N);
2976 
2977   bool HasMULHS = isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
2978   bool HasMULHU = isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
2979   bool HasSMUL_LOHI = isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
2980   bool HasUMUL_LOHI = isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
2981   if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
2982     unsigned OuterBitSize = VT.getSizeInBits();
2983     unsigned InnerBitSize = HiLoVT.getSizeInBits();
2984     unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
2985     unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));
2986 
2987     // LL, LH, RL, and RH must be either all NULL or all set to a value.
2988     assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
2989            (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
2990 
2991     if (!LL.getNode() && !RL.getNode() &&
2992         isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
2993       LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(0));
2994       RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(1));
2995     }
2996 
2997     if (!LL.getNode())
2998       return false;
2999 
3000     APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
3001     if (DAG.MaskedValueIsZero(N->getOperand(0), HighMask) &&
3002         DAG.MaskedValueIsZero(N->getOperand(1), HighMask)) {
3003       // The inputs are both zero-extended.
3004       if (HasUMUL_LOHI) {
3005         // We can emit a umul_lohi.
3006         Lo = DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(HiLoVT, HiLoVT), LL,
3007                          RL);
3008         Hi = SDValue(Lo.getNode(), 1);
3009         return true;
3010       }
3011       if (HasMULHU) {
3012         // We can emit a mulhu+mul.
3013         Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
3014         Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
3015         return true;
3016       }
3017     }
3018     if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
3019       // The input values are both sign-extended.
3020       if (HasSMUL_LOHI) {
3021         // We can emit a smul_lohi.
3022         Lo = DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(HiLoVT, HiLoVT), LL,
3023                          RL);
3024         Hi = SDValue(Lo.getNode(), 1);
3025         return true;
3026       }
3027       if (HasMULHS) {
3028         // We can emit a mulhs+mul.
3029         Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
3030         Hi = DAG.getNode(ISD::MULHS, dl, HiLoVT, LL, RL);
3031         return true;
3032       }
3033     }
3034 
3035     if (!LH.getNode() && !RH.getNode() &&
3036         isOperationLegalOrCustom(ISD::SRL, VT) &&
3037         isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
3038       auto &DL = DAG.getDataLayout();
3039       unsigned ShiftAmt = VT.getSizeInBits() - HiLoVT.getSizeInBits();
3040       SDValue Shift = DAG.getConstant(ShiftAmt, dl, getShiftAmountTy(VT, DL));
3041       LH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(0), Shift);
3042       LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
3043       RH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(1), Shift);
3044       RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
3045     }
3046 
3047     if (!LH.getNode())
3048       return false;
3049 
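    // At this point LL/LH and RL/RH hold the low and high halves of the two
    // operands.  Writing the operands as LH*2^K + LL and RH*2^K + RL (with
    // K = InnerBitSize), the low 2*K bits of the product are:
    //   Lo = lo(LL*RL)
    //   Hi = hi(LL*RL) + lo(LL*RH) + lo(LH*RL)
    // which is what the two sequences below compute.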
3050     if (HasUMUL_LOHI) {
3051       // Lo,Hi = umul LHS, RHS.
3052       SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, dl,
3053                                      DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
3054       Lo = UMulLOHI;
3055       Hi = UMulLOHI.getValue(1);
3056       RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
3057       LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
3058       Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
3059       Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
3060       return true;
3061     }
3062     if (HasMULHU) {
3063       Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
3064       Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
3065       RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
3066       LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
3067       Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
3068       Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
3069       return true;
3070     }
3071   }
3072   return false;
3073 }
3074 
3075 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
3076                                SelectionDAG &DAG) const {
3077   EVT VT = Node->getOperand(0).getValueType();
3078   EVT NVT = Node->getValueType(0);
3079   SDLoc dl(SDValue(Node, 0));
3080 
3081   // FIXME: Only f32 to i64 conversions are supported.
3082   if (VT != MVT::f32 || NVT != MVT::i64)
3083     return false;
3084 
3085   // Expand f32 -> i64 conversion
3086   // This algorithm comes from compiler-rt's implementation of fixsfdi:
3087   // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c
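  // Roughly: extract the biased exponent and the mantissa from the bit
  // pattern, restore the implicit leading one, shift the mantissa left or
  // right by (exponent - 23), and apply the sign with an xor/subtract.
  // Exponents below zero (i.e. a magnitude less than one) yield zero.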
3088   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(),
3089                                 VT.getSizeInBits());
3090   SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
3091   SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
3092   SDValue Bias = DAG.getConstant(127, dl, IntVT);
3093   SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()), dl,
3094                                      IntVT);
3095   SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT);
3096   SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
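  // For IEEE-754 binary32, bit 31 is the sign, bits 30..23 hold the biased
  // exponent (bias 127), and bits 22..0 hold the mantissa: 0x7F800000 masks
  // the exponent field, 0x007FFFFF the mantissa, and 0x00800000 is the
  // implicit leading one OR'd back in below.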
3097 
3098   SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));
3099 
3100   auto &DL = DAG.getDataLayout();
3101   SDValue ExponentBits = DAG.getNode(
3102       ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
3103       DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT, DL)));
3104   SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
3105 
3106   SDValue Sign = DAG.getNode(
3107       ISD::SRA, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
3108       DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT, DL)));
3109   Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);
3110 
3111   SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
3112       DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
3113       DAG.getConstant(0x00800000, dl, IntVT));
3114 
3115   R = DAG.getZExtOrTrunc(R, dl, NVT);
3116 
3117   R = DAG.getSelectCC(
3118       dl, Exponent, ExponentLoBit,
3119       DAG.getNode(ISD::SHL, dl, NVT, R,
3120                   DAG.getZExtOrTrunc(
3121                       DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
3122                       dl, getShiftAmountTy(IntVT, DL))),
3123       DAG.getNode(ISD::SRL, dl, NVT, R,
3124                   DAG.getZExtOrTrunc(
3125                       DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
3126                       dl, getShiftAmountTy(IntVT, DL))),
3127       ISD::SETGT);
3128 
3129   SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
3130       DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
3131       Sign);
3132 
3133   Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
3134       DAG.getConstant(0, dl, NVT), Ret, ISD::SETLT);
3135   return true;
3136 }
3137 
3138 SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
3139                                             SelectionDAG &DAG) const {
3140   SDLoc SL(LD);
3141   SDValue Chain = LD->getChain();
3142   SDValue BasePTR = LD->getBasePtr();
3143   EVT SrcVT = LD->getMemoryVT();
3144   ISD::LoadExtType ExtType = LD->getExtensionType();
3145 
3146   unsigned NumElem = SrcVT.getVectorNumElements();
3147 
3148   EVT SrcEltVT = SrcVT.getScalarType();
3149   EVT DstEltVT = LD->getValueType(0).getScalarType();
3150 
3151   unsigned Stride = SrcEltVT.getSizeInBits() / 8;
3152   assert(SrcEltVT.isByteSized());
3153 
3154   EVT PtrVT = BasePTR.getValueType();
3155 
3156   SmallVector<SDValue, 8> Vals;
3157   SmallVector<SDValue, 8> LoadChains;
3158 
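  // Emit one extending scalar load per element at offset Idx * Stride from
  // the base pointer; the individual chains are merged with a TokenFactor
  // and the loaded values are reassembled with a BUILD_VECTOR below.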
3159   for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3160     SDValue ScalarLoad = DAG.getExtLoad(
3161       ExtType, SL, DstEltVT,
3162       Chain, BasePTR, LD->getPointerInfo().getWithOffset(Idx * Stride),
3163       SrcEltVT,
3164       LD->isVolatile(), LD->isNonTemporal(), LD->isInvariant(),
3165       MinAlign(LD->getAlignment(), Idx * Stride), LD->getAAInfo());
3166 
3167     BasePTR = DAG.getNode(ISD::ADD, SL, PtrVT, BasePTR,
3168                           DAG.getConstant(Stride, SL, PtrVT));
3169 
3170     Vals.push_back(ScalarLoad.getValue(0));
3171     LoadChains.push_back(ScalarLoad.getValue(1));
3172   }
3173 
3174   SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
3175   SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, SL, LD->getValueType(0), Vals);
3176 
3177   return DAG.getMergeValues({ Value, NewChain }, SL);
3178 }
3179 
3180 // FIXME: This relies on each element having a byte size, otherwise the stride
3181 // is 0 and just overwrites the same location. ExpandStore currently expects
3182 // this broken behavior.
3183 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
3184                                              SelectionDAG &DAG) const {
3185   SDLoc SL(ST);
3186 
3187   SDValue Chain = ST->getChain();
3188   SDValue BasePtr = ST->getBasePtr();
3189   SDValue Value = ST->getValue();
3190   EVT StVT = ST->getMemoryVT();
3191 
3192   unsigned Alignment = ST->getAlignment();
3193   bool isVolatile = ST->isVolatile();
3194   bool isNonTemporal = ST->isNonTemporal();
3195   AAMDNodes AAInfo = ST->getAAInfo();
3196 
3197   // The type of the data we want to save
3198   EVT RegVT = Value.getValueType();
3199   EVT RegSclVT = RegVT.getScalarType();
3200 
3201   // The type of data as saved in memory.
3202   EVT MemSclVT = StVT.getScalarType();
3203 
3204   EVT PtrVT = BasePtr.getValueType();
3205 
3206   // Store Stride in bytes
3207   unsigned Stride = MemSclVT.getSizeInBits() / 8;
3208   EVT IdxVT = getVectorIdxTy(DAG.getDataLayout());
3209   unsigned NumElem = StVT.getVectorNumElements();
3210 
3211   // Extract each of the elements from the original vector and save them into
3212   // memory individually.
3213   SmallVector<SDValue, 8> Stores;
3214   for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3215     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
3216                               DAG.getConstant(Idx, SL, IdxVT));
3217 
3218     SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
3219                               DAG.getConstant(Idx * Stride, SL, PtrVT));
3220 
3221     // This scalar TruncStore may be illegal, but we legalize it later.
3222     SDValue Store = DAG.getTruncStore(
3223       Chain, SL, Elt, Ptr,
3224       ST->getPointerInfo().getWithOffset(Idx * Stride), MemSclVT,
3225       isVolatile, isNonTemporal, MinAlign(Alignment, Idx * Stride),
3226       AAInfo);
3227 
3228     Stores.push_back(Store);
3229   }
3230 
3231   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
3232 }
3233 
3234 std::pair<SDValue, SDValue>
3235 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
3236   assert(LD->getAddressingMode() == ISD::UNINDEXED &&
3237          "unaligned indexed loads not implemented!");
3238   SDValue Chain = LD->getChain();
3239   SDValue Ptr = LD->getBasePtr();
3240   EVT VT = LD->getValueType(0);
3241   EVT LoadedVT = LD->getMemoryVT();
3242   SDLoc dl(LD);
3243   if (VT.isFloatingPoint() || VT.isVector()) {
3244     EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
3245     if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
3246       if (!isOperationLegalOrCustom(ISD::LOAD, intVT)) {
3247         // Scalarize the load and let the individual components be handled.
3248         SDValue Scalarized = scalarizeVectorLoad(LD, DAG);
3249         return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
3250       }
3251 
3252       // Expand to a (misaligned) integer load of the same size,
3253       // then bitconvert to floating point or vector.
3254       SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
3255                                     LD->getMemOperand());
3256       SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
3257       if (LoadedVT != VT)
3258         Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
3259                              ISD::ANY_EXTEND, dl, VT, Result);
3260 
3261       return std::make_pair(Result, newLoad.getValue(1));
3262     }
3263 
    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
3266     MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
3267     unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
3268     unsigned RegBytes = RegVT.getSizeInBits() / 8;
3269     unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
3270 
3271     // Make sure the stack slot is also aligned for the register type.
3272     SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
3273 
3274     SmallVector<SDValue, 8> Stores;
3275     SDValue StackPtr = StackBase;
3276     unsigned Offset = 0;
3277 
3278     EVT PtrVT = Ptr.getValueType();
3279     EVT StackPtrVT = StackPtr.getValueType();
3280 
3281     SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
3282     SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
3283 
    // Do all but one copy using the full register width.
3285     for (unsigned i = 1; i < NumRegs; i++) {
3286       // Load one integer register's worth from the original location.
3287       SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
3288                                  LD->getPointerInfo().getWithOffset(Offset),
3289                                  LD->isVolatile(), LD->isNonTemporal(),
3290                                  LD->isInvariant(),
3291                                  MinAlign(LD->getAlignment(), Offset),
3292                                  LD->getAAInfo());
3293       // Follow the load with a store to the stack slot.  Remember the store.
3294       Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
3295                                     MachinePointerInfo(), false, false, 0));
3296       // Increment the pointers.
3297       Offset += RegBytes;
3298       Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
3299       StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT, StackPtr,
3300                              StackPtrIncrement);
3301     }
3302 
3303     // The last copy may be partial.  Do an extending load.
3304     EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
3305                                   8 * (LoadedBytes - Offset));
3306     SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
3307                                   LD->getPointerInfo().getWithOffset(Offset),
3308                                   MemVT, LD->isVolatile(),
3309                                   LD->isNonTemporal(),
3310                                   LD->isInvariant(),
3311                                   MinAlign(LD->getAlignment(), Offset),
3312                                   LD->getAAInfo());
3313     // Follow the load with a store to the stack slot.  Remember the store.
3314     // On big-endian machines this requires a truncating store to ensure
3315     // that the bits end up in the right place.
3316     Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
3317                                        MachinePointerInfo(), MemVT,
3318                                        false, false, 0));
3319 
3320     // The order of the stores doesn't matter - say it with a TokenFactor.
3321     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
3322 
3323     // Finally, perform the original load only redirected to the stack slot.
3324     Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo(), LoadedVT, false, false, false,
3326                           0);
3327 
3328     // Callers expect a MERGE_VALUES node.
3329     return std::make_pair(Load, TF);
3330   }
3331 
3332   assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
3333          "Unaligned load of unsupported type.");
3334 
  // Compute the new VT that is half the size of the old one.  This is an
  // integer VT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
3340   NumBits >>= 1;
3341 
3342   unsigned Alignment = LD->getAlignment();
3343   unsigned IncrementSize = NumBits / 8;
3344   ISD::LoadExtType HiExtType = LD->getExtensionType();
3345 
3346   // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
3347   if (HiExtType == ISD::NON_EXTLOAD)
3348     HiExtType = ISD::ZEXTLOAD;
3349 
3350   // Load the value in two parts
3351   SDValue Lo, Hi;
3352   if (DAG.getDataLayout().isLittleEndian()) {
3353     Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
3354                         NewLoadedVT, LD->isVolatile(),
3355                         LD->isNonTemporal(), LD->isInvariant(), Alignment,
3356                         LD->getAAInfo());
3357     Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
3358                       DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
3359     Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
3360                         LD->getPointerInfo().getWithOffset(IncrementSize),
3361                         NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(),
3363                         MinAlign(Alignment, IncrementSize), LD->getAAInfo());
3364   } else {
3365     Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
3366                         NewLoadedVT, LD->isVolatile(),
3367                         LD->isNonTemporal(), LD->isInvariant(), Alignment,
3368                         LD->getAAInfo());
3369     Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
3370                       DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
3371     Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
3372                         LD->getPointerInfo().getWithOffset(IncrementSize),
3373                         NewLoadedVT, LD->isVolatile(),
3374                         LD->isNonTemporal(), LD->isInvariant(),
3375                         MinAlign(Alignment, IncrementSize), LD->getAAInfo());
3376   }
3377 
  // Aggregate the two parts.
3379   SDValue ShiftAmount =
3380       DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
3381                                                     DAG.getDataLayout()));
3382   SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
3383   Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
3384 
3385   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
3386                              Hi.getValue(1));
3387 
3388   return std::make_pair(Result, TF);
3389 }
3390 
3391 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
3392                                              SelectionDAG &DAG) const {
3393   assert(ST->getAddressingMode() == ISD::UNINDEXED &&
3394          "unaligned indexed stores not implemented!");
3395   SDValue Chain = ST->getChain();
3396   SDValue Ptr = ST->getBasePtr();
3397   SDValue Val = ST->getValue();
3398   EVT VT = Val.getValueType();
3399   int Alignment = ST->getAlignment();
3400 
3401   SDLoc dl(ST);
3402   if (ST->getMemoryVT().isFloatingPoint() ||
3403       ST->getMemoryVT().isVector()) {
3404     EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
3405     if (isTypeLegal(intVT)) {
3406       if (!isOperationLegalOrCustom(ISD::STORE, intVT)) {
3407         // Scalarize the store and let the individual components be handled.
3408         SDValue Result = scalarizeVectorStore(ST, DAG);
3409 
3410         return Result;
3411       }
3412       // Expand to a bitconvert of the value to the integer type of the
3413       // same size, then a (misaligned) int store.
3414       // FIXME: Does not handle truncating floating point stores!
3415       SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
3416       Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
3417                            ST->isVolatile(), ST->isNonTemporal(), Alignment);
3418       return Result;
3419     }
    // Do an (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
3422     EVT StoredVT = ST->getMemoryVT();
3423     MVT RegVT =
3424       getRegisterType(*DAG.getContext(),
3425                       EVT::getIntegerVT(*DAG.getContext(),
3426                                         StoredVT.getSizeInBits()));
3427     EVT PtrVT = Ptr.getValueType();
3428     unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
3429     unsigned RegBytes = RegVT.getSizeInBits() / 8;
3430     unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
3431 
3432     // Make sure the stack slot is also aligned for the register type.
3433     SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);
3434 
3435     // Perform the original store, only redirected to the stack slot.
3436     SDValue Store = DAG.getTruncStore(Chain, dl,
3437                                       Val, StackPtr, MachinePointerInfo(),
3438                                       StoredVT, false, false, 0);
3439 
3440     EVT StackPtrVT = StackPtr.getValueType();
3441 
3442     SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
3443     SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
3444     SmallVector<SDValue, 8> Stores;
3445     unsigned Offset = 0;
3446 
    // Do all but one copy using the full register width.
3448     for (unsigned i = 1; i < NumRegs; i++) {
3449       // Load one integer register's worth from the stack slot.
3450       SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
3451                                  MachinePointerInfo(),
3452                                  false, false, false, 0);
3453       // Store it to the final location.  Remember the store.
3454       Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
3455                                   ST->getPointerInfo().getWithOffset(Offset),
3456                                     ST->isVolatile(), ST->isNonTemporal(),
3457                                     MinAlign(ST->getAlignment(), Offset)));
3458       // Increment the pointers.
3459       Offset += RegBytes;
3460       StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT,
3461                              StackPtr, StackPtrIncrement);
3462       Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
3463     }
3464 
3465     // The last store may be partial.  Do a truncating store.  On big-endian
3466     // machines this requires an extending load from the stack slot to ensure
3467     // that the bits are in the right place.
3468     EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
3469                                   8 * (StoredBytes - Offset));
3470 
3471     // Load from the stack slot.
3472     SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
3473                                   MachinePointerInfo(),
3474                                   MemVT, false, false, false, 0);
3475 
3476     Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
3477                                        ST->getPointerInfo()
3478                                          .getWithOffset(Offset),
3479                                        MemVT, ST->isVolatile(),
3480                                        ST->isNonTemporal(),
3481                                        MinAlign(ST->getAlignment(), Offset),
3482                                        ST->getAAInfo()));
3483     // The order of the stores doesn't matter - say it with a TokenFactor.
3484     SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
3485     return Result;
3486   }
3487 
3488   assert(ST->getMemoryVT().isInteger() &&
3489          !ST->getMemoryVT().isVector() &&
3490          "Unaligned store of unknown type.");
3491   // Get the half-size VT
3492   EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
3493   int NumBits = NewStoredVT.getSizeInBits();
3494   int IncrementSize = NumBits / 8;
3495 
3496   // Divide the stored value in two parts.
3497   SDValue ShiftAmount =
3498       DAG.getConstant(NumBits, dl, getShiftAmountTy(Val.getValueType(),
3499                                                     DAG.getDataLayout()));
3500   SDValue Lo = Val;
3501   SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
3502 
3503   // Store the two parts
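  // On little-endian targets the low half goes at the original address and
  // the high half at address + IncrementSize; big-endian targets store them
  // the other way around.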
3504   SDValue Store1, Store2;
3505   Store1 = DAG.getTruncStore(Chain, dl,
3506                              DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
3507                              Ptr, ST->getPointerInfo(), NewStoredVT,
3508                              ST->isVolatile(), ST->isNonTemporal(), Alignment);
3509 
3510   EVT PtrVT = Ptr.getValueType();
3511   Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3512                     DAG.getConstant(IncrementSize, dl, PtrVT));
3513   Alignment = MinAlign(Alignment, IncrementSize);
3514   Store2 = DAG.getTruncStore(
3515       Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
3516       ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT,
3517       ST->isVolatile(), ST->isNonTemporal(), Alignment, ST->getAAInfo());
3518 
3519   SDValue Result =
3520     DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
3521   return Result;
3522 }
3523 
3524 //===----------------------------------------------------------------------===//
3525 // Implementation of Emulated TLS Model
3526 //===----------------------------------------------------------------------===//
3527 
3528 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
3529                                                 SelectionDAG &DAG) const {
  // Access to the address of TLS variable xyz is lowered to a function call:
3531   //   __emutls_get_address( address of global variable named "__emutls_v.xyz" )
3532   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3533   PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
3534   SDLoc dl(GA);
3535 
3536   ArgListTy Args;
3537   ArgListEntry Entry;
3538   std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
3539   Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
3540   StringRef EmuTlsVarName(NameString);
3541   GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
3542   assert(EmuTlsVar && "Cannot find EmuTlsVar ");
3543   Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
3544   Entry.Ty = VoidPtrType;
3545   Args.push_back(Entry);
3546 
3547   SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
3548 
3549   TargetLowering::CallLoweringInfo CLI(DAG);
3550   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
3551   CLI.setCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args), 0);
3552   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3553 
  // TLSADDR will be codegen'ed as a call.  Inform MFI that the function has
  // calls.  At least for X86 targets; this may be useful for other targets
  // too.
3556   MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
3557   MFI->setAdjustsStack(true);  // Is this only for X86 target?
3558   MFI->setHasCalls(true);
3559 
3560   assert((GA->getOffset() == 0) &&
3561          "Emulated TLS must have zero offset in GlobalAddressSDNode");
3562   return CallResult.first;
3563 }
3564