1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Target/TargetLowering.h"
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/CodeGen/CallingConvLower.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineJumpTableInfo.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/GlobalVariable.h"
26 #include "llvm/IR/LLVMContext.h"
27 #include "llvm/MC/MCAsmInfo.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/MathExtras.h"
31 #include "llvm/Target/TargetLoweringObjectFile.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetRegisterInfo.h"
34 #include "llvm/Target/TargetSubtargetInfo.h"
35 #include <cctype>
36 using namespace llvm;
37 
38 /// NOTE: The TargetMachine owns TLOF.
39 TargetLowering::TargetLowering(const TargetMachine &tm)
40   : TargetLoweringBase(tm) {}
41 
42 const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
43   return nullptr;
44 }
45 
46 bool TargetLowering::isPositionIndependent() const {
47   return getTargetMachine().isPositionIndependent();
48 }
49 
50 /// Check whether a given call node is in tail position within its function. If
51 /// so, it sets Chain to the input chain of the tail call.
52 bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
53                                           SDValue &Chain) const {
54   const Function *F = DAG.getMachineFunction().getFunction();
55 
56   // Conservatively require the attributes of the call to match those of
57   // the return. Ignore noalias because it doesn't affect the call sequence.
58   AttributeSet CallerAttrs = F->getAttributes();
59   if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex)
60       .removeAttribute(Attribute::NoAlias).hasAttributes())
61     return false;
62 
63   // It's not safe to eliminate the sign / zero extension of the return value.
64   if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
65       CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
66     return false;
67 
68   // Check if the only use is a function return node.
69   return isUsedByReturnOnly(Node, Chain);
70 }
71 
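/// Check whether the outgoing argument values that land in callee-saved
/// registers are exactly the values the caller itself received in those
/// registers, i.e. unmodified copies of the corresponding live-in values.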
72 bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
73     const uint32_t *CallerPreservedMask,
74     const SmallVectorImpl<CCValAssign> &ArgLocs,
75     const SmallVectorImpl<SDValue> &OutVals) const {
76   for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
77     const CCValAssign &ArgLoc = ArgLocs[I];
78     if (!ArgLoc.isRegLoc())
79       continue;
80     unsigned Reg = ArgLoc.getLocReg();
81     // Only look at callee saved registers.
82     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
83       continue;
84     // Check that we pass the same value that the caller itself received in
85     // this register (i.e. a CopyFromReg reading the virtual register that
86     // holds the function live-in value of register Reg).
87     SDValue Value = OutVals[I];
88     if (Value->getOpcode() != ISD::CopyFromReg)
89       return false;
90     unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
91     if (MRI.getLiveInPhysReg(ArgReg) != Reg)
92       return false;
93   }
94   return true;
95 }
96 
97 /// \brief Set CallLoweringInfo attribute flags based on a call instruction
98 /// and called function attributes.
99 void TargetLowering::ArgListEntry::setAttributes(ImmutableCallSite *CS,
100                                                  unsigned AttrIdx) {
101   isSExt     = CS->paramHasAttr(AttrIdx, Attribute::SExt);
102   isZExt     = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
103   isInReg    = CS->paramHasAttr(AttrIdx, Attribute::InReg);
104   isSRet     = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
105   isNest     = CS->paramHasAttr(AttrIdx, Attribute::Nest);
106   isByVal    = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
107   isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
108   isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
109   isSwiftSelf = CS->paramHasAttr(AttrIdx, Attribute::SwiftSelf);
110   isSwiftError = CS->paramHasAttr(AttrIdx, Attribute::SwiftError);
111   Alignment  = CS->getParamAlignment(AttrIdx);
112 }
113 
114 /// Generate a libcall taking the given operands as arguments and returning a
115 /// result of type RetVT.
116 std::pair<SDValue, SDValue>
117 TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
118                             ArrayRef<SDValue> Ops, bool isSigned,
119                             const SDLoc &dl, bool doesNotReturn,
120                             bool isReturnValueUsed) const {
121   TargetLowering::ArgListTy Args;
122   Args.reserve(Ops.size());
123 
124   TargetLowering::ArgListEntry Entry;
125   for (SDValue Op : Ops) {
126     Entry.Node = Op;
127     Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
128     Entry.isSExt = shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
129     Entry.isZExt = !shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
130     Args.push_back(Entry);
131   }
132 
133   if (LC == RTLIB::UNKNOWN_LIBCALL)
134     report_fatal_error("Unsupported library call operation!");
135   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
136                                          getPointerTy(DAG.getDataLayout()));
137 
138   Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
139   TargetLowering::CallLoweringInfo CLI(DAG);
140   bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
141   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
142     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
143     .setNoReturn(doesNotReturn).setDiscardResult(!isReturnValueUsed)
144     .setSExtResult(signExtend).setZExtResult(!signExtend);
145   return LowerCallTo(CLI);
146 }
147 
148 /// Soften the operands of a comparison. This code is shared among BR_CC,
149 /// SELECT_CC, and SETCC handlers.
150 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
151                                          SDValue &NewLHS, SDValue &NewRHS,
152                                          ISD::CondCode &CCCode,
153                                          const SDLoc &dl) const {
154   assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
155          && "Unsupported setcc type!");
156 
157   // Expand into one or more soft-fp libcall(s).
158   RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
159   bool ShouldInvertCC = false;
160   switch (CCCode) {
161   case ISD::SETEQ:
162   case ISD::SETOEQ:
163     LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
164           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
165           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
166     break;
167   case ISD::SETNE:
168   case ISD::SETUNE:
169     LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
170           (VT == MVT::f64) ? RTLIB::UNE_F64 :
171           (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
172     break;
173   case ISD::SETGE:
174   case ISD::SETOGE:
175     LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
176           (VT == MVT::f64) ? RTLIB::OGE_F64 :
177           (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
178     break;
179   case ISD::SETLT:
180   case ISD::SETOLT:
181     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
182           (VT == MVT::f64) ? RTLIB::OLT_F64 :
183           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
184     break;
185   case ISD::SETLE:
186   case ISD::SETOLE:
187     LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
188           (VT == MVT::f64) ? RTLIB::OLE_F64 :
189           (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
190     break;
191   case ISD::SETGT:
192   case ISD::SETOGT:
193     LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
194           (VT == MVT::f64) ? RTLIB::OGT_F64 :
195           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
196     break;
197   case ISD::SETUO:
198     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
199           (VT == MVT::f64) ? RTLIB::UO_F64 :
200           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
201     break;
202   case ISD::SETO:
203     LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
204           (VT == MVT::f64) ? RTLIB::O_F64 :
205           (VT == MVT::f128) ? RTLIB::O_F128 : RTLIB::O_PPCF128;
206     break;
207   case ISD::SETONE:
208     // SETONE = SETOLT | SETOGT
209     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
210           (VT == MVT::f64) ? RTLIB::OLT_F64 :
211           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
212     LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
213           (VT == MVT::f64) ? RTLIB::OGT_F64 :
214           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
215     break;
216   case ISD::SETUEQ:
217     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
218           (VT == MVT::f64) ? RTLIB::UO_F64 :
219           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
220     LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
221           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
222           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
223     break;
224   default:
225     // Invert CC for unordered comparisons
226     ShouldInvertCC = true;
227     switch (CCCode) {
228     case ISD::SETULT:
229       LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
230             (VT == MVT::f64) ? RTLIB::OGE_F64 :
231             (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
232       break;
233     case ISD::SETULE:
234       LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
235             (VT == MVT::f64) ? RTLIB::OGT_F64 :
236             (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
237       break;
238     case ISD::SETUGT:
239       LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
240             (VT == MVT::f64) ? RTLIB::OLE_F64 :
241             (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
242       break;
243     case ISD::SETUGE:
244       LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
245             (VT == MVT::f64) ? RTLIB::OLT_F64 :
246             (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
247       break;
248     default: llvm_unreachable("Do not know how to soften this setcc!");
249     }
250   }
251 
252   // Use the target-specific return value for comparison libcalls.
253   EVT RetVT = getCmpLibcallReturnType();
254   SDValue Ops[2] = {NewLHS, NewRHS};
255   NewLHS = makeLibCall(DAG, LC1, RetVT, Ops, false /*sign irrelevant*/,
256                        dl).first;
257   NewRHS = DAG.getConstant(0, dl, RetVT);
258 
259   CCCode = getCmpLibcallCC(LC1);
260   if (ShouldInvertCC)
261     CCCode = getSetCCInverse(CCCode, /*isInteger=*/true);
262 
263   if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
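    // For comparisons that need two libcalls (e.g. SETONE = OLT | OGT,
    // SETUEQ = UO | OEQ), evaluate the second predicate as well and OR the
    // two boolean results together.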
264     SDValue Tmp = DAG.getNode(
265         ISD::SETCC, dl,
266         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
267         NewLHS, NewRHS, DAG.getCondCode(CCCode));
268     NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, false/*sign irrelevant*/,
269                          dl).first;
270     NewLHS = DAG.getNode(
271         ISD::SETCC, dl,
272         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
273         NewLHS, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
274     NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
275     NewRHS = SDValue();
276   }
277 }
278 
279 /// Return the entry encoding for a jump table in the current function. The
280 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
281 unsigned TargetLowering::getJumpTableEncoding() const {
282   // In non-pic modes, just use the address of a block.
283   if (!isPositionIndependent())
284     return MachineJumpTableInfo::EK_BlockAddress;
285 
286   // In PIC mode, if the target supports a GPRel32 directive, use it.
287   if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
288     return MachineJumpTableInfo::EK_GPRel32BlockAddress;
289 
290   // Otherwise, use a label difference.
291   return MachineJumpTableInfo::EK_LabelDifference32;
292 }
293 
294 SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
295                                                  SelectionDAG &DAG) const {
296   // If our PIC model is GP relative, use the global offset table as the base.
297   unsigned JTEncoding = getJumpTableEncoding();
298 
299   if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
300       (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
301     return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));
302 
303   return Table;
304 }
305 
306 /// This returns the relocation base for the given PIC jumptable, the same as
307 /// getPICJumpTableRelocBase, but as an MCExpr.
308 const MCExpr *
309 TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
310                                              unsigned JTI,MCContext &Ctx) const{
311   // The normal PIC reloc base is the label at the start of the jump table.
312   return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
313 }
314 
315 bool
316 TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
317   const TargetMachine &TM = getTargetMachine();
318   const GlobalValue *GV = GA->getGlobal();
319 
320   // If the address is not even local to this DSO, we will have to load it
321   // from the GOT and then add the offset.
322   if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
323     return false;
324 
325   // If the code is position independent we will have to add a base register.
326   if (isPositionIndependent())
327     return false;
328 
329   // Otherwise we can do it.
330   return true;
331 }
332 
333 //===----------------------------------------------------------------------===//
334 //  Optimization Methods
335 //===----------------------------------------------------------------------===//
336 
337 /// Check to see if the specified operand of the specified instruction is a
338 /// constant integer. If so, check to see if there are any bits set in the
339 /// constant that are not demanded. If so, shrink the constant and return true.
340 bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
341                                                         const APInt &Demanded) {
342   SDLoc dl(Op);
343 
344   // FIXME: ISD::SELECT, ISD::SELECT_CC
345   switch (Op.getOpcode()) {
346   default: break;
347   case ISD::XOR:
348   case ISD::AND:
349   case ISD::OR: {
350     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
351     if (!C) return false;
352 
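    // For XOR, if every demanded bit of the constant is already set, don't
    // shrink it; SimplifyDemandedBits prefers to widen such constants toward
    // all-ones (forming a NOT-like pattern) rather than narrow them.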
353     if (Op.getOpcode() == ISD::XOR &&
354         (C->getAPIntValue() | (~Demanded)).isAllOnesValue())
355       return false;
356 
357     // If we can expand it to have all bits set, do it.
358     if (C->getAPIntValue().intersects(~Demanded)) {
359       EVT VT = Op.getValueType();
360       SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
361                                 DAG.getConstant(Demanded &
362                                                 C->getAPIntValue(),
363                                                 dl, VT));
364       return CombineTo(Op, New);
365     }
366 
367     break;
368   }
369   }
370 
371   return false;
372 }
373 
374 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
375 /// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
376 /// generalized for targets with other types of implicit widening casts.
377 bool TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
378                                                          unsigned BitWidth,
379                                                          const APInt &Demanded,
380                                                          const SDLoc &dl) {
381   assert(Op.getNumOperands() == 2 &&
382          "ShrinkDemandedOp only supports binary operators!");
383   assert(Op.getNode()->getNumValues() == 1 &&
384          "ShrinkDemandedOp only supports nodes with one result!");
385 
386   // Early return, as this function cannot handle vector types.
387   if (Op.getValueType().isVector())
388     return false;
389 
390   // Don't do this if the node has another user, which may require the
391   // full value.
392   if (!Op.getNode()->hasOneUse())
393     return false;
394 
395   // Search for the smallest integer type with free casts to and from
396   // Op's type. For expedience, just check power-of-2 integer types.
397   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
398   unsigned DemandedSize = BitWidth - Demanded.countLeadingZeros();
399   unsigned SmallVTBits = DemandedSize;
400   if (!isPowerOf2_32(SmallVTBits))
401     SmallVTBits = NextPowerOf2(SmallVTBits);
402   for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
403     EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
404     if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
405         TLI.isZExtFree(SmallVT, Op.getValueType())) {
406       // We found a type with free casts.
407       SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
408                               DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
409                                           Op.getNode()->getOperand(0)),
410                               DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
411                                           Op.getNode()->getOperand(1)));
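      // Extend the narrow result back to the original type. Any-extend is
      // sufficient when every demanded bit fits in SmallVT; otherwise
      // zero-extend so the demanded high bits are well defined.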
412       bool NeedZext = DemandedSize > SmallVTBits;
413       SDValue Z = DAG.getNode(NeedZext ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND,
414                               dl, Op.getValueType(), X);
415       return CombineTo(Op, Z);
416     }
417   }
418   return false;
419 }
420 
421 /// Look at Op. At this point, we know that only the DemandedMask bits of the
422 /// result of Op are ever used downstream. If we can use this information to
423 /// simplify Op, create a new simplified DAG node and return true, returning the
424 /// original and new nodes in Old and New. Otherwise, analyze the expression and
425 /// return a mask of KnownOne and KnownZero bits for the expression (used to
426 /// simplify the caller).  The KnownZero/One bits may only be accurate for those
427 /// bits in the DemandedMask.
428 bool TargetLowering::SimplifyDemandedBits(SDValue Op,
429                                           const APInt &DemandedMask,
430                                           APInt &KnownZero,
431                                           APInt &KnownOne,
432                                           TargetLoweringOpt &TLO,
433                                           unsigned Depth) const {
434   unsigned BitWidth = DemandedMask.getBitWidth();
435   assert(Op.getScalarValueSizeInBits() == BitWidth &&
436          "Mask size mismatches value type size!");
437   APInt NewMask = DemandedMask;
438   SDLoc dl(Op);
439   auto &DL = TLO.DAG.getDataLayout();
440 
441   // Don't know anything.
442   KnownZero = KnownOne = APInt(BitWidth, 0);
443 
444   // Other users may use these bits.
445   if (!Op.getNode()->hasOneUse()) {
446     if (Depth != 0) {
447       // If not at the root, just compute the KnownZero/KnownOne bits to
448       // simplify things downstream.
449       TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
450       return false;
451     }
452     // If this is the root being simplified, allow it to have multiple uses,
453     // just set the NewMask to all bits.
454     NewMask = APInt::getAllOnesValue(BitWidth);
455   } else if (DemandedMask == 0) {
456     // Not demanding any bits from Op.
457     if (!Op.isUndef())
458       return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
459     return false;
460   } else if (Depth == 6) {        // Limit search depth.
461     return false;
462   }
463 
464   APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
465   switch (Op.getOpcode()) {
466   case ISD::Constant:
467     // We know all of the bits for a constant!
468     KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
469     KnownZero = ~KnownOne;
470     return false;   // Don't fall through, will infinitely loop.
471   case ISD::BUILD_VECTOR:
472     // Collect the known bits that are shared by every constant vector element.
473     KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
474     for (SDValue SrcOp : Op->ops()) {
475       if (!isa<ConstantSDNode>(SrcOp)) {
476         // We can only handle all constant values - bail out with no known bits.
477         KnownZero = KnownOne = APInt(BitWidth, 0);
478         return false;
479       }
480       KnownOne2 = cast<ConstantSDNode>(SrcOp)->getAPIntValue();
481       KnownZero2 = ~KnownOne2;
482 
483       // BUILD_VECTOR can implicitly truncate sources, we must handle this.
484       if (KnownOne2.getBitWidth() != BitWidth) {
485         assert(KnownOne2.getBitWidth() > BitWidth &&
486                KnownZero2.getBitWidth() > BitWidth &&
487                "Expected BUILD_VECTOR implicit truncation");
488         KnownOne2 = KnownOne2.trunc(BitWidth);
489         KnownZero2 = KnownZero2.trunc(BitWidth);
490       }
491 
492       // Known bits are the values that are shared by every element.
493       // TODO: support per-element known bits.
494       KnownOne &= KnownOne2;
495       KnownZero &= KnownZero2;
496     }
497     return false;   // Don't fall through, will infinitely loop.
498   case ISD::AND:
499     // If the RHS is a constant, check to see if the LHS would be zero without
500     // using the bits from the RHS.  Below, we use knowledge about the RHS to
501     // simplify the LHS, here we're using information from the LHS to simplify
502     // the RHS.
503     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
504       APInt LHSZero, LHSOne;
505       // Do not increment Depth here; that can cause an infinite loop.
506       TLO.DAG.computeKnownBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
507       // If the LHS already has zeros where RHSC does, this and is dead.
508       if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
509         return TLO.CombineTo(Op, Op.getOperand(0));
510       // If any of the set bits in the RHS are known zero on the LHS, shrink
511       // the constant.
512       if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
513         return true;
514     }
515 
516     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
517                              KnownOne, TLO, Depth+1))
518       return true;
519     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
520     if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
521                              KnownZero2, KnownOne2, TLO, Depth+1))
522       return true;
523     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
524 
525     // If all of the demanded bits are known one on one side, return the other.
526     // These bits cannot contribute to the result of the 'and'.
527     if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
528       return TLO.CombineTo(Op, Op.getOperand(0));
529     if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
530       return TLO.CombineTo(Op, Op.getOperand(1));
531     // If all of the demanded bits in the inputs are known zeros, return zero.
532     if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
533       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, Op.getValueType()));
534     // If the RHS is a constant, see if we can simplify it.
535     if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
536       return true;
537     // If the operation can be done in a smaller type, do so.
538     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
539       return true;
540 
541     // Output known-1 bits are only known if set in both the LHS & RHS.
542     KnownOne &= KnownOne2;
543     // Output known-0 bits are clear if clear in either the LHS or the RHS.
544     KnownZero |= KnownZero2;
545     break;
546   case ISD::OR:
547     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
548                              KnownOne, TLO, Depth+1))
549       return true;
550     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
551     if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
552                              KnownZero2, KnownOne2, TLO, Depth+1))
553       return true;
554     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
555 
556     // If all of the demanded bits are known zero on one side, return the other.
557     // These bits cannot contribute to the result of the 'or'.
558     if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
559       return TLO.CombineTo(Op, Op.getOperand(0));
560     if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
561       return TLO.CombineTo(Op, Op.getOperand(1));
562     // If all of the potentially set bits on one side are known to be set on
563     // the other side, just use the 'other' side.
564     if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
565       return TLO.CombineTo(Op, Op.getOperand(0));
566     if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
567       return TLO.CombineTo(Op, Op.getOperand(1));
568     // If the RHS is a constant, see if we can simplify it.
569     if (TLO.ShrinkDemandedConstant(Op, NewMask))
570       return true;
571     // If the operation can be done in a smaller type, do so.
572     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
573       return true;
574 
575     // Output known-0 bits are only known if clear in both the LHS & RHS.
576     KnownZero &= KnownZero2;
577     // Output known-1 bits are set if set in either the LHS or the RHS.
578     KnownOne |= KnownOne2;
579     break;
580   case ISD::XOR:
581     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
582                              KnownOne, TLO, Depth+1))
583       return true;
584     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
585     if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
586                              KnownOne2, TLO, Depth+1))
587       return true;
588     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
589 
590     // If all of the demanded bits are known zero on one side, return the other.
591     // These bits cannot contribute to the result of the 'xor'.
592     if ((KnownZero & NewMask) == NewMask)
593       return TLO.CombineTo(Op, Op.getOperand(0));
594     if ((KnownZero2 & NewMask) == NewMask)
595       return TLO.CombineTo(Op, Op.getOperand(1));
596     // If the operation can be done in a smaller type, do so.
597     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
598       return true;
599 
600     // If all of the unknown bits are known to be zero on one side or the other
601     // (but not both) turn this into an *inclusive* or.
602     //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
603     if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
604       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
605                                                Op.getOperand(0),
606                                                Op.getOperand(1)));
607 
608     // Output known-0 bits are those clear in both operands or set in both.
609     KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
610     // Output known-1 bits are those set in exactly one of the LHS and RHS.
611     KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
612 
613     // If all of the demanded bits on one side are known, and all of the set
614     // bits on that side are also known to be set on the other side, turn this
615     // into an AND, as we know the bits will be cleared.
616     //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
617     // NB: it is okay if more bits are known than are requested
618     if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
619       if (KnownOne == KnownOne2) { // set bits are the same on both sides
620         EVT VT = Op.getValueType();
621         SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, dl, VT);
622         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
623                                                  Op.getOperand(0), ANDC));
624       }
625     }
626 
627     // If the RHS is a constant, see if we can simplify it.
628     // For XOR, we prefer to force bits to 1 if they will make a -1.
629     // If we can't force bits, try to shrink the constant.
630     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
631       APInt Expanded = C->getAPIntValue() | (~NewMask);
632       // If we can expand it to have all bits set, do it.
633       if (Expanded.isAllOnesValue()) {
634         if (Expanded != C->getAPIntValue()) {
635           EVT VT = Op.getValueType();
636           SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
637                                         TLO.DAG.getConstant(Expanded, dl, VT));
638           return TLO.CombineTo(Op, New);
639         }
640         // If it already has all the bits set, there is nothing to change,
641         // but don't shrink either!
642       } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
643         return true;
644       }
645     }
646 
647     KnownZero = KnownZeroOut;
648     KnownOne  = KnownOneOut;
649     break;
650   case ISD::SELECT:
651     if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
652                              KnownOne, TLO, Depth+1))
653       return true;
654     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
655                              KnownOne2, TLO, Depth+1))
656       return true;
657     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
658     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
659 
660     // If the operands are constants, see if we can simplify them.
661     if (TLO.ShrinkDemandedConstant(Op, NewMask))
662       return true;
663 
664     // Only known if known in both the LHS and RHS.
665     KnownOne &= KnownOne2;
666     KnownZero &= KnownZero2;
667     break;
668   case ISD::SELECT_CC:
669     if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
670                              KnownOne, TLO, Depth+1))
671       return true;
672     if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
673                              KnownOne2, TLO, Depth+1))
674       return true;
675     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
676     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
677 
678     // If the operands are constants, see if we can simplify them.
679     if (TLO.ShrinkDemandedConstant(Op, NewMask))
680       return true;
681 
682     // Only known if known in both the LHS and RHS.
683     KnownOne &= KnownOne2;
684     KnownZero &= KnownZero2;
685     break;
686   case ISD::SHL:
687     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
688       unsigned ShAmt = SA->getZExtValue();
689       SDValue InOp = Op.getOperand(0);
690 
691       // If the shift count is an invalid immediate, don't do anything.
692       if (ShAmt >= BitWidth)
693         break;
694 
695       // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
696       // single shift.  We can do this if the bottom bits (which are shifted
697       // out) are never demanded.
698       if (InOp.getOpcode() == ISD::SRL &&
699           isa<ConstantSDNode>(InOp.getOperand(1))) {
700         if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
701           unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
702           unsigned Opc = ISD::SHL;
703           int Diff = ShAmt-C1;
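          // If the original right-shift amount C1 exceeds the new left-shift
          // amount, the combined operation is a right shift by the difference.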
704           if (Diff < 0) {
705             Diff = -Diff;
706             Opc = ISD::SRL;
707           }
708 
709           SDValue NewSA =
710             TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
711           EVT VT = Op.getValueType();
712           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
713                                                    InOp.getOperand(0), NewSA));
714         }
715       }
716 
717       if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
718                                KnownZero, KnownOne, TLO, Depth+1))
719         return true;
720 
721       // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
722       // are not demanded. This will likely allow the anyext to be folded away.
723       if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
724         SDValue InnerOp = InOp.getNode()->getOperand(0);
725         EVT InnerVT = InnerOp.getValueType();
726         unsigned InnerBits = InnerVT.getSizeInBits();
727         if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
728             isTypeDesirableForOp(ISD::SHL, InnerVT)) {
729           EVT ShTy = getShiftAmountTy(InnerVT, DL);
730           if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
731             ShTy = InnerVT;
732           SDValue NarrowShl =
733             TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
734                             TLO.DAG.getConstant(ShAmt, dl, ShTy));
735           return
736             TLO.CombineTo(Op,
737                           TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(),
738                                           NarrowShl));
739         }
740         // Repeat the SHL optimization above in cases where an extension
741         // intervenes: (shl (anyext (shr x, c1)), c2) to
742         // (shl (anyext x), c2-c1).  This requires that the bottom c1 bits
743         // aren't demanded (as above) and that the shifted upper c1 bits of
744         // x aren't demanded.
745         if (InOp.hasOneUse() &&
746             InnerOp.getOpcode() == ISD::SRL &&
747             InnerOp.hasOneUse() &&
748             isa<ConstantSDNode>(InnerOp.getOperand(1))) {
749           uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
750             ->getZExtValue();
751           if (InnerShAmt < ShAmt &&
752               InnerShAmt < InnerBits &&
753               NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 &&
754               NewMask.trunc(ShAmt) == 0) {
755             SDValue NewSA =
756               TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
757                                   Op.getOperand(1).getValueType());
758             EVT VT = Op.getValueType();
759             SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
760                                              InnerOp.getOperand(0));
761             return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT,
762                                                      NewExt, NewSA));
763           }
764         }
765       }
766 
767       KnownZero <<= SA->getZExtValue();
768       KnownOne  <<= SA->getZExtValue();
769       // low bits known zero.
770       KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue());
771     }
772     break;
773   case ISD::SRL:
774     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
775       EVT VT = Op.getValueType();
776       unsigned ShAmt = SA->getZExtValue();
777       unsigned VTSize = VT.getSizeInBits();
778       SDValue InOp = Op.getOperand(0);
779 
780       // If the shift count is an invalid immediate, don't do anything.
781       if (ShAmt >= BitWidth)
782         break;
783 
784       APInt InDemandedMask = (NewMask << ShAmt);
785 
786       // If the shift is exact, then it does demand the low bits (and knows that
787       // they are zero).
788       if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
789         InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);
790 
791       // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
792       // single shift.  We can do this if the top bits (which are shifted out)
793       // are never demanded.
794       if (InOp.getOpcode() == ISD::SHL &&
795           isa<ConstantSDNode>(InOp.getOperand(1))) {
796         if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
797           unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
798           unsigned Opc = ISD::SRL;
799           int Diff = ShAmt-C1;
800           if (Diff < 0) {
801             Diff = -Diff;
802             Opc = ISD::SHL;
803           }
804 
805           SDValue NewSA =
806             TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
807           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
808                                                    InOp.getOperand(0), NewSA));
809         }
810       }
811 
812       // Compute the new bits that are at the top now.
813       if (SimplifyDemandedBits(InOp, InDemandedMask,
814                                KnownZero, KnownOne, TLO, Depth+1))
815         return true;
816       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
817       KnownZero = KnownZero.lshr(ShAmt);
818       KnownOne  = KnownOne.lshr(ShAmt);
819 
820       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
821       KnownZero |= HighBits;  // High bits known zero.
822     }
823     break;
824   case ISD::SRA:
825     // If this is an arithmetic shift right and only the low-bit is set, we can
826     // always convert this into a logical shr, even if the shift amount is
827     // variable.  The low bit of the shift cannot be an input sign bit unless
828     // the shift amount is >= the size of the datatype, which is undefined.
829     if (NewMask == 1)
830       return TLO.CombineTo(Op,
831                            TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
832                                            Op.getOperand(0), Op.getOperand(1)));
833 
834     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
835       EVT VT = Op.getValueType();
836       unsigned ShAmt = SA->getZExtValue();
837 
838       // If the shift count is an invalid immediate, don't do anything.
839       if (ShAmt >= BitWidth)
840         break;
841 
842       APInt InDemandedMask = (NewMask << ShAmt);
843 
844       // If the shift is exact, then it does demand the low bits (and knows that
845       // they are zero).
846       if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
847         InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);
848 
849       // If any of the demanded bits are produced by the sign extension, we also
850       // demand the input sign bit.
851       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
852       if (HighBits.intersects(NewMask))
853         InDemandedMask |= APInt::getSignBit(VT.getScalarSizeInBits());
854 
855       if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
856                                KnownZero, KnownOne, TLO, Depth+1))
857         return true;
858       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
859       KnownZero = KnownZero.lshr(ShAmt);
860       KnownOne  = KnownOne.lshr(ShAmt);
861 
862       // Handle the sign bit, adjusted to where it is now in the mask.
863       APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
864 
865       // If the input sign bit is known to be zero, or if none of the top bits
866       // are demanded, turn this into an unsigned shift right.
867       if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
868         SDNodeFlags Flags;
869         Flags.setExact(cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact());
870         return TLO.CombineTo(Op,
871                              TLO.DAG.getNode(ISD::SRL, dl, VT, Op.getOperand(0),
872                                              Op.getOperand(1), &Flags));
873       }
874 
875       int Log2 = NewMask.exactLogBase2();
876       if (Log2 >= 0) {
877         // The bit must come from the sign.
878         SDValue NewSA =
879           TLO.DAG.getConstant(BitWidth - 1 - Log2, dl,
880                               Op.getOperand(1).getValueType());
881         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
882                                                  Op.getOperand(0), NewSA));
883       }
884 
885       if (KnownOne.intersects(SignBit))
886         // New bits are known one.
887         KnownOne |= HighBits;
888     }
889     break;
890   case ISD::SIGN_EXTEND_INREG: {
891     EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
892 
893     APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
894     // If we only care about the highest bit, don't bother shifting right.
895     if (MsbMask == NewMask) {
896       unsigned ShAmt = ExVT.getScalarSizeInBits();
897       SDValue InOp = Op.getOperand(0);
898       unsigned VTBits = Op->getValueType(0).getScalarSizeInBits();
899       bool AlreadySignExtended =
900         TLO.DAG.ComputeNumSignBits(InOp) >= VTBits-ShAmt+1;
901       // However if the input is already sign extended we expect the sign
902       // extension to be dropped altogether later and do not simplify.
903       if (!AlreadySignExtended) {
904         // Compute the correct shift amount type, which must be getShiftAmountTy
905         // for scalar types after legalization.
906         EVT ShiftAmtTy = Op.getValueType();
907         if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
908           ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
909 
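        // A left shift by BitWidth - ShAmt moves the narrow type's sign bit
        // (bit ShAmt - 1) into the MSB, which is the only bit demanded here.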
910         SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, dl,
911                                                ShiftAmtTy);
912         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
913                                                  Op.getValueType(), InOp,
914                                                  ShiftAmt));
915       }
916     }
917 
918     // Sign extension.  Compute the demanded bits in the result that are not
919     // present in the input.
920     APInt NewBits =
921       APInt::getHighBitsSet(BitWidth,
922                             BitWidth - ExVT.getScalarSizeInBits());
923 
924     // If none of the extended bits are demanded, eliminate the sextinreg.
925     if ((NewBits & NewMask) == 0)
926       return TLO.CombineTo(Op, Op.getOperand(0));
927 
928     APInt InSignBit =
929       APInt::getSignBit(ExVT.getScalarSizeInBits()).zext(BitWidth);
930     APInt InputDemandedBits =
931       APInt::getLowBitsSet(BitWidth,
932                            ExVT.getScalarSizeInBits()) &
933       NewMask;
934 
935     // Since the sign extended bits are demanded, we know that the sign
936     // bit is demanded.
937     InputDemandedBits |= InSignBit;
938 
939     if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
940                              KnownZero, KnownOne, TLO, Depth+1))
941       return true;
942     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
943 
944     // If the sign bit of the input is known set or clear, then we know the
945     // top bits of the result.
946 
947     // If the input sign bit is known zero, convert this into a zero extension.
948     if (KnownZero.intersects(InSignBit))
949       return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(
950                                    Op.getOperand(0), dl, ExVT.getScalarType()));
951 
952     if (KnownOne.intersects(InSignBit)) {    // Input sign bit known set
953       KnownOne |= NewBits;
954       KnownZero &= ~NewBits;
955     } else {                       // Input sign bit unknown
956       KnownZero &= ~NewBits;
957       KnownOne &= ~NewBits;
958     }
959     break;
960   }
961   case ISD::BUILD_PAIR: {
962     EVT HalfVT = Op.getOperand(0).getValueType();
963     unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
964 
965     APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
966     APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
967 
968     APInt KnownZeroLo, KnownOneLo;
969     APInt KnownZeroHi, KnownOneHi;
970 
971     if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
972                              KnownOneLo, TLO, Depth + 1))
973       return true;
974 
975     if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
976                              KnownOneHi, TLO, Depth + 1))
977       return true;
978 
979     KnownZero = KnownZeroLo.zext(BitWidth) |
980                 KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);
981 
982     KnownOne = KnownOneLo.zext(BitWidth) |
983                KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
984     break;
985   }
986   case ISD::ZERO_EXTEND: {
987     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
988     APInt InMask = NewMask.trunc(OperandBitWidth);
989 
990     // If none of the top bits are demanded, convert this into an any_extend.
991     APInt NewBits =
992       APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
993     if (!NewBits.intersects(NewMask))
994       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
995                                                Op.getValueType(),
996                                                Op.getOperand(0)));
997 
998     if (SimplifyDemandedBits(Op.getOperand(0), InMask,
999                              KnownZero, KnownOne, TLO, Depth+1))
1000       return true;
1001     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1002     KnownZero = KnownZero.zext(BitWidth);
1003     KnownOne = KnownOne.zext(BitWidth);
1004     KnownZero |= NewBits;
1005     break;
1006   }
1007   case ISD::SIGN_EXTEND: {
1008     EVT InVT = Op.getOperand(0).getValueType();
1009     unsigned InBits = InVT.getScalarSizeInBits();
1010     APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
1011     APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
1012     APInt NewBits   = ~InMask & NewMask;
1013 
1014     // If none of the top bits are demanded, convert this into an any_extend.
1015     if (NewBits == 0)
1016       return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
1017                                               Op.getValueType(),
1018                                               Op.getOperand(0)));
1019 
1020     // Since some of the sign extended bits are demanded, we know that the sign
1021     // bit is demanded.
1022     APInt InDemandedBits = InMask & NewMask;
1023     InDemandedBits |= InSignBit;
1024     InDemandedBits = InDemandedBits.trunc(InBits);
1025 
1026     if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
1027                              KnownOne, TLO, Depth+1))
1028       return true;
1029     KnownZero = KnownZero.zext(BitWidth);
1030     KnownOne = KnownOne.zext(BitWidth);
1031 
1032     // If the sign bit is known zero, convert this to a zero extend.
1033     if (KnownZero.intersects(InSignBit))
1034       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
1035                                                Op.getValueType(),
1036                                                Op.getOperand(0)));
1037 
1038     // If the sign bit is known one, the top bits match.
1039     if (KnownOne.intersects(InSignBit)) {
1040       KnownOne |= NewBits;
1041       assert((KnownZero & NewBits) == 0);
1042     } else {   // Otherwise, top bits aren't known.
1043       assert((KnownOne & NewBits) == 0);
1044       assert((KnownZero & NewBits) == 0);
1045     }
1046     break;
1047   }
1048   case ISD::ANY_EXTEND: {
1049     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
1050     APInt InMask = NewMask.trunc(OperandBitWidth);
1051     if (SimplifyDemandedBits(Op.getOperand(0), InMask,
1052                              KnownZero, KnownOne, TLO, Depth+1))
1053       return true;
1054     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1055     KnownZero = KnownZero.zext(BitWidth);
1056     KnownOne = KnownOne.zext(BitWidth);
1057     break;
1058   }
1059   case ISD::TRUNCATE: {
1060     // Simplify the input, using demanded bit information, and compute the known
1061     // zero/one bits live out.
1062     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
1063     APInt TruncMask = NewMask.zext(OperandBitWidth);
1064     if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
1065                              KnownZero, KnownOne, TLO, Depth+1))
1066       return true;
1067     KnownZero = KnownZero.trunc(BitWidth);
1068     KnownOne = KnownOne.trunc(BitWidth);
1069 
1070     // If the input is only used by this truncate, see if we can shrink it based
1071     // on the known demanded bits.
1072     if (Op.getOperand(0).getNode()->hasOneUse()) {
1073       SDValue In = Op.getOperand(0);
1074       switch (In.getOpcode()) {
1075       default: break;
1076       case ISD::SRL:
1077         // Shrink SRL by a constant if none of the high bits shifted in are
1078         // demanded.
1079         if (TLO.LegalTypes() &&
1080             !isTypeDesirableForOp(ISD::SRL, Op.getValueType()))
1081           // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
1082           // undesirable.
1083           break;
1084         ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
1085         if (!ShAmt)
1086           break;
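        // Reuse the original shift amount, but rebuild the constant in the
        // target's preferred shift-amount type once types are legal.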
1087         SDValue Shift = In.getOperand(1);
1088         if (TLO.LegalTypes()) {
1089           uint64_t ShVal = ShAmt->getZExtValue();
1090           Shift = TLO.DAG.getConstant(ShVal, dl,
1091                                       getShiftAmountTy(Op.getValueType(), DL));
1092         }
1093 
1094         APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
1095                                                OperandBitWidth - BitWidth);
1096         HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
1097 
1098         if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
1099           // None of the shifted in bits are needed.  Add a truncate of the
1100           // shift input, then shift it.
1101           SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
1102                                              Op.getValueType(),
1103                                              In.getOperand(0));
1104           return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
1105                                                    Op.getValueType(),
1106                                                    NewTrunc,
1107                                                    Shift));
1108         }
1109         break;
1110       }
1111     }
1112 
1113     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1114     break;
1115   }
1116   case ISD::AssertZext: {
1117     // AssertZext demands all of the high bits, plus any of the low bits
1118     // demanded by its users.
1119     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1120     APInt InMask = APInt::getLowBitsSet(BitWidth,
1121                                         VT.getSizeInBits());
1122     if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | NewMask,
1123                              KnownZero, KnownOne, TLO, Depth+1))
1124       return true;
1125     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1126 
1127     KnownZero |= ~InMask & NewMask;
1128     break;
1129   }
1130   case ISD::BITCAST:
1131     // If this is an FP->Int bitcast and if the sign bit is the only
1132     // thing demanded, turn this into a FGETSIGN.
1133     if (!TLO.LegalOperations() &&
1134         !Op.getValueType().isVector() &&
1135         !Op.getOperand(0).getValueType().isVector() &&
1136         NewMask == APInt::getSignBit(Op.getValueSizeInBits()) &&
1137         Op.getOperand(0).getValueType().isFloatingPoint()) {
1138       bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
1139       bool i32Legal  = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
1140       if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple() &&
1141            Op.getOperand(0).getValueType() != MVT::f128) {
1142         // Cannot eliminate/lower SHL for f128 yet.
1143         EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32;
1144         // Make a FGETSIGN + SHL to move the sign bit into the appropriate
1145         // place.  We expect the SHL to be eliminated by other optimizations.
1146         SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
1147         unsigned OpVTSizeInBits = Op.getValueSizeInBits();
1148         if (!OpVTLegal && OpVTSizeInBits > 32)
1149           Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign);
1150         unsigned ShVal = Op.getValueSizeInBits() - 1;
1151         SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, Op.getValueType());
1152         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
1153                                                  Op.getValueType(),
1154                                                  Sign, ShAmt));
1155       }
1156     }
1157     break;
1158   case ISD::ADD:
1159   case ISD::MUL:
1160   case ISD::SUB: {
1161     // Add, Sub, and Mul don't demand any bits in positions beyond that
1162     // of the highest bit demanded of them.
1163     APInt LoMask = APInt::getLowBitsSet(BitWidth,
1164                                         BitWidth - NewMask.countLeadingZeros());
1165     if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2,
1166                              KnownOne2, TLO, Depth+1))
1167       return true;
1168     if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2,
1169                              KnownOne2, TLO, Depth+1))
1170       return true;
1171     // See if the operation should be performed at a smaller bit width.
1172     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
1173       return true;
1174     LLVM_FALLTHROUGH;
1175   }
1176   default:
1177     // Just use computeKnownBits to compute output bits.
1178     TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
1179     break;
1180   }
1181 
1182   // If we know the value of all of the demanded bits, return this as a
1183   // constant.
1184   if ((NewMask & (KnownZero|KnownOne)) == NewMask) {
1185     // Avoid folding to a constant if any OpaqueConstant is involved.
1186     const SDNode *N = Op.getNode();
1187     for (SDNodeIterator I = SDNodeIterator::begin(N),
1188          E = SDNodeIterator::end(N); I != E; ++I) {
1189       SDNode *Op = *I;
1190       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
1191         if (C->isOpaque())
1192           return false;
1193     }
1194     return TLO.CombineTo(Op,
1195                          TLO.DAG.getConstant(KnownOne, dl, Op.getValueType()));
1196   }
1197 
1198   return false;
1199 }
1200 
1201 /// Determine which of the bits specified in Mask are known to be either zero or
1202 /// one and return them in the KnownZero/KnownOne bitsets.
1203 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1204                                                    APInt &KnownZero,
1205                                                    APInt &KnownOne,
1206                                                    const SelectionDAG &DAG,
1207                                                    unsigned Depth) const {
1208   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1209           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1210           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1211           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1212          "Should use MaskedValueIsZero if you don't know whether Op"
1213          " is a target node!");
1214   KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
1215 }
1216 
1217 /// This method can be implemented by targets that want to expose additional
1218 /// information about sign bits to the DAG Combiner.
1219 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
1220                                                          const SelectionDAG &,
1221                                                          unsigned Depth) const {
1222   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1223           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1224           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1225           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1226          "Should use ComputeNumSignBits if you don't know whether Op"
1227          " is a target node!");
1228   return 1;
1229 }
1230 
1231 bool TargetLowering::isConstTrueVal(const SDNode *N) const {
1232   if (!N)
1233     return false;
1234 
1235   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1236   if (!CN) {
1237     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1238     if (!BV)
1239       return false;
1240 
1241     BitVector UndefElements;
1242     CN = BV->getConstantSplatNode(&UndefElements);
1243     // Only interested in constant splats, and we don't try to handle undef
1244     // elements in identifying boolean constants.
    if (!CN || !UndefElements.none())
1246       return false;
1247   }
1248 
1249   switch (getBooleanContents(N->getValueType(0))) {
1250   case UndefinedBooleanContent:
1251     return CN->getAPIntValue()[0];
1252   case ZeroOrOneBooleanContent:
1253     return CN->isOne();
1254   case ZeroOrNegativeOneBooleanContent:
1255     return CN->isAllOnesValue();
1256   }
1257 
1258   llvm_unreachable("Invalid boolean contents");
1259 }
1260 
1261 SDValue TargetLowering::getConstTrueVal(SelectionDAG &DAG, EVT VT,
1262                                         const SDLoc &DL) const {
1263   unsigned ElementWidth = VT.getScalarSizeInBits();
1264   APInt TrueInt =
1265       getBooleanContents(VT) == TargetLowering::ZeroOrOneBooleanContent
1266           ? APInt(ElementWidth, 1)
1267           : APInt::getAllOnesValue(ElementWidth);
1268   return DAG.getConstant(TrueInt, DL, VT);
1269 }
1270 
1271 bool TargetLowering::isConstFalseVal(const SDNode *N) const {
1272   if (!N)
1273     return false;
1274 
1275   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1276   if (!CN) {
1277     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1278     if (!BV)
1279       return false;
1280 
1281     BitVector UndefElements;
1282     CN = BV->getConstantSplatNode(&UndefElements);
1283     // Only interested in constant splats, and we don't try to handle undef
1284     // elements in identifying boolean constants.
    if (!CN || !UndefElements.none())
1286       return false;
1287   }
1288 
1289   if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
1290     return !CN->getAPIntValue()[0];
1291 
1292   return CN->isNullValue();
1293 }
1294 
1295 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
1296                                        bool SExt) const {
1297   if (VT == MVT::i1)
1298     return N->isOne();
1299 
1300   TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
1301   switch (Cnt) {
1302   case TargetLowering::ZeroOrOneBooleanContent:
1303     // An extended value of 1 is always true, unless its original type is i1,
1304     // in which case it will be sign extended to -1.
1305     return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
1306   case TargetLowering::UndefinedBooleanContent:
1307   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1308     return N->isAllOnesValue() && SExt;
1309   }
1310   llvm_unreachable("Unexpected enumeration.");
1311 }
1312 
1313 /// This helper function of SimplifySetCC tries to optimize the comparison when
1314 /// either operand of the SetCC node is a bitwise-and instruction.
1315 SDValue TargetLowering::simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
1316                                              ISD::CondCode Cond,
1317                                              DAGCombinerInfo &DCI,
1318                                              const SDLoc &DL) const {
1319   // Match these patterns in any of their permutations:
1320   // (X & Y) == Y
1321   // (X & Y) != Y
1322   if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
1323     std::swap(N0, N1);
1324 
1325   EVT OpVT = N0.getValueType();
1326   if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
1327       (Cond != ISD::SETEQ && Cond != ISD::SETNE))
1328     return SDValue();
1329 
1330   SDValue X, Y;
1331   if (N0.getOperand(0) == N1) {
1332     X = N0.getOperand(1);
1333     Y = N0.getOperand(0);
1334   } else if (N0.getOperand(1) == N1) {
1335     X = N0.getOperand(0);
1336     Y = N0.getOperand(1);
1337   } else {
1338     return SDValue();
1339   }
1340 
1341   SelectionDAG &DAG = DCI.DAG;
1342   SDValue Zero = DAG.getConstant(0, DL, OpVT);
1343   if (DAG.isKnownToBeAPowerOfTwo(Y)) {
1344     // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
1345     // Note that where Y is variable and is known to have at most one bit set
1346     // (for example, if it is Z & 1) we cannot do this; the expressions are not
1347     // equivalent when Y == 0.
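    // For example, if Y is known to be 8, then X & 8 is either 0 or 8, so
    // (X & 8) == 8 is equivalent to (X & 8) != 0.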
1348     Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
1349     if (DCI.isBeforeLegalizeOps() ||
1350         isCondCodeLegal(Cond, N0.getSimpleValueType()))
1351       return DAG.getSetCC(DL, VT, N0, Zero, Cond);
1352   } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
1353     // If the target supports an 'and-not' or 'and-complement' logic operation,
1354     // try to use that to make a comparison operation more efficient.
1355     // But don't do this transform if the mask is a single bit because there are
1356     // more efficient ways to deal with that case (for example, 'bt' on x86 or
1357     // 'rlwinm' on PPC).
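    // (X & Y) == Y holds exactly when no bit of Y is missing from X, i.e.
    // (~X & Y) == 0, which such targets can do with one and-not plus a
    // compare against zero (e.g. 'andn' on x86 BMI or 'andc' on PPC).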
1358 
1359     // Bail out if the compare operand that we want to turn into a zero is
1360     // already a zero (otherwise, infinite loop).
1361     auto *YConst = dyn_cast<ConstantSDNode>(Y);
1362     if (YConst && YConst->isNullValue())
1363       return SDValue();
1364 
1365     // Transform this into: ~X & Y == 0.
1366     SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
1367     SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
1368     return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
1369   }
1370 
1371   return SDValue();
1372 }
1373 
1374 /// Try to simplify a setcc built with the specified operands and cc. If it is
1375 /// unable to simplify it, return a null SDValue.
1376 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
1377                                       ISD::CondCode Cond, bool foldBooleans,
1378                                       DAGCombinerInfo &DCI,
1379                                       const SDLoc &dl) const {
1380   SelectionDAG &DAG = DCI.DAG;
1381 
1382   // These setcc operations always fold.
1383   switch (Cond) {
1384   default: break;
1385   case ISD::SETFALSE:
1386   case ISD::SETFALSE2: return DAG.getConstant(0, dl, VT);
1387   case ISD::SETTRUE:
1388   case ISD::SETTRUE2: {
1389     TargetLowering::BooleanContent Cnt =
1390         getBooleanContents(N0->getValueType(0));
1391     return DAG.getConstant(
1392         Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1393         VT);
1394   }
1395   }
1396 
1397   // Ensure that the constant occurs on the RHS, and fold constant
1398   // comparisons.
1399   ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
1400   if (isa<ConstantSDNode>(N0.getNode()) &&
1401       (DCI.isBeforeLegalizeOps() ||
1402        isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
1403     return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
1404 
1405   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1406     const APInt &C1 = N1C->getAPIntValue();
1407 
1408     // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
1409     // equality comparison, then we're just comparing whether X itself is
1410     // zero.
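    // For a 32-bit x, (ctlz x) is 32 only when x == 0, and shifting right by 5
    // extracts exactly that "== 32" bit.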
1411     if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
1412         N0.getOperand(0).getOpcode() == ISD::CTLZ &&
1413         N0.getOperand(1).getOpcode() == ISD::Constant) {
1414       const APInt &ShAmt
1415         = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1416       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1417           ShAmt == Log2_32(N0.getValueSizeInBits())) {
1418         if ((C1 == 0) == (Cond == ISD::SETEQ)) {
1419           // (srl (ctlz x), 5) == 0  -> X != 0
1420           // (srl (ctlz x), 5) != 1  -> X != 0
1421           Cond = ISD::SETNE;
1422         } else {
1423           // (srl (ctlz x), 5) != 0  -> X == 0
1424           // (srl (ctlz x), 5) == 1  -> X == 0
1425           Cond = ISD::SETEQ;
1426         }
1427         SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
1428         return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
1429                             Zero, Cond);
1430       }
1431     }
1432 
1433     SDValue CTPOP = N0;
1434     // Look through truncs that don't change the value of a ctpop.
1435     if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
1436       CTPOP = N0.getOperand(0);
1437 
1438     if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
1439         (N0 == CTPOP ||
1440          N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) {
1441       EVT CTVT = CTPOP.getValueType();
1442       SDValue CTOp = CTPOP.getOperand(0);
1443 
1444       // (ctpop x) u< 2 -> (x & x-1) == 0
1445       // (ctpop x) u> 1 -> (x & x-1) != 0
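      // x & (x - 1) clears the lowest set bit of x, so it is zero exactly when
      // x has at most one bit set, i.e. when (ctpop x) <= 1.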
1446       if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
1447         SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
1448                                   DAG.getConstant(1, dl, CTVT));
1449         SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
1450         ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
1451         return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
1452       }
1453 
1454       // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
1455     }
1456 
1457     // (zext x) == C --> x == (trunc C)
1458     // (sext x) == C --> x == (trunc C)
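    // For example, (zext i8 x to i32) == 42 can be checked as x == 42 in i8,
    // provided the constant still fits after truncation (verified below).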
1459     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1460         DCI.isBeforeLegalize() && N0->hasOneUse()) {
1461       unsigned MinBits = N0.getValueSizeInBits();
1462       SDValue PreExt;
1463       bool Signed = false;
1464       if (N0->getOpcode() == ISD::ZERO_EXTEND) {
1465         // ZExt
1466         MinBits = N0->getOperand(0).getValueSizeInBits();
1467         PreExt = N0->getOperand(0);
1468       } else if (N0->getOpcode() == ISD::AND) {
1469         // DAGCombine turns costly ZExts into ANDs
1470         if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
1471           if ((C->getAPIntValue()+1).isPowerOf2()) {
1472             MinBits = C->getAPIntValue().countTrailingOnes();
1473             PreExt = N0->getOperand(0);
1474           }
1475       } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
1476         // SExt
1477         MinBits = N0->getOperand(0).getValueSizeInBits();
1478         PreExt = N0->getOperand(0);
1479         Signed = true;
1480       } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
1481         // ZEXTLOAD / SEXTLOAD
1482         if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
1483           MinBits = LN0->getMemoryVT().getSizeInBits();
1484           PreExt = N0;
1485         } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
1486           Signed = true;
1487           MinBits = LN0->getMemoryVT().getSizeInBits();
1488           PreExt = N0;
1489         }
1490       }
1491 
      // Figure out how many bits we need in order to preserve this constant.
1493       unsigned ReqdBits = Signed ?
1494         C1.getBitWidth() - C1.getNumSignBits() + 1 :
1495         C1.getActiveBits();
1496 
1497       // Make sure we're not losing bits from the constant.
1498       if (MinBits > 0 &&
1499           MinBits < C1.getBitWidth() &&
1500           MinBits >= ReqdBits) {
1501         EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
1502         if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
1503           // Will get folded away.
1504           SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
1505           if (MinBits == 1 && C1 == 1)
1506             // Invert the condition.
1507             return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1),
1508                                 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1509           SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
1510           return DAG.getSetCC(dl, VT, Trunc, C, Cond);
1511         }
1512 
1513         // If truncating the setcc operands is not desirable, we can still
1514         // simplify the expression in some cases:
        // (setcc ([sz]ext (setcc x, y, cc)), 0, setne)  -> (setcc x, y, cc)
        // (setcc ([sz]ext (setcc x, y, cc)), 0, seteq)  -> (setcc x, y, inv(cc))
        // (setcc (zext (setcc x, y, cc)), 1, setne)     -> (setcc x, y, inv(cc))
        // (setcc (zext (setcc x, y, cc)), 1, seteq)     -> (setcc x, y, cc)
        // (setcc (sext (setcc x, y, cc)), -1, setne)    -> (setcc x, y, inv(cc))
        // (setcc (sext (setcc x, y, cc)), -1, seteq)    -> (setcc x, y, cc)
1521         SDValue TopSetCC = N0->getOperand(0);
1522         unsigned N0Opc = N0->getOpcode();
1523         bool SExt = (N0Opc == ISD::SIGN_EXTEND);
1524         if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
1525             TopSetCC.getOpcode() == ISD::SETCC &&
1526             (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
1527             (isConstFalseVal(N1C) ||
1528              isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
1529 
1530           bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
1531                          (!N1C->isNullValue() && Cond == ISD::SETNE);
1532 
1533           if (!Inverse)
1534             return TopSetCC;
1535 
1536           ISD::CondCode InvCond = ISD::getSetCCInverse(
1537               cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
1538               TopSetCC.getOperand(0).getValueType().isInteger());
1539           return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
1540                                       TopSetCC.getOperand(1),
1541                                       InvCond);
1542 
1543         }
1544       }
1545     }
1546 
1547     // If the LHS is '(and load, const)', the RHS is 0,
1548     // the test is for equality or unsigned, and all 1 bits of the const are
1549     // in the same partial word, see if we can shorten the load.
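    // For example, (and (load i32 p), 0xFF00) == 0 only inspects bits that
    // live in a single byte of the loaded value, so the test can be done on a
    // narrower load of just that byte.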
1550     if (DCI.isBeforeLegalize() &&
1551         !ISD::isSignedIntSetCC(Cond) &&
1552         N0.getOpcode() == ISD::AND && C1 == 0 &&
1553         N0.getNode()->hasOneUse() &&
1554         isa<LoadSDNode>(N0.getOperand(0)) &&
1555         N0.getOperand(0).getNode()->hasOneUse() &&
1556         isa<ConstantSDNode>(N0.getOperand(1))) {
1557       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
1558       APInt bestMask;
1559       unsigned bestWidth = 0, bestOffset = 0;
1560       if (!Lod->isVolatile() && Lod->isUnindexed()) {
1561         unsigned origWidth = N0.getValueSizeInBits();
1562         unsigned maskWidth = origWidth;
        // We can narrow (e.g.) 16-bit extending loads on a 32-bit target to
        // 8 bits, but have to be careful...
1565         if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
1566           origWidth = Lod->getMemoryVT().getSizeInBits();
1567         const APInt &Mask =
1568           cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1569         for (unsigned width = origWidth / 2; width>=8; width /= 2) {
1570           APInt newMask = APInt::getLowBitsSet(maskWidth, width);
1571           for (unsigned offset=0; offset<origWidth/width; offset++) {
1572             if ((newMask & Mask) == Mask) {
1573               if (!DAG.getDataLayout().isLittleEndian())
1574                 bestOffset = (origWidth/width - offset - 1) * (width/8);
1575               else
1576                 bestOffset = (uint64_t)offset * (width/8);
1577               bestMask = Mask.lshr(offset * (width/8) * 8);
1578               bestWidth = width;
1579               break;
1580             }
1581             newMask = newMask << width;
1582           }
1583         }
1584       }
1585       if (bestWidth) {
1586         EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
1587         if (newVT.isRound()) {
1588           EVT PtrType = Lod->getOperand(1).getValueType();
1589           SDValue Ptr = Lod->getBasePtr();
1590           if (bestOffset != 0)
1591             Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
1592                               DAG.getConstant(bestOffset, dl, PtrType));
1593           unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
1594           SDValue NewLoad = DAG.getLoad(
1595               newVT, dl, Lod->getChain(), Ptr,
1596               Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign);
1597           return DAG.getSetCC(dl, VT,
1598                               DAG.getNode(ISD::AND, dl, newVT, NewLoad,
1599                                       DAG.getConstant(bestMask.trunc(bestWidth),
1600                                                       dl, newVT)),
1601                               DAG.getConstant(0LL, dl, newVT), Cond);
1602         }
1603       }
1604     }
1605 
1606     // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
1607     if (N0.getOpcode() == ISD::ZERO_EXTEND) {
1608       unsigned InSize = N0.getOperand(0).getValueSizeInBits();
1609 
1610       // If the comparison constant has bits in the upper part, the
1611       // zero-extended value could never match.
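      // For example, (zext i8 x to i32) == 0x100 can never be true because the
      // zero-extended value is at most 0xFF.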
1612       if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
1613                                               C1.getBitWidth() - InSize))) {
1614         switch (Cond) {
1615         case ISD::SETUGT:
1616         case ISD::SETUGE:
1617         case ISD::SETEQ: return DAG.getConstant(0, dl, VT);
1618         case ISD::SETULT:
1619         case ISD::SETULE:
1620         case ISD::SETNE: return DAG.getConstant(1, dl, VT);
1621         case ISD::SETGT:
1622         case ISD::SETGE:
1623           // True if the sign bit of C1 is set.
1624           return DAG.getConstant(C1.isNegative(), dl, VT);
1625         case ISD::SETLT:
1626         case ISD::SETLE:
1627           // True if the sign bit of C1 isn't set.
1628           return DAG.getConstant(C1.isNonNegative(), dl, VT);
1629         default:
1630           break;
1631         }
1632       }
1633 
1634       // Otherwise, we can perform the comparison with the low bits.
1635       switch (Cond) {
1636       case ISD::SETEQ:
1637       case ISD::SETNE:
1638       case ISD::SETUGT:
1639       case ISD::SETUGE:
1640       case ISD::SETULT:
1641       case ISD::SETULE: {
1642         EVT newVT = N0.getOperand(0).getValueType();
1643         if (DCI.isBeforeLegalizeOps() ||
1644             (isOperationLegal(ISD::SETCC, newVT) &&
1645              getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
1646           EVT NewSetCCVT =
1647               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), newVT);
1648           SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
1649 
1650           SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
1651                                           NewConst, Cond);
1652           return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
1653         }
1654         break;
1655       }
1656       default:
        break;   // TODO: Be more careful with signed comparisons.
1658       }
1659     } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1660                (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1661       EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
1662       unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
1663       EVT ExtDstTy = N0.getValueType();
1664       unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
1665 
1666       // If the constant doesn't fit into the number of bits for the source of
1667       // the sign extension, it is impossible for both sides to be equal.
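      // For example, a value sign-extended in-register from i8 lies in
      // [-128, 127], so an equality test against 300 folds to a constant.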
1668       if (C1.getMinSignedBits() > ExtSrcTyBits)
1669         return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
1670 
1671       SDValue ZextOp;
1672       EVT Op0Ty = N0.getOperand(0).getValueType();
1673       if (Op0Ty == ExtSrcTy) {
1674         ZextOp = N0.getOperand(0);
1675       } else {
1676         APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
1677         ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
1678                               DAG.getConstant(Imm, dl, Op0Ty));
1679       }
1680       if (!DCI.isCalledByLegalizer())
1681         DCI.AddToWorklist(ZextOp.getNode());
1682       // Otherwise, make this a use of a zext.
1683       return DAG.getSetCC(dl, VT, ZextOp,
1684                           DAG.getConstant(C1 & APInt::getLowBitsSet(
1685                                                               ExtDstTyBits,
1686                                                               ExtSrcTyBits),
1687                                           dl, ExtDstTy),
1688                           Cond);
1689     } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
1690                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1691       // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
1692       if (N0.getOpcode() == ISD::SETCC &&
1693           isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
1694         bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1);
1695         if (TrueWhenTrue)
1696           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
1697         // Invert the condition.
1698         ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
1699         CC = ISD::getSetCCInverse(CC,
1700                                   N0.getOperand(0).getValueType().isInteger());
1701         if (DCI.isBeforeLegalizeOps() ||
1702             isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
1703           return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
1704       }
1705 
1706       if ((N0.getOpcode() == ISD::XOR ||
1707            (N0.getOpcode() == ISD::AND &&
1708             N0.getOperand(0).getOpcode() == ISD::XOR &&
1709             N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
1710           isa<ConstantSDNode>(N0.getOperand(1)) &&
1711           cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
1712         // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
1713         // can only do this if the top bits are known zero.
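        // For example, if X is known to be 0 or 1, then (X ^ 1) == 0 is the
        // same as X != 0, so we can drop the xor and invert the condition.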
1714         unsigned BitWidth = N0.getValueSizeInBits();
1715         if (DAG.MaskedValueIsZero(N0,
1716                                   APInt::getHighBitsSet(BitWidth,
1717                                                         BitWidth-1))) {
1718           // Okay, get the un-inverted input value.
1719           SDValue Val;
1720           if (N0.getOpcode() == ISD::XOR)
1721             Val = N0.getOperand(0);
1722           else {
1723             assert(N0.getOpcode() == ISD::AND &&
1724                     N0.getOperand(0).getOpcode() == ISD::XOR);
1725             // ((X^1)&1)^1 -> X & 1
1726             Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
1727                               N0.getOperand(0).getOperand(0),
1728                               N0.getOperand(1));
1729           }
1730 
1731           return DAG.getSetCC(dl, VT, Val, N1,
1732                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1733         }
1734       } else if (N1C->getAPIntValue() == 1 &&
1735                  (VT == MVT::i1 ||
1736                   getBooleanContents(N0->getValueType(0)) ==
1737                       ZeroOrOneBooleanContent)) {
1738         SDValue Op0 = N0;
1739         if (Op0.getOpcode() == ISD::TRUNCATE)
1740           Op0 = Op0.getOperand(0);
1741 
1742         if ((Op0.getOpcode() == ISD::XOR) &&
1743             Op0.getOperand(0).getOpcode() == ISD::SETCC &&
1744             Op0.getOperand(1).getOpcode() == ISD::SETCC) {
1745           // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
1746           Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
1747           return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1),
1748                               Cond);
1749         }
1750         if (Op0.getOpcode() == ISD::AND &&
1751             isa<ConstantSDNode>(Op0.getOperand(1)) &&
1752             cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) {
1753           // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
1754           if (Op0.getValueType().bitsGT(VT))
1755             Op0 = DAG.getNode(ISD::AND, dl, VT,
1756                           DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
1757                           DAG.getConstant(1, dl, VT));
1758           else if (Op0.getValueType().bitsLT(VT))
1759             Op0 = DAG.getNode(ISD::AND, dl, VT,
1760                         DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
1761                         DAG.getConstant(1, dl, VT));
1762 
1763           return DAG.getSetCC(dl, VT, Op0,
1764                               DAG.getConstant(0, dl, Op0.getValueType()),
1765                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1766         }
1767         if (Op0.getOpcode() == ISD::AssertZext &&
1768             cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
1769           return DAG.getSetCC(dl, VT, Op0,
1770                               DAG.getConstant(0, dl, Op0.getValueType()),
1771                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1772       }
1773     }
1774 
1775     APInt MinVal, MaxVal;
1776     unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
1777     if (ISD::isSignedIntSetCC(Cond)) {
1778       MinVal = APInt::getSignedMinValue(OperandBitSize);
1779       MaxVal = APInt::getSignedMaxValue(OperandBitSize);
1780     } else {
1781       MinVal = APInt::getMinValue(OperandBitSize);
1782       MaxVal = APInt::getMaxValue(OperandBitSize);
1783     }
1784 
1785     // Canonicalize GE/LE comparisons to use GT/LT comparisons.
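    // For example, setge X, 5 becomes setgt X, 4; the C1 == MinVal case is
    // folded away first, so subtracting 1 cannot wrap.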
1786     if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
1787       if (C1 == MinVal) return DAG.getConstant(1, dl, VT);  // X >= MIN --> true
1788       // X >= C0 --> X > (C0 - 1)
1789       APInt C = C1 - 1;
1790       ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
1791       if ((DCI.isBeforeLegalizeOps() ||
1792            isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1793           (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1794                                 isLegalICmpImmediate(C.getSExtValue())))) {
1795         return DAG.getSetCC(dl, VT, N0,
1796                             DAG.getConstant(C, dl, N1.getValueType()),
1797                             NewCC);
1798       }
1799     }
1800 
1801     if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
1802       if (C1 == MaxVal) return DAG.getConstant(1, dl, VT);  // X <= MAX --> true
1803       // X <= C0 --> X < (C0 + 1)
1804       APInt C = C1 + 1;
1805       ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
1806       if ((DCI.isBeforeLegalizeOps() ||
1807            isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1808           (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1809                                 isLegalICmpImmediate(C.getSExtValue())))) {
1810         return DAG.getSetCC(dl, VT, N0,
1811                             DAG.getConstant(C, dl, N1.getValueType()),
1812                             NewCC);
1813       }
1814     }
1815 
1816     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
1817       return DAG.getConstant(0, dl, VT);      // X < MIN --> false
1818     if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
1819       return DAG.getConstant(1, dl, VT);      // X >= MIN --> true
1820     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
1821       return DAG.getConstant(0, dl, VT);      // X > MAX --> false
1822     if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
1823       return DAG.getConstant(1, dl, VT);      // X <= MAX --> true
1824 
1825     // Canonicalize setgt X, Min --> setne X, Min
1826     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
1827       return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1828     // Canonicalize setlt X, Max --> setne X, Max
1829     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
1830       return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1831 
1832     // If we have setult X, 1, turn it into seteq X, 0
1833     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
1834       return DAG.getSetCC(dl, VT, N0,
1835                           DAG.getConstant(MinVal, dl, N0.getValueType()),
1836                           ISD::SETEQ);
1837     // If we have setugt X, Max-1, turn it into seteq X, Max
1838     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
1839       return DAG.getSetCC(dl, VT, N0,
1840                           DAG.getConstant(MaxVal, dl, N0.getValueType()),
1841                           ISD::SETEQ);
1842 
1843     // If we have "setcc X, C0", check to see if we can shrink the immediate
1844     // by changing cc.
1845 
1846     // SETUGT X, SINTMAX  -> SETLT X, 0
1847     if (Cond == ISD::SETUGT &&
1848         C1 == APInt::getSignedMaxValue(OperandBitSize))
1849       return DAG.getSetCC(dl, VT, N0,
1850                           DAG.getConstant(0, dl, N1.getValueType()),
1851                           ISD::SETLT);
1852 
1853     // SETULT X, SINTMIN  -> SETGT X, -1
1854     if (Cond == ISD::SETULT &&
1855         C1 == APInt::getSignedMinValue(OperandBitSize)) {
1856       SDValue ConstMinusOne =
1857           DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
1858                           N1.getValueType());
1859       return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
1860     }
1861 
1862     // Fold bit comparisons when we can.
1863     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1864         (VT == N0.getValueType() ||
1865          (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
1866         N0.getOpcode() == ISD::AND) {
1867       auto &DL = DAG.getDataLayout();
1868       if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1869         EVT ShiftTy = DCI.isBeforeLegalize()
1870                           ? getPointerTy(DL)
1871                           : getShiftAmountTy(N0.getValueType(), DL);
1872         if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
1873           // Perform the xform if the AND RHS is a single bit.
1874           if (AndRHS->getAPIntValue().isPowerOf2()) {
1875             return DAG.getNode(ISD::TRUNCATE, dl, VT,
1876                               DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1877                    DAG.getConstant(AndRHS->getAPIntValue().logBase2(), dl,
1878                                    ShiftTy)));
1879           }
1880         } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
1881           // (X & 8) == 8  -->  (X & 8) >> 3
1882           // Perform the xform if C1 is a single bit.
1883           if (C1.isPowerOf2()) {
1884             return DAG.getNode(ISD::TRUNCATE, dl, VT,
1885                                DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1886                                       DAG.getConstant(C1.logBase2(), dl,
1887                                                       ShiftTy)));
1888           }
1889         }
1890       }
1891     }
1892 
1893     if (C1.getMinSignedBits() <= 64 &&
1894         !isLegalICmpImmediate(C1.getSExtValue())) {
1895       // (X & -256) == 256 -> (X >> 8) == 1
1896       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1897           N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
1898         if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1899           const APInt &AndRHSC = AndRHS->getAPIntValue();
1900           if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
1901             unsigned ShiftBits = AndRHSC.countTrailingZeros();
1902             auto &DL = DAG.getDataLayout();
1903             EVT ShiftTy = DCI.isBeforeLegalize()
1904                               ? getPointerTy(DL)
1905                               : getShiftAmountTy(N0.getValueType(), DL);
1906             EVT CmpTy = N0.getValueType();
1907             SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
1908                                         DAG.getConstant(ShiftBits, dl,
1909                                                         ShiftTy));
1910             SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, CmpTy);
1911             return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
1912           }
1913         }
1914       } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
1915                  Cond == ISD::SETULE || Cond == ISD::SETUGT) {
1916         bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
1917         // X <  0x100000000 -> (X >> 32) <  1
1918         // X >= 0x100000000 -> (X >> 32) >= 1
1919         // X <= 0x0ffffffff -> (X >> 32) <  1
1920         // X >  0x0ffffffff -> (X >> 32) >= 1
1921         unsigned ShiftBits;
1922         APInt NewC = C1;
1923         ISD::CondCode NewCond = Cond;
1924         if (AdjOne) {
1925           ShiftBits = C1.countTrailingOnes();
1926           NewC = NewC + 1;
1927           NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1928         } else {
1929           ShiftBits = C1.countTrailingZeros();
1930         }
1931         NewC = NewC.lshr(ShiftBits);
1932         if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
1933           isLegalICmpImmediate(NewC.getSExtValue())) {
1934           auto &DL = DAG.getDataLayout();
1935           EVT ShiftTy = DCI.isBeforeLegalize()
1936                             ? getPointerTy(DL)
1937                             : getShiftAmountTy(N0.getValueType(), DL);
1938           EVT CmpTy = N0.getValueType();
1939           SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
1940                                       DAG.getConstant(ShiftBits, dl, ShiftTy));
1941           SDValue CmpRHS = DAG.getConstant(NewC, dl, CmpTy);
1942           return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
1943         }
1944       }
1945     }
1946   }
1947 
1948   if (isa<ConstantFPSDNode>(N0.getNode())) {
1949     // Constant fold or commute setcc.
1950     SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
1951     if (O.getNode()) return O;
1952   } else if (auto *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1953     // If the RHS of an FP comparison is a constant, simplify it away in
1954     // some cases.
1955     if (CFP->getValueAPF().isNaN()) {
1956       // If an operand is known to be a nan, we can fold it.
1957       switch (ISD::getUnorderedFlavor(Cond)) {
1958       default: llvm_unreachable("Unknown flavor!");
1959       case 0:  // Known false.
1960         return DAG.getConstant(0, dl, VT);
1961       case 1:  // Known true.
1962         return DAG.getConstant(1, dl, VT);
1963       case 2:  // Undefined.
1964         return DAG.getUNDEF(VT);
1965       }
1966     }
1967 
1968     // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
1969     // constant if knowing that the operand is non-nan is enough.  We prefer to
1970     // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
1971     // materialize 0.0.
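    // SETO(x, x) is true exactly when x is not a NaN, so the comparison does
    // not need the constant operand at all.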
1972     if (Cond == ISD::SETO || Cond == ISD::SETUO)
1973       return DAG.getSetCC(dl, VT, N0, N0, Cond);
1974 
1975     // If the condition is not legal, see if we can find an equivalent one
1976     // which is legal.
1977     if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
1978       // If the comparison was an awkward floating-point == or != and one of
1979       // the comparison operands is infinity or negative infinity, convert the
1980       // condition to a less-awkward <= or >=.
1981       if (CFP->getValueAPF().isInfinity()) {
1982         if (CFP->getValueAPF().isNegative()) {
1983           if (Cond == ISD::SETOEQ &&
1984               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
1985             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
1986           if (Cond == ISD::SETUEQ &&
1987               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
1988             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
1989           if (Cond == ISD::SETUNE &&
1990               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
1991             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
1992           if (Cond == ISD::SETONE &&
1993               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
1994             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
1995         } else {
1996           if (Cond == ISD::SETOEQ &&
1997               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
1998             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
1999           if (Cond == ISD::SETUEQ &&
2000               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
2001             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
2002           if (Cond == ISD::SETUNE &&
2003               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
2004             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
2005           if (Cond == ISD::SETONE &&
2006               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
2007             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
2008         }
2009       }
2010     }
2011   }
2012 
2013   if (N0 == N1) {
2014     // The sext(setcc()) => setcc() optimization relies on the appropriate
2015     // constant being emitted.
2016     uint64_t EqVal = 0;
2017     switch (getBooleanContents(N0.getValueType())) {
2018     case UndefinedBooleanContent:
2019     case ZeroOrOneBooleanContent:
2020       EqVal = ISD::isTrueWhenEqual(Cond);
2021       break;
2022     case ZeroOrNegativeOneBooleanContent:
2023       EqVal = ISD::isTrueWhenEqual(Cond) ? -1 : 0;
2024       break;
2025     }
2026 
2027     // We can always fold X == X for integer setcc's.
2028     if (N0.getValueType().isInteger()) {
2029       return DAG.getConstant(EqVal, dl, VT);
2030     }
2031     unsigned UOF = ISD::getUnorderedFlavor(Cond);
2032     if (UOF == 2)   // FP operators that are undefined on NaNs.
2033       return DAG.getConstant(EqVal, dl, VT);
2034     if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
2035       return DAG.getConstant(EqVal, dl, VT);
2036     // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
2037     // if it is not already.
2038     ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
2039     if (NewCond != Cond && (DCI.isBeforeLegalizeOps() ||
2040           getCondCodeAction(NewCond, N0.getSimpleValueType()) == Legal))
2041       return DAG.getSetCC(dl, VT, N0, N1, NewCond);
2042   }
2043 
2044   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2045       N0.getValueType().isInteger()) {
2046     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
2047         N0.getOpcode() == ISD::XOR) {
2048       // Simplify (X+Y) == (X+Z) -->  Y == Z
2049       if (N0.getOpcode() == N1.getOpcode()) {
2050         if (N0.getOperand(0) == N1.getOperand(0))
2051           return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
2052         if (N0.getOperand(1) == N1.getOperand(1))
2053           return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
2054         if (DAG.isCommutativeBinOp(N0.getOpcode())) {
2055           // If X op Y == Y op X, try other combinations.
2056           if (N0.getOperand(0) == N1.getOperand(1))
2057             return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
2058                                 Cond);
2059           if (N0.getOperand(1) == N1.getOperand(0))
2060             return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
2061                                 Cond);
2062         }
2063       }
2064 
2065       // If RHS is a legal immediate value for a compare instruction, we need
2066       // to be careful about increasing register pressure needlessly.
2067       bool LegalRHSImm = false;
2068 
2069       if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
2070         if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2071           // Turn (X+C1) == C2 --> X == C2-C1
2072           if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
2073             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2074                                 DAG.getConstant(RHSC->getAPIntValue()-
2075                                                 LHSR->getAPIntValue(),
2076                                 dl, N0.getValueType()), Cond);
2077           }
2078 
2079           // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
2080           if (N0.getOpcode() == ISD::XOR)
2081             // If we know that all of the inverted bits are zero, don't bother
2082             // performing the inversion.
2083             if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
2084               return
2085                 DAG.getSetCC(dl, VT, N0.getOperand(0),
2086                              DAG.getConstant(LHSR->getAPIntValue() ^
2087                                                RHSC->getAPIntValue(),
2088                                              dl, N0.getValueType()),
2089                              Cond);
2090         }
2091 
2092         // Turn (C1-X) == C2 --> X == C1-C2
2093         if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
2094           if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
2095             return
2096               DAG.getSetCC(dl, VT, N0.getOperand(1),
2097                            DAG.getConstant(SUBC->getAPIntValue() -
2098                                              RHSC->getAPIntValue(),
2099                                            dl, N0.getValueType()),
2100                            Cond);
2101           }
2102         }
2103 
2104         // Could RHSC fold directly into a compare?
2105         if (RHSC->getValueType(0).getSizeInBits() <= 64)
2106           LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
2107       }
2108 
2109       // Simplify (X+Z) == X -->  Z == 0
2110       // Don't do this if X is an immediate that can fold into a cmp
2111       // instruction and X+Z has other uses. It could be an induction variable
2112       // chain, and the transform would increase register pressure.
2113       if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
2114         if (N0.getOperand(0) == N1)
2115           return DAG.getSetCC(dl, VT, N0.getOperand(1),
2116                               DAG.getConstant(0, dl, N0.getValueType()), Cond);
2117         if (N0.getOperand(1) == N1) {
2118           if (DAG.isCommutativeBinOp(N0.getOpcode()))
2119             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2120                                 DAG.getConstant(0, dl, N0.getValueType()),
2121                                 Cond);
2122           if (N0.getNode()->hasOneUse()) {
2123             assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
2124             auto &DL = DAG.getDataLayout();
2125             // (Z-X) == X  --> Z == X<<1
2126             SDValue SH = DAG.getNode(
2127                 ISD::SHL, dl, N1.getValueType(), N1,
2128                 DAG.getConstant(1, dl,
2129                                 getShiftAmountTy(N1.getValueType(), DL)));
2130             if (!DCI.isCalledByLegalizer())
2131               DCI.AddToWorklist(SH.getNode());
2132             return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
2133           }
2134         }
2135       }
2136     }
2137 
2138     if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
2139         N1.getOpcode() == ISD::XOR) {
2140       // Simplify  X == (X+Z) -->  Z == 0
2141       if (N1.getOperand(0) == N0)
2142         return DAG.getSetCC(dl, VT, N1.getOperand(1),
2143                         DAG.getConstant(0, dl, N1.getValueType()), Cond);
2144       if (N1.getOperand(1) == N0) {
2145         if (DAG.isCommutativeBinOp(N1.getOpcode()))
2146           return DAG.getSetCC(dl, VT, N1.getOperand(0),
2147                           DAG.getConstant(0, dl, N1.getValueType()), Cond);
2148         if (N1.getNode()->hasOneUse()) {
2149           assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
2150           auto &DL = DAG.getDataLayout();
2151           // X == (Z-X)  --> X<<1 == Z
2152           SDValue SH = DAG.getNode(
2153               ISD::SHL, dl, N1.getValueType(), N0,
2154               DAG.getConstant(1, dl, getShiftAmountTy(N0.getValueType(), DL)));
2155           if (!DCI.isCalledByLegalizer())
2156             DCI.AddToWorklist(SH.getNode());
2157           return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
2158         }
2159       }
2160     }
2161 
2162     if (SDValue V = simplifySetCCWithAnd(VT, N0, N1, Cond, DCI, dl))
2163       return V;
2164   }
2165 
2166   // Fold away ALL boolean setcc's.
2167   SDValue Temp;
2168   if (N0.getValueType() == MVT::i1 && foldBooleans) {
2169     switch (Cond) {
2170     default: llvm_unreachable("Unknown integer setcc!");
2171     case ISD::SETEQ:  // X == Y  -> ~(X^Y)
2172       Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
2173       N0 = DAG.getNOT(dl, Temp, MVT::i1);
2174       if (!DCI.isCalledByLegalizer())
2175         DCI.AddToWorklist(Temp.getNode());
2176       break;
2177     case ISD::SETNE:  // X != Y   -->  (X^Y)
2178       N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
2179       break;
2180     case ISD::SETGT:  // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
2181     case ISD::SETULT: // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
2182       Temp = DAG.getNOT(dl, N0, MVT::i1);
2183       N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
2184       if (!DCI.isCalledByLegalizer())
2185         DCI.AddToWorklist(Temp.getNode());
2186       break;
2187     case ISD::SETLT:  // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
2188     case ISD::SETUGT: // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
2189       Temp = DAG.getNOT(dl, N1, MVT::i1);
2190       N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
2191       if (!DCI.isCalledByLegalizer())
2192         DCI.AddToWorklist(Temp.getNode());
2193       break;
2194     case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
2195     case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
2196       Temp = DAG.getNOT(dl, N0, MVT::i1);
2197       N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
2198       if (!DCI.isCalledByLegalizer())
2199         DCI.AddToWorklist(Temp.getNode());
2200       break;
2201     case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
2202     case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
2203       Temp = DAG.getNOT(dl, N1, MVT::i1);
2204       N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
2205       break;
2206     }
2207     if (VT != MVT::i1) {
2208       if (!DCI.isCalledByLegalizer())
2209         DCI.AddToWorklist(N0.getNode());
2210       // FIXME: If running after legalize, we probably can't do this.
2211       N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0);
2212     }
2213     return N0;
2214   }
2215 
2216   // Could not fold it.
2217   return SDValue();
2218 }
2219 
2220 /// Returns true (and the GlobalValue and the offset) if the node is a
2221 /// GlobalAddress + offset.
2222 bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
2223                                     int64_t &Offset) const {
2224   if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
2225     GA = GASD->getGlobal();
2226     Offset += GASD->getOffset();
2227     return true;
2228   }
2229 
2230   if (N->getOpcode() == ISD::ADD) {
2231     SDValue N1 = N->getOperand(0);
2232     SDValue N2 = N->getOperand(1);
2233     if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
2234       if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
2235         Offset += V->getSExtValue();
2236         return true;
2237       }
2238     } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
2239       if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
2240         Offset += V->getSExtValue();
2241         return true;
2242       }
2243     }
2244   }
2245 
2246   return false;
2247 }
2248 
2249 SDValue TargetLowering::PerformDAGCombine(SDNode *N,
2250                                           DAGCombinerInfo &DCI) const {
2251   // Default implementation: no optimization.
2252   return SDValue();
2253 }
2254 
2255 //===----------------------------------------------------------------------===//
2256 //  Inline Assembler Implementation Methods
2257 //===----------------------------------------------------------------------===//
2258 
2259 TargetLowering::ConstraintType
2260 TargetLowering::getConstraintType(StringRef Constraint) const {
2261   unsigned S = Constraint.size();
2262 
2263   if (S == 1) {
2264     switch (Constraint[0]) {
2265     default: break;
2266     case 'r': return C_RegisterClass;
2267     case 'm':    // memory
2268     case 'o':    // offsetable
2269     case 'V':    // not offsetable
2270       return C_Memory;
2271     case 'i':    // Simple Integer or Relocatable Constant
2272     case 'n':    // Simple Integer
2273     case 'E':    // Floating Point Constant
2274     case 'F':    // Floating Point Constant
2275     case 's':    // Relocatable Constant
2276     case 'p':    // Address.
2277     case 'X':    // Allow ANY value.
2278     case 'I':    // Target registers.
2279     case 'J':
2280     case 'K':
2281     case 'L':
2282     case 'M':
2283     case 'N':
2284     case 'O':
2285     case 'P':
2286     case '<':
2287     case '>':
2288       return C_Other;
2289     }
2290   }
2291 
2292   if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
2293     if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
2294       return C_Memory;
2295     return C_Register;
2296   }
2297   return C_Unknown;
2298 }
2299 
2300 /// Try to replace an X constraint, which matches anything, with another that
2301 /// has more specific requirements based on the type of the corresponding
2302 /// operand.
2303 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{
2304   if (ConstraintVT.isInteger())
2305     return "r";
2306   if (ConstraintVT.isFloatingPoint())
2307     return "f";      // works for many targets
2308   return nullptr;
2309 }
2310 
2311 /// Lower the specified operand into the Ops vector.
2312 /// If it is invalid, don't add anything to Ops.
2313 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2314                                                   std::string &Constraint,
2315                                                   std::vector<SDValue> &Ops,
2316                                                   SelectionDAG &DAG) const {
2317 
2318   if (Constraint.length() > 1) return;
2319 
2320   char ConstraintLetter = Constraint[0];
2321   switch (ConstraintLetter) {
2322   default: break;
2323   case 'X':     // Allows any operand; labels (basic block) use this.
2324     if (Op.getOpcode() == ISD::BasicBlock) {
2325       Ops.push_back(Op);
2326       return;
2327     }
2328     LLVM_FALLTHROUGH;
2329   case 'i':    // Simple Integer or Relocatable Constant
2330   case 'n':    // Simple Integer
2331   case 's': {  // Relocatable Constant
2332     // These operands are interested in values of the form (GV+C), where C may
2333     // be folded in as an offset of GV, or it may be explicitly added.  Also, it
    // is possible and fine if either GV or C is missing.
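    // For example, an 'i' operand of the form (add GV, 16) is lowered to a
    // single TargetGlobalAddress node with offset 16.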
2335     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2336     GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
2337 
2338     // If we have "(add GV, C)", pull out GV/C
2339     if (Op.getOpcode() == ISD::ADD) {
2340       C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2341       GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
2342       if (!C || !GA) {
2343         C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
2344         GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
2345       }
2346       if (!C || !GA) {
2347         C = nullptr;
2348         GA = nullptr;
2349       }
2350     }
2351 
2352     // If we find a valid operand, map to the TargetXXX version so that the
2353     // value itself doesn't get selected.
2354     if (GA) {   // Either &GV   or   &GV+C
2355       if (ConstraintLetter != 'n') {
2356         int64_t Offs = GA->getOffset();
2357         if (C) Offs += C->getZExtValue();
2358         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
2359                                                  C ? SDLoc(C) : SDLoc(),
2360                                                  Op.getValueType(), Offs));
2361       }
2362       return;
2363     }
2364     if (C) {   // just C, no GV.
2365       // Simple constants are not allowed for 's'.
2366       if (ConstraintLetter != 's') {
2367         // gcc prints these as sign extended.  Sign extend value to 64 bits
2368         // now; without this it would get ZExt'd later in
2369         // ScheduleDAGSDNodes::EmitNode, which is very generic.
2370         Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
2371                                             SDLoc(C), MVT::i64));
2372       }
2373       return;
2374     }
2375     break;
2376   }
2377   }
2378 }
2379 
2380 std::pair<unsigned, const TargetRegisterClass *>
2381 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
2382                                              StringRef Constraint,
2383                                              MVT VT) const {
2384   if (Constraint.empty() || Constraint[0] != '{')
2385     return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
2386   assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
2387 
2388   // Remove the braces from around the name.
2389   StringRef RegName(Constraint.data()+1, Constraint.size()-2);
2390 
2391   std::pair<unsigned, const TargetRegisterClass*> R =
2392     std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
2393 
2394   // Figure out which register class contains this reg.
2395   for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
2396        E = RI->regclass_end(); RCI != E; ++RCI) {
2397     const TargetRegisterClass *RC = *RCI;
2398 
2399     // If none of the value types for this register class are valid, we
2400     // can't use it.  For example, 64-bit reg classes on 32-bit targets.
2401     if (!isLegalRC(RC))
2402       continue;
2403 
2404     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
2405          I != E; ++I) {
2406       if (RegName.equals_lower(RI->getRegAsmName(*I))) {
2407         std::pair<unsigned, const TargetRegisterClass*> S =
2408           std::make_pair(*I, RC);
2409 
2410         // If this register class has the requested value type, return it,
2411         // otherwise keep searching and return the first class found
2412         // if no other is found which explicitly has the requested type.
2413         if (RC->hasType(VT))
2414           return S;
2415         else if (!R.second)
2416           R = S;
2417       }
2418     }
2419   }
2420 
2421   return R;
2422 }
2423 
2424 //===----------------------------------------------------------------------===//
2425 // Constraint Selection.
2426 
/// Return true if this is an input operand that is a matching constraint like
2428 /// "4".
2429 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
2430   assert(!ConstraintCode.empty() && "No known constraint!");
2431   return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
2432 }
2433 
2434 /// If this is an input matching constraint, this method returns the output
2435 /// operand it matches.
2436 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
2437   assert(!ConstraintCode.empty() && "No known constraint!");
2438   return atoi(ConstraintCode.c_str());
2439 }
2440 
2441 /// Split up the constraint string from the inline assembly value into the
2442 /// specific constraints and their prefixes, and also tie in the associated
2443 /// operand values.
2444 /// If this returns an empty vector, and if the constraint string itself
2445 /// isn't empty, there was an error parsing.
2446 TargetLowering::AsmOperandInfoVector
2447 TargetLowering::ParseConstraints(const DataLayout &DL,
2448                                  const TargetRegisterInfo *TRI,
2449                                  ImmutableCallSite CS) const {
2450   // Information about all of the constraints.
2451   AsmOperandInfoVector ConstraintOperands;
2452   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
2453   unsigned maCount = 0; // Largest number of multiple alternative constraints.
2454 
2455   // Do a prepass over the constraints, canonicalizing them, and building up the
2456   // ConstraintOperands list.
2457   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
2458   unsigned ResNo = 0;   // ResNo - The result number of the next output.
2459 
2460   for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2461     ConstraintOperands.emplace_back(std::move(CI));
2462     AsmOperandInfo &OpInfo = ConstraintOperands.back();
2463 
2464     // Update multiple alternative constraint count.
2465     if (OpInfo.multipleAlternatives.size() > maCount)
2466       maCount = OpInfo.multipleAlternatives.size();
2467 
2468     OpInfo.ConstraintVT = MVT::Other;
2469 
2470     // Compute the value type for each operand.
2471     switch (OpInfo.Type) {
2472     case InlineAsm::isOutput:
2473       // Indirect outputs just consume an argument.
2474       if (OpInfo.isIndirect) {
2475         OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
2476         break;
2477       }
2478 
2479       // The return value of the call is this value.  As such, there is no
2480       // corresponding argument.
2481       assert(!CS.getType()->isVoidTy() &&
2482              "Bad inline asm!");
2483       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
2484         OpInfo.ConstraintVT =
2485             getSimpleValueType(DL, STy->getElementType(ResNo));
2486       } else {
2487         assert(ResNo == 0 && "Asm only has one result!");
2488         OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
2489       }
2490       ++ResNo;
2491       break;
2492     case InlineAsm::isInput:
2493       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
2494       break;
2495     case InlineAsm::isClobber:
2496       // Nothing to do.
2497       break;
2498     }
2499 
2500     if (OpInfo.CallOperandVal) {
2501       llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
2502       if (OpInfo.isIndirect) {
2503         llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
2504         if (!PtrTy)
2505           report_fatal_error("Indirect operand for inline asm not a pointer!");
2506         OpTy = PtrTy->getElementType();
2507       }
2508 
2509       // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
2510       if (StructType *STy = dyn_cast<StructType>(OpTy))
2511         if (STy->getNumElements() == 1)
2512           OpTy = STy->getElementType(0);
2513 
2514       // If OpTy is not a single value, it may be a struct/union that we
2515       // can tile with integers.
2516       if (!OpTy->isSingleValueType() && OpTy->isSized()) {
2517         unsigned BitSize = DL.getTypeSizeInBits(OpTy);
2518         switch (BitSize) {
2519         default: break;
2520         case 1:
2521         case 8:
2522         case 16:
2523         case 32:
2524         case 64:
2525         case 128:
2526           OpInfo.ConstraintVT =
2527             MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
2528           break;
2529         }
2530       } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
2531         unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
2532         OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
2533       } else {
2534         OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
2535       }
2536     }
2537   }
2538 
2539   // If we have multiple alternative constraints, select the best alternative.
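  // (With GCC's comma syntax each operand lists the same number of
  // alternatives, e.g. "=r,m"; the alternative whose per-operand weights sum
  // highest across all operands wins and is selected for every operand.)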
2540   if (!ConstraintOperands.empty()) {
2541     if (maCount) {
2542       unsigned bestMAIndex = 0;
2543       int bestWeight = -1;
2544       // weight: -1 = invalid match; 0 = so-so match up to 5 = good match.
2545       int weight = -1;
2546       unsigned maIndex;
2547       // Compute the sums of the weights for each alternative, keeping track
2548       // of the best (highest weight) one so far.
2549       for (maIndex = 0; maIndex < maCount; ++maIndex) {
2550         int weightSum = 0;
2551         for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2552             cIndex != eIndex; ++cIndex) {
2553           AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
2554           if (OpInfo.Type == InlineAsm::isClobber)
2555             continue;
2556 
2557           // If this is an output operand with a matching input operand,
2558           // look up the matching input. If their types mismatch, e.g. one
2559           // is an integer, the other is floating point, or their sizes are
2560           // different, the whole alternative can't be matched.
2561           if (OpInfo.hasMatchingInput()) {
2562             AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
2563             if (OpInfo.ConstraintVT != Input.ConstraintVT) {
2564               if ((OpInfo.ConstraintVT.isInteger() !=
2565                    Input.ConstraintVT.isInteger()) ||
2566                   (OpInfo.ConstraintVT.getSizeInBits() !=
2567                    Input.ConstraintVT.getSizeInBits())) {
2568                 weightSum = -1;  // Can't match.
2569                 break;
2570               }
2571             }
2572           }
2573           weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
2574           if (weight == -1) {
2575             weightSum = -1;
2576             break;
2577           }
2578           weightSum += weight;
2579         }
2580         // Update best.
2581         if (weightSum > bestWeight) {
2582           bestWeight = weightSum;
2583           bestMAIndex = maIndex;
2584         }
2585       }
2586 
2587       // Now select chosen alternative in each constraint.
2588       for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2589           cIndex != eIndex; ++cIndex) {
2590         AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
2591         if (cInfo.Type == InlineAsm::isClobber)
2592           continue;
2593         cInfo.selectAlternative(bestMAIndex);
2594       }
2595     }
2596   }
2597 
2598   // Check and hook up tied operands, choose constraint code to use.
2599   for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2600       cIndex != eIndex; ++cIndex) {
2601     AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
2602 
2603     // If this is an output operand with a matching input operand, look up the
2604     // matching input. If their types mismatch, e.g. one is an integer, the
2605     // other is floating point, or their sizes are different, flag it as an
2606     // error.
2607     if (OpInfo.hasMatchingInput()) {
2608       AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
2609 
2610       if (OpInfo.ConstraintVT != Input.ConstraintVT) {
2611         std::pair<unsigned, const TargetRegisterClass *> MatchRC =
2612             getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
2613                                          OpInfo.ConstraintVT);
2614         std::pair<unsigned, const TargetRegisterClass *> InputRC =
2615             getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
2616                                          Input.ConstraintVT);
2617         if ((OpInfo.ConstraintVT.isInteger() !=
2618              Input.ConstraintVT.isInteger()) ||
2619             (MatchRC.second != InputRC.second)) {
2620           report_fatal_error("Unsupported asm: input constraint"
2621                              " with a matching output constraint of"
2622                              " incompatible type!");
2623         }
2624       }
2625     }
2626   }
2627 
2628   return ConstraintOperands;
2629 }
2630 
2631 /// Return an integer indicating how general CT is.
2632 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
2633   switch (CT) {
2634   case TargetLowering::C_Other:
2635   case TargetLowering::C_Unknown:
2636     return 0;
2637   case TargetLowering::C_Register:
2638     return 1;
2639   case TargetLowering::C_RegisterClass:
2640     return 2;
2641   case TargetLowering::C_Memory:
2642     return 3;
2643   }
2644   llvm_unreachable("Invalid constraint type");
2645 }
2646 
2647 /// Examine constraint type and operand type and determine a weight value.
2648 /// This object must already have been set up with the operand type
2649 /// and the current alternative constraint selected.
2650 TargetLowering::ConstraintWeight
2651   TargetLowering::getMultipleConstraintMatchWeight(
2652     AsmOperandInfo &info, int maIndex) const {
2653   InlineAsm::ConstraintCodeVector *rCodes;
2654   if (maIndex >= (int)info.multipleAlternatives.size())
2655     rCodes = &info.Codes;
2656   else
2657     rCodes = &info.multipleAlternatives[maIndex].Codes;
2658   ConstraintWeight BestWeight = CW_Invalid;
2659 
2660   // Loop over the options, keeping track of the most general one.
2661   for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
2662     ConstraintWeight weight =
2663       getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
2664     if (weight > BestWeight)
2665       BestWeight = weight;
2666   }
2667 
2668   return BestWeight;
2669 }
2670 
2671 /// Examine constraint type and operand type and determine a weight value.
2672 /// This object must already have been set up with the operand type
2673 /// and the current alternative constraint selected.
2674 TargetLowering::ConstraintWeight
2675   TargetLowering::getSingleConstraintMatchWeight(
2676     AsmOperandInfo &info, const char *constraint) const {
2677   ConstraintWeight weight = CW_Invalid;
2678   Value *CallOperandVal = info.CallOperandVal;
2679   // If we don't have a value, we can't do a match,
2680   // but allow it at the lowest weight.
2681   if (!CallOperandVal)
2682     return CW_Default;
2683   // Look at the constraint type.
2684   switch (*constraint) {
2685     case 'i': // immediate integer.
2686     case 'n': // immediate integer with a known value.
2687       if (isa<ConstantInt>(CallOperandVal))
2688         weight = CW_Constant;
2689       break;
2690     case 's': // non-explicit integral immediate.
2691       if (isa<GlobalValue>(CallOperandVal))
2692         weight = CW_Constant;
2693       break;
2694     case 'E': // immediate float, if host and target formats match.
2695     case 'F': // immediate float.
2696       if (isa<ConstantFP>(CallOperandVal))
2697         weight = CW_Constant;
2698       break;
2699     case '<': // memory operand with autodecrement.
2700     case '>': // memory operand with autoincrement.
2701     case 'm': // memory operand.
2702     case 'o': // offsettable memory operand
2703     case 'V': // non-offsettable memory operand
2704       weight = CW_Memory;
2705       break;
2706     case 'r': // general register.
2707     case 'g': // general register, memory operand or immediate integer.
2708               // note: Clang converts "g" to "imr".
2709       if (CallOperandVal->getType()->isIntegerTy())
2710         weight = CW_Register;
2711       break;
2712     case 'X': // any operand.
2713     default:
2714       weight = CW_Default;
2715       break;
2716   }
2717   return weight;
2718 }
2719 
2720 /// If there are multiple different constraints that we could pick for this
2721 /// operand (e.g. "imr") try to pick the 'best' one.
2722 /// This is somewhat tricky: constraints fall into four classes:
2723 ///    Other         -> immediates and magic values
2724 ///    Register      -> one specific register
2725 ///    RegisterClass -> a group of regs
2726 ///    Memory        -> memory
2727 /// Ideally, we would pick the most specific constraint possible: if we have
2728 /// something that fits into a register, we would pick it.  The problem here
2729 /// is that if we have something that could either be in a register or in
2730 /// memory, using the register could cause selection of *other*
2731 /// operands to fail: they might only succeed if we pick memory.  Because of
2732 /// this the heuristic we use is:
2733 ///
2734 ///  1) If there is an 'other' constraint, and if the operand is valid for
2735 ///     that constraint, use it.  This makes us take advantage of 'i'
2736 ///     constraints when available.
2737 ///  2) Otherwise, pick the most general constraint present.  This prefers
2738 ///     'm' over 'r', for example.
2739 ///
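/// For example, with the constraint string "imr": a small constant operand is
/// matched by 'i' under rule (1), while a non-constant operand falls through
/// to rule (2), where 'm' (memory) is considered more general than 'r'
/// (register class) and is therefore chosen.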
2740 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
2741                              const TargetLowering &TLI,
2742                              SDValue Op, SelectionDAG *DAG) {
2743   assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
2744   unsigned BestIdx = 0;
2745   TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
2746   int BestGenerality = -1;
2747 
2748   // Loop over the options, keeping track of the most general one.
2749   for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
2750     TargetLowering::ConstraintType CType =
2751       TLI.getConstraintType(OpInfo.Codes[i]);
2752 
2753     // If this is an 'other' constraint, see if the operand is valid for it.
2754     // For example, on X86 we might have an 'rI' constraint.  If the operand
2755     // is an integer in the range [0..31] we want to use I (saving a load
2756     // of a register), otherwise we must use 'r'.
2757     if (CType == TargetLowering::C_Other && Op.getNode()) {
2758       assert(OpInfo.Codes[i].size() == 1 &&
2759              "Unhandled multi-letter 'other' constraint");
2760       std::vector<SDValue> ResultOps;
2761       TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
2762                                        ResultOps, *DAG);
2763       if (!ResultOps.empty()) {
2764         BestType = CType;
2765         BestIdx = i;
2766         break;
2767       }
2768     }
2769 
2770     // Things with matching constraints can only be registers, per gcc
2771     // documentation.  This mainly affects "g" constraints.
2772     if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
2773       continue;
2774 
2775     // This constraint letter is more general than the previous one, use it.
2776     int Generality = getConstraintGenerality(CType);
2777     if (Generality > BestGenerality) {
2778       BestType = CType;
2779       BestIdx = i;
2780       BestGenerality = Generality;
2781     }
2782   }
2783 
2784   OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
2785   OpInfo.ConstraintType = BestType;
2786 }
2787 
2788 /// Determines the constraint code and constraint type to use for the specific
2789 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2790 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2791                                             SDValue Op,
2792                                             SelectionDAG *DAG) const {
2793   assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
2794 
2795   // Single-letter constraints ('r') are very common.
2796   if (OpInfo.Codes.size() == 1) {
2797     OpInfo.ConstraintCode = OpInfo.Codes[0];
2798     OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2799   } else {
2800     ChooseConstraint(OpInfo, *this, Op, DAG);
2801   }
2802 
2803   // 'X' matches anything.
2804   if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
2805     // Labels and constants are handled elsewhere ('X' is the only thing
2806     // that matches labels).  For Functions, the type here is the type of
2807     // the result, which is not what we want to look at; leave them alone.
2808     Value *v = OpInfo.CallOperandVal;
2809     if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
2810       OpInfo.CallOperandVal = v;
2811       return;
2812     }
2813 
2814     // Otherwise, try to resolve it to something we know about by looking at
2815     // the actual operand type.
2816     if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
2817       OpInfo.ConstraintCode = Repl;
2818       OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2819     }
2820   }
2821 }
2822 
2823 /// \brief Given an exact SDIV by a constant, create a multiplication
2824 /// with the multiplicative inverse of the constant.
2825 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
2826                               const SDLoc &dl, SelectionDAG &DAG,
2827                               std::vector<SDNode *> &Created) {
2828   assert(d != 0 && "Division by zero!");
2829 
2830   // If the divisor is even, shift both operands right so the divisor is odd.
2831   unsigned ShAmt = d.countTrailingZeros();
2832   if (ShAmt) {
2833     // TODO: For UDIV use SRL instead of SRA.
2834     SDValue Amt =
2835         DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
2836                                                         DAG.getDataLayout()));
2837     SDNodeFlags Flags;
2838     Flags.setExact(true);
2839     Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
2840     Created.push_back(Op1.getNode());
2841     d = d.ashr(ShAmt);
2842   }
2843 
2844   // Calculate the multiplicative inverse, using Newton's method.
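  // Each step xn <- xn * (2 - d*xn) doubles the number of correct low-order
  // bits, so for the (now odd) d this quickly converges to the unique inverse
  // of d modulo 2^BitWidth. For example, with d = 3 at a width of 32 bits the
  // loop produces 0xAAAAAAAB, and 3 * 0xAAAAAAAB == 1 (mod 2^32).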
2845   APInt t, xn = d;
2846   while ((t = d*xn) != 1)
2847     xn *= APInt(d.getBitWidth(), 2) - t;
2848 
2849   SDValue Op2 = DAG.getConstant(xn, dl, Op1.getValueType());
2850   SDValue Mul = DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
2851   Created.push_back(Mul.getNode());
2852   return Mul;
2853 }
2854 
2855 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
2856                                       SelectionDAG &DAG,
2857                                       std::vector<SDNode *> *Created) const {
2858   AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2859   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2860   if (TLI.isIntDivCheap(N->getValueType(0), Attr))
2861     return SDValue(N, 0); // Lower SDIV as SDIV
2862   return SDValue();
2863 }
2864 
2865 /// \brief Given an ISD::SDIV node expressing a divide by constant,
2866 /// return a DAG expression to select that will generate the same value by
2867 /// multiplying by a magic number.
2868 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
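/// For example, a signed i32 divide by 7 uses the magic multiplier 0x92492493
/// with a post-shift of 2; because that multiplier is negative while the
/// divisor is positive, the numerator is added back to the high half of the
/// product before shifting, and the sign bit of the shifted value is added at
/// the end to round the quotient toward zero.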
2869 SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
2870                                   SelectionDAG &DAG, bool IsAfterLegalization,
2871                                   std::vector<SDNode *> *Created) const {
2872   assert(Created && "No vector to hold sdiv ops.");
2873 
2874   EVT VT = N->getValueType(0);
2875   SDLoc dl(N);
2876 
2877   // Check to see if we can do this.
2878   // FIXME: We should be more aggressive here.
2879   if (!isTypeLegal(VT))
2880     return SDValue();
2881 
2882   // If the sdiv has an 'exact' bit we can use a simpler lowering.
2883   if (cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact())
2884     return BuildExactSDIV(*this, N->getOperand(0), Divisor, dl, DAG, *Created);
2885 
2886   APInt::ms magics = Divisor.magic();
2887 
2888   // Multiply the numerator (operand 0) by the magic value
2889   // FIXME: We should support doing a MUL in a wider type
2890   SDValue Q;
2891   if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
2892                             isOperationLegalOrCustom(ISD::MULHS, VT))
2893     Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
2894                     DAG.getConstant(magics.m, dl, VT));
2895   else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
2896                                  isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
2897     Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
2898                               N->getOperand(0),
2899                               DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
2900   else
2901     return SDValue();       // No mulhs or equivalent
2902   // If d > 0 and m < 0, add the numerator
2903   if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
2904     Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
2905     Created->push_back(Q.getNode());
2906   }
2907   // If d < 0 and m > 0, subtract the numerator.
2908   if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
2909     Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
2910     Created->push_back(Q.getNode());
2911   }
2912   auto &DL = DAG.getDataLayout();
2913   // Shift right algebraic if shift value is nonzero
2914   if (magics.s > 0) {
2915     Q = DAG.getNode(
2916         ISD::SRA, dl, VT, Q,
2917         DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
2918     Created->push_back(Q.getNode());
2919   }
2920   // Extract the sign bit and add it to the quotient
2921   SDValue T =
2922       DAG.getNode(ISD::SRL, dl, VT, Q,
2923                   DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
2924                                   getShiftAmountTy(Q.getValueType(), DL)));
2925   Created->push_back(T.getNode());
2926   return DAG.getNode(ISD::ADD, dl, VT, Q, T);
2927 }
2928 
2929 /// \brief Given an ISD::UDIV node expressing a divide by constant,
2930 /// return a DAG expression to select that will generate the same value by
2931 /// multiplying by a magic number.
2932 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
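/// For example, an unsigned i32 divide by 7 uses the magic multiplier
/// 0x24924925 and needs the "add" fixup, so the quotient is computed as
///   q = MULHU(x, 0x24924925); t = ((x - q) >> 1) + q; result = t >> 2.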
2933 SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
2934                                   SelectionDAG &DAG, bool IsAfterLegalization,
2935                                   std::vector<SDNode *> *Created) const {
2936   assert(Created && "No vector to hold udiv ops.");
2937 
2938   EVT VT = N->getValueType(0);
2939   SDLoc dl(N);
2940   auto &DL = DAG.getDataLayout();
2941 
2942   // Check to see if we can do this.
2943   // FIXME: We should be more aggressive here.
2944   if (!isTypeLegal(VT))
2945     return SDValue();
2946 
2947   // FIXME: We should use a narrower constant when the upper
2948   // bits are known to be zero.
2949   APInt::mu magics = Divisor.magicu();
2950 
2951   SDValue Q = N->getOperand(0);
2952 
2953   // If the divisor is even, we can avoid using the expensive fixup by shifting
2954   // the divided value upfront.
2955   if (magics.a != 0 && !Divisor[0]) {
2956     unsigned Shift = Divisor.countTrailingZeros();
2957     Q = DAG.getNode(
2958         ISD::SRL, dl, VT, Q,
2959         DAG.getConstant(Shift, dl, getShiftAmountTy(Q.getValueType(), DL)));
2960     Created->push_back(Q.getNode());
2961 
2962     // Get magic number for the shifted divisor.
2963     magics = Divisor.lshr(Shift).magicu(Shift);
2964     assert(magics.a == 0 && "Should use cheap fixup now");
2965   }
2966 
2967   // Multiply the numerator (operand 0) by the magic value
2968   // FIXME: We should support doing a MUL in a wider type
2969   if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
2970                             isOperationLegalOrCustom(ISD::MULHU, VT))
2971     Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, dl, VT));
2972   else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
2973                                  isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
2974     Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
2975                             DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
2976   else
2977     return SDValue();       // No mulhu or equivalent
2978 
2979   Created->push_back(Q.getNode());
2980 
2981   if (magics.a == 0) {
2982     assert(magics.s < Divisor.getBitWidth() &&
2983            "We shouldn't generate an undefined shift!");
2984     return DAG.getNode(
2985         ISD::SRL, dl, VT, Q,
2986         DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
2987   } else {
2988     SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
2989     Created->push_back(NPQ.getNode());
2990     NPQ = DAG.getNode(
2991         ISD::SRL, dl, VT, NPQ,
2992         DAG.getConstant(1, dl, getShiftAmountTy(NPQ.getValueType(), DL)));
2993     Created->push_back(NPQ.getNode());
2994     NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
2995     Created->push_back(NPQ.getNode());
2996     return DAG.getNode(
2997         ISD::SRL, dl, VT, NPQ,
2998         DAG.getConstant(magics.s - 1, dl,
2999                         getShiftAmountTy(NPQ.getValueType(), DL)));
3000   }
3001 }
3002 
3003 bool TargetLowering::
3004 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
3005   if (!isa<ConstantSDNode>(Op.getOperand(0))) {
3006     DAG.getContext()->emitError("argument to '__builtin_return_address' must "
3007                                 "be a constant integer");
3008     return true;
3009   }
3010 
3011   return false;
3012 }
3013 
3014 //===----------------------------------------------------------------------===//
3015 // Legalization Utilities
3016 //===----------------------------------------------------------------------===//
3017 
3018 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
3019                                SelectionDAG &DAG, SDValue LL, SDValue LH,
3020                                SDValue RL, SDValue RH) const {
3021   EVT VT = N->getValueType(0);
3022   SDLoc dl(N);
3023 
3024   bool HasMULHS = isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
3025   bool HasMULHU = isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
3026   bool HasSMUL_LOHI = isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
3027   bool HasUMUL_LOHI = isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
3028   if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
3029     unsigned OuterBitSize = VT.getSizeInBits();
3030     unsigned InnerBitSize = HiLoVT.getSizeInBits();
3031     unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
3032     unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));
3033 
3034     // LL, LH, RL, and RH must be either all NULL or all set to a value.
3035     assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
3036            (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
3037 
3038     if (!LL.getNode() && !RL.getNode() &&
3039         isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
3040       LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(0));
3041       RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(1));
3042     }
3043 
3044     if (!LL.getNode())
3045       return false;
3046 
3047     APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
3048     if (DAG.MaskedValueIsZero(N->getOperand(0), HighMask) &&
3049         DAG.MaskedValueIsZero(N->getOperand(1), HighMask)) {
3050       // The inputs are both zero-extended.
3051       if (HasUMUL_LOHI) {
3052         // We can emit a umul_lohi.
3053         Lo = DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(HiLoVT, HiLoVT), LL,
3054                          RL);
3055         Hi = SDValue(Lo.getNode(), 1);
3056         return true;
3057       }
3058       if (HasMULHU) {
3059         // We can emit a mulhu+mul.
3060         Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
3061         Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
3062         return true;
3063       }
3064     }
3065     if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
3066       // The input values are both sign-extended.
3067       if (HasSMUL_LOHI) {
3068         // We can emit a smul_lohi.
3069         Lo = DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(HiLoVT, HiLoVT), LL,
3070                          RL);
3071         Hi = SDValue(Lo.getNode(), 1);
3072         return true;
3073       }
3074       if (HasMULHS) {
3075         // We can emit a mulhs+mul.
3076         Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
3077         Hi = DAG.getNode(ISD::MULHS, dl, HiLoVT, LL, RL);
3078         return true;
3079       }
3080     }
3081 
3082     if (!LH.getNode() && !RH.getNode() &&
3083         isOperationLegalOrCustom(ISD::SRL, VT) &&
3084         isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
3085       auto &DL = DAG.getDataLayout();
3086       unsigned ShiftAmt = VT.getSizeInBits() - HiLoVT.getSizeInBits();
3087       SDValue Shift = DAG.getConstant(ShiftAmt, dl, getShiftAmountTy(VT, DL));
3088       LH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(0), Shift);
3089       LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
3090       RH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(1), Shift);
3091       RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
3092     }
3093 
3094     if (!LH.getNode())
3095       return false;
3096 
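    // General case: writing the operands as LH*2^W + LL and RH*2^W + RL with
    // W = HiLoVT bits, the product modulo 2^(2W) is
    //   LL*RL + ((LL*RH + LH*RL) << W),
    // so Lo is the low half of LL*RL and Hi is its high half plus the two
    // (truncated) cross products; the LH*RH term only affects higher bits.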
3097     if (HasUMUL_LOHI) {
3098       // Lo,Hi = umul LHS, RHS.
3099       SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, dl,
3100                                      DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
3101       Lo = UMulLOHI;
3102       Hi = UMulLOHI.getValue(1);
3103       RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
3104       LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
3105       Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
3106       Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
3107       return true;
3108     }
3109     if (HasMULHU) {
3110       Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
3111       Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
3112       RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
3113       LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
3114       Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
3115       Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
3116       return true;
3117     }
3118   }
3119   return false;
3120 }
3121 
3122 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
3123                                SelectionDAG &DAG) const {
3124   EVT VT = Node->getOperand(0).getValueType();
3125   EVT NVT = Node->getValueType(0);
3126   SDLoc dl(SDValue(Node, 0));
3127 
3128   // FIXME: Only f32 to i64 conversions are supported.
3129   if (VT != MVT::f32 || NVT != MVT::i64)
3130     return false;
3131 
3132   // Expand f32 -> i64 conversion
3133   // This algorithm comes from compiler-rt's implementation of fixsfdi:
3134   // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c
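  // Bit 31 of the f32 is the sign, bits 30..23 are the biased exponent and
  // bits 22..0 the mantissa. Below, the significand (mantissa with the
  // implicit leading one OR'd in) is shifted left by (exponent - 23) when the
  // unbiased exponent exceeds 23 and right otherwise; the sign is then applied
  // with the usual (x ^ sign) - sign trick, and a negative unbiased exponent
  // (magnitude below one) selects a result of zero.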
3135   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(),
3136                                 VT.getSizeInBits());
3137   SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
3138   SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
3139   SDValue Bias = DAG.getConstant(127, dl, IntVT);
3140   SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()), dl,
3141                                      IntVT);
3142   SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT);
3143   SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
3144 
3145   SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));
3146 
3147   auto &DL = DAG.getDataLayout();
3148   SDValue ExponentBits = DAG.getNode(
3149       ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
3150       DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT, DL)));
3151   SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
3152 
3153   SDValue Sign = DAG.getNode(
3154       ISD::SRA, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
3155       DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT, DL)));
3156   Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);
3157 
3158   SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
3159       DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
3160       DAG.getConstant(0x00800000, dl, IntVT));
3161 
3162   R = DAG.getZExtOrTrunc(R, dl, NVT);
3163 
3164   R = DAG.getSelectCC(
3165       dl, Exponent, ExponentLoBit,
3166       DAG.getNode(ISD::SHL, dl, NVT, R,
3167                   DAG.getZExtOrTrunc(
3168                       DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
3169                       dl, getShiftAmountTy(IntVT, DL))),
3170       DAG.getNode(ISD::SRL, dl, NVT, R,
3171                   DAG.getZExtOrTrunc(
3172                       DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
3173                       dl, getShiftAmountTy(IntVT, DL))),
3174       ISD::SETGT);
3175 
3176   SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
3177       DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
3178       Sign);
3179 
3180   Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
3181       DAG.getConstant(0, dl, NVT), Ret, ISD::SETLT);
3182   return true;
3183 }
3184 
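// Turn a (possibly extending) vector load into a sequence of scalar element
// loads at successive Stride-byte offsets, chained together with a TokenFactor
// and recombined with a BUILD_VECTOR; e.g. a <4 x i16> load becomes four i16
// loads two bytes apart.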
3185 SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
3186                                             SelectionDAG &DAG) const {
3187   SDLoc SL(LD);
3188   SDValue Chain = LD->getChain();
3189   SDValue BasePTR = LD->getBasePtr();
3190   EVT SrcVT = LD->getMemoryVT();
3191   ISD::LoadExtType ExtType = LD->getExtensionType();
3192 
3193   unsigned NumElem = SrcVT.getVectorNumElements();
3194 
3195   EVT SrcEltVT = SrcVT.getScalarType();
3196   EVT DstEltVT = LD->getValueType(0).getScalarType();
3197 
3198   unsigned Stride = SrcEltVT.getSizeInBits() / 8;
3199   assert(SrcEltVT.isByteSized());
3200 
3201   EVT PtrVT = BasePTR.getValueType();
3202 
3203   SmallVector<SDValue, 8> Vals;
3204   SmallVector<SDValue, 8> LoadChains;
3205 
3206   for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3207     SDValue ScalarLoad =
3208         DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
3209                        LD->getPointerInfo().getWithOffset(Idx * Stride),
3210                        SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride),
3211                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
3212 
3213     BasePTR = DAG.getNode(ISD::ADD, SL, PtrVT, BasePTR,
3214                           DAG.getConstant(Stride, SL, PtrVT));
3215 
3216     Vals.push_back(ScalarLoad.getValue(0));
3217     LoadChains.push_back(ScalarLoad.getValue(1));
3218   }
3219 
3220   SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
3221   SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, SL, LD->getValueType(0), Vals);
3222 
3223   return DAG.getMergeValues({ Value, NewChain }, SL);
3224 }
3225 
3226 // FIXME: This relies on each element having a byte size, otherwise the stride
3227 // is 0 and just overwrites the same location. ExpandStore currently expects
3228 // this broken behavior.
3229 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
3230                                              SelectionDAG &DAG) const {
3231   SDLoc SL(ST);
3232 
3233   SDValue Chain = ST->getChain();
3234   SDValue BasePtr = ST->getBasePtr();
3235   SDValue Value = ST->getValue();
3236   EVT StVT = ST->getMemoryVT();
3237 
3238   // The type of the data we want to save
3239   EVT RegVT = Value.getValueType();
3240   EVT RegSclVT = RegVT.getScalarType();
3241 
3242   // The type of data as saved in memory.
3243   EVT MemSclVT = StVT.getScalarType();
3244 
3245   EVT PtrVT = BasePtr.getValueType();
3246 
3247   // Store Stride in bytes
3248   unsigned Stride = MemSclVT.getSizeInBits() / 8;
3249   EVT IdxVT = getVectorIdxTy(DAG.getDataLayout());
3250   unsigned NumElem = StVT.getVectorNumElements();
3251 
3252   // Extract each of the elements from the original vector and save them into
3253   // memory individually.
3254   SmallVector<SDValue, 8> Stores;
3255   for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3256     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
3257                               DAG.getConstant(Idx, SL, IdxVT));
3258 
3259     SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
3260                               DAG.getConstant(Idx * Stride, SL, PtrVT));
3261 
3262     // This scalar TruncStore may be illegal, but we legalize it later.
3263     SDValue Store = DAG.getTruncStore(
3264         Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
3265         MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride),
3266         ST->getMemOperand()->getFlags(), ST->getAAInfo());
3267 
3268     Stores.push_back(Store);
3269   }
3270 
3271   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
3272 }
3273 
3274 std::pair<SDValue, SDValue>
3275 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
3276   assert(LD->getAddressingMode() == ISD::UNINDEXED &&
3277          "unaligned indexed loads not implemented!");
3278   SDValue Chain = LD->getChain();
3279   SDValue Ptr = LD->getBasePtr();
3280   EVT VT = LD->getValueType(0);
3281   EVT LoadedVT = LD->getMemoryVT();
3282   SDLoc dl(LD);
3283   if (VT.isFloatingPoint() || VT.isVector()) {
3284     EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
3285     if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
3286       if (!isOperationLegalOrCustom(ISD::LOAD, intVT)) {
3287         // Scalarize the load and let the individual components be handled.
3288         SDValue Scalarized = scalarizeVectorLoad(LD, DAG);
3289         return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
3290       }
3291 
3292       // Expand to a (misaligned) integer load of the same size,
3293       // then bitconvert to floating point or vector.
3294       SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
3295                                     LD->getMemOperand());
3296       SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
3297       if (LoadedVT != VT)
3298         Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
3299                              ISD::ANY_EXTEND, dl, VT, Result);
3300 
3301       return std::make_pair(Result, newLoad.getValue(1));
3302     }
3303 
3304     // Copy the value to an (aligned) stack slot using (unaligned) integer
3305     // loads and stores, then do an (aligned) load from the stack slot.
3306     MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
3307     unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
3308     unsigned RegBytes = RegVT.getSizeInBits() / 8;
3309     unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
3310 
3311     // Make sure the stack slot is also aligned for the register type.
3312     SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
3313 
3314     SmallVector<SDValue, 8> Stores;
3315     SDValue StackPtr = StackBase;
3316     unsigned Offset = 0;
3317 
3318     EVT PtrVT = Ptr.getValueType();
3319     EVT StackPtrVT = StackPtr.getValueType();
3320 
3321     SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
3322     SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
3323 
3324     // Do all but one of the copies using the full register width.
3325     for (unsigned i = 1; i < NumRegs; i++) {
3326       // Load one integer register's worth from the original location.
3327       SDValue Load = DAG.getLoad(
3328           RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
3329           MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(),
3330           LD->getAAInfo());
3331       // Follow the load with a store to the stack slot.  Remember the store.
3332       Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
3333                                     MachinePointerInfo()));
3334       // Increment the pointers.
3335       Offset += RegBytes;
3336       Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
3337       StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT, StackPtr,
3338                              StackPtrIncrement);
3339     }
3340 
3341     // The last copy may be partial.  Do an extending load.
3342     EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
3343                                   8 * (LoadedBytes - Offset));
3344     SDValue Load =
3345         DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
3346                        LD->getPointerInfo().getWithOffset(Offset), MemVT,
3347                        MinAlign(LD->getAlignment(), Offset),
3348                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
3349     // Follow the load with a store to the stack slot.  Remember the store.
3350     // On big-endian machines this requires a truncating store to ensure
3351     // that the bits end up in the right place.
3352     Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
3353                                        MachinePointerInfo(), MemVT));
3354 
3355     // The order of the stores doesn't matter - say it with a TokenFactor.
3356     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
3357 
3358     // Finally, perform the original load only redirected to the stack slot.
3359     Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
3360                           MachinePointerInfo(), LoadedVT);
3361 
3362     // Callers expect a MERGE_VALUES node.
3363     return std::make_pair(Load, TF);
3364   }
3365 
3366   assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
3367          "Unaligned load of unsupported type.");
3368 
3369   // Compute the new VT that is half the size of the old one.  This is an
3370   // integer MVT.
3371   unsigned NumBits = LoadedVT.getSizeInBits();
3372   EVT NewLoadedVT;
3373   NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
3374   NumBits >>= 1;
3375 
3376   unsigned Alignment = LD->getAlignment();
3377   unsigned IncrementSize = NumBits / 8;
3378   ISD::LoadExtType HiExtType = LD->getExtensionType();
3379 
3380   // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
3381   if (HiExtType == ISD::NON_EXTLOAD)
3382     HiExtType = ISD::ZEXTLOAD;
3383 
3384   // Load the value in two parts
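  // For example, an unaligned i32 load becomes two half-width (i16) extending
  // loads at Ptr and Ptr+2; on little-endian targets the low half comes from
  // Ptr, and the result is reassembled below as (Hi << NumBits) | Lo.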
3385   SDValue Lo, Hi;
3386   if (DAG.getDataLayout().isLittleEndian()) {
3387     Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
3388                         NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
3389                         LD->getAAInfo());
3390     Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
3391                       DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
3392     Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
3393                         LD->getPointerInfo().getWithOffset(IncrementSize),
3394                         NewLoadedVT, MinAlign(Alignment, IncrementSize),
3395                         LD->getMemOperand()->getFlags(), LD->getAAInfo());
3396   } else {
3397     Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
3398                         NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
3399                         LD->getAAInfo());
3400     Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
3401                       DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
3402     Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
3403                         LD->getPointerInfo().getWithOffset(IncrementSize),
3404                         NewLoadedVT, MinAlign(Alignment, IncrementSize),
3405                         LD->getMemOperand()->getFlags(), LD->getAAInfo());
3406   }
3407 
3408   // aggregate the two parts
3409   SDValue ShiftAmount =
3410       DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
3411                                                     DAG.getDataLayout()));
3412   SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
3413   Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
3414 
3415   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
3416                              Hi.getValue(1));
3417 
3418   return std::make_pair(Result, TF);
3419 }
3420 
3421 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
3422                                              SelectionDAG &DAG) const {
3423   assert(ST->getAddressingMode() == ISD::UNINDEXED &&
3424          "unaligned indexed stores not implemented!");
3425   SDValue Chain = ST->getChain();
3426   SDValue Ptr = ST->getBasePtr();
3427   SDValue Val = ST->getValue();
3428   EVT VT = Val.getValueType();
3429   int Alignment = ST->getAlignment();
3430 
3431   SDLoc dl(ST);
3432   if (ST->getMemoryVT().isFloatingPoint() ||
3433       ST->getMemoryVT().isVector()) {
3434     EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
3435     if (isTypeLegal(intVT)) {
3436       if (!isOperationLegalOrCustom(ISD::STORE, intVT)) {
3437         // Scalarize the store and let the individual components be handled.
3438         SDValue Result = scalarizeVectorStore(ST, DAG);
3439 
3440         return Result;
3441       }
3442       // Expand to a bitconvert of the value to the integer type of the
3443       // same size, then a (misaligned) int store.
3444       // FIXME: Does not handle truncating floating point stores!
3445       SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
3446       Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
3447                             Alignment, ST->getMemOperand()->getFlags());
3448       return Result;
3449     }
3450     // Do an (aligned) store to a stack slot, then copy from the stack slot
3451     // to the final destination using (unaligned) integer loads and stores.
3452     EVT StoredVT = ST->getMemoryVT();
3453     MVT RegVT =
3454       getRegisterType(*DAG.getContext(),
3455                       EVT::getIntegerVT(*DAG.getContext(),
3456                                         StoredVT.getSizeInBits()));
3457     EVT PtrVT = Ptr.getValueType();
3458     unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
3459     unsigned RegBytes = RegVT.getSizeInBits() / 8;
3460     unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
3461 
3462     // Make sure the stack slot is also aligned for the register type.
3463     SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);
3464 
3465     // Perform the original store, only redirected to the stack slot.
3466     SDValue Store = DAG.getTruncStore(Chain, dl, Val, StackPtr,
3467                                       MachinePointerInfo(), StoredVT);
3468 
3469     EVT StackPtrVT = StackPtr.getValueType();
3470 
3471     SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
3472     SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
3473     SmallVector<SDValue, 8> Stores;
3474     unsigned Offset = 0;
3475 
3476     // Do all but one of the copies using the full register width.
3477     for (unsigned i = 1; i < NumRegs; i++) {
3478       // Load one integer register's worth from the stack slot.
3479       SDValue Load =
3480           DAG.getLoad(RegVT, dl, Store, StackPtr, MachinePointerInfo());
3481       // Store it to the final location.  Remember the store.
3482       Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
3483                                     ST->getPointerInfo().getWithOffset(Offset),
3484                                     MinAlign(ST->getAlignment(), Offset),
3485                                     ST->getMemOperand()->getFlags()));
3486       // Increment the pointers.
3487       Offset += RegBytes;
3488       StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT,
3489                              StackPtr, StackPtrIncrement);
3490       Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
3491     }
3492 
3493     // The last store may be partial.  Do a truncating store.  On big-endian
3494     // machines this requires an extending load from the stack slot to ensure
3495     // that the bits are in the right place.
3496     EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
3497                                   8 * (StoredBytes - Offset));
3498 
3499     // Load from the stack slot.
3500     SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
3501                                   MachinePointerInfo(), MemVT);
3502 
3503     Stores.push_back(
3504         DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
3505                           ST->getPointerInfo().getWithOffset(Offset), MemVT,
3506                           MinAlign(ST->getAlignment(), Offset),
3507                           ST->getMemOperand()->getFlags(), ST->getAAInfo()));
3508     // The order of the stores doesn't matter - say it with a TokenFactor.
3509     SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
3510     return Result;
3511   }
3512 
3513   assert(ST->getMemoryVT().isInteger() &&
3514          !ST->getMemoryVT().isVector() &&
3515          "Unaligned store of unknown type.");
3516   // Get the half-size VT
3517   EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
3518   int NumBits = NewStoredVT.getSizeInBits();
3519   int IncrementSize = NumBits / 8;
3520 
3521   // Divide the stored value in two parts.
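  // For example, an unaligned i32 store becomes two i16 truncating stores: on
  // a little-endian target the low half of Val goes to Ptr and the high half
  // (Val >> 16) goes to Ptr+2, with the two halves swapped on big-endian.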
3522   SDValue ShiftAmount =
3523       DAG.getConstant(NumBits, dl, getShiftAmountTy(Val.getValueType(),
3524                                                     DAG.getDataLayout()));
3525   SDValue Lo = Val;
3526   SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
3527 
3528   // Store the two parts
3529   SDValue Store1, Store2;
3530   Store1 = DAG.getTruncStore(Chain, dl,
3531                              DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
3532                              Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
3533                              ST->getMemOperand()->getFlags());
3534 
3535   EVT PtrVT = Ptr.getValueType();
3536   Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3537                     DAG.getConstant(IncrementSize, dl, PtrVT));
3538   Alignment = MinAlign(Alignment, IncrementSize);
3539   Store2 = DAG.getTruncStore(
3540       Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
3541       ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
3542       ST->getMemOperand()->getFlags(), ST->getAAInfo());
3543 
3544   SDValue Result =
3545     DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
3546   return Result;
3547 }
3548 
3549 //===----------------------------------------------------------------------===//
3550 // Implementation of Emulated TLS Model
3551 //===----------------------------------------------------------------------===//
3552 
3553 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
3554                                                 SelectionDAG &DAG) const {
3555   // Access to the address of TLS variable xyz is lowered to a function call:
3556   //   __emutls_get_address( address of global variable named "__emutls_v.xyz" )
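  // i.e. the call built below is roughly equivalent to the IR
  //   %addr = call i8* @__emutls_get_address(i8* @__emutls_v.xyz)
  // where __emutls_v.xyz is the control variable (size, alignment and initial
  // value) that emulated-TLS codegen emits for the thread-local global xyz.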
3557   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3558   PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
3559   SDLoc dl(GA);
3560 
3561   ArgListTy Args;
3562   ArgListEntry Entry;
3563   std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
3564   Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
3565   StringRef EmuTlsVarName(NameString);
3566   GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
3567   assert(EmuTlsVar && "Cannot find EmuTlsVar ");
3568   Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
3569   Entry.Ty = VoidPtrType;
3570   Args.push_back(Entry);
3571 
3572   SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
3573 
3574   TargetLowering::CallLoweringInfo CLI(DAG);
3575   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
3576   CLI.setCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
3577   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3578 
3579   // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
3580   // calls. At least for X86 targets; maybe good for other targets too?
3581   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3582   MFI.setAdjustsStack(true);  // Is this only needed for the X86 target?
3583   MFI.setHasCalls(true);
3584 
3585   assert((GA->getOffset() == 0) &&
3586          "Emulated TLS must have zero offset in GlobalAddressSDNode");
3587   return CallResult.first;
3588 }
3589 
3590 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
3591                                                 SelectionDAG &DAG) const {
3592   assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node.");
3593   if (!isCtlzFast())
3594     return SDValue();
3595   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3596   SDLoc dl(Op);
3597   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3598     if (C->isNullValue() && CC == ISD::SETEQ) {
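      // (x == 0) becomes ctlz(x) >> log2(bitwidth): for nonzero x the count
      // is at most bitwidth-1 and the shift yields 0, while ISD::CTLZ of zero
      // is defined as bitwidth, so the shift yields exactly 1.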
3599       EVT VT = Op.getOperand(0).getValueType();
3600       SDValue Zext = Op.getOperand(0);
3601       if (VT.bitsLT(MVT::i32)) {
3602         VT = MVT::i32;
3603         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
3604       }
3605       unsigned Log2b = Log2_32(VT.getSizeInBits());
3606       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
3607       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
3608                                 DAG.getConstant(Log2b, dl, MVT::i32));
3609       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
3610     }
3611   }
3612   return SDValue();
3613 }
3614