1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Target/TargetLowering.h"
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/CodeGen/CallingConvLower.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineJumpTableInfo.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/GlobalVariable.h"
26 #include "llvm/IR/LLVMContext.h"
27 #include "llvm/MC/MCAsmInfo.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/MathExtras.h"
31 #include "llvm/Target/TargetLoweringObjectFile.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetRegisterInfo.h"
34 #include "llvm/Target/TargetSubtargetInfo.h"
35 #include <cctype>
36 using namespace llvm;
37 
38 /// NOTE: The TargetMachine owns TLOF.
// All state lives in TargetLoweringBase; this subclass only contributes
// virtual method implementations.
TargetLowering::TargetLowering(const TargetMachine &tm)
  : TargetLoweringBase(tm) {}
41 
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  // The generic lowering defines no target-specific ISD opcodes; targets
  // override this to provide printable names for their own nodes.
  return nullptr;
}
45 
46 bool TargetLowering::isPositionIndependent() const {
47   return getTargetMachine().isPositionIndependent();
48 }
49 
50 /// Check whether a given call node is in tail position within its function. If
51 /// so, it sets Chain to the input chain of the tail call.
52 bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
53                                           SDValue &Chain) const {
54   const Function *F = DAG.getMachineFunction().getFunction();
55 
56   // Conservatively require the attributes of the call to match those of
57   // the return. Ignore noalias because it doesn't affect the call sequence.
58   AttributeList CallerAttrs = F->getAttributes();
59   if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
60           .removeAttribute(Attribute::NoAlias)
61           .hasAttributes())
62     return false;
63 
64   // It's not safe to eliminate the sign / zero extension of the return value.
65   if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
66       CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
67     return false;
68 
69   // Check if the only use is a function return node.
70   return isUsedByReturnOnly(Node, Chain);
71 }
72 
73 bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
74     const uint32_t *CallerPreservedMask,
75     const SmallVectorImpl<CCValAssign> &ArgLocs,
76     const SmallVectorImpl<SDValue> &OutVals) const {
77   for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
78     const CCValAssign &ArgLoc = ArgLocs[I];
79     if (!ArgLoc.isRegLoc())
80       continue;
81     unsigned Reg = ArgLoc.getLocReg();
82     // Only look at callee saved registers.
83     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
84       continue;
85     // Check that we pass the value used for the caller.
86     // (We look for a CopyFromReg reading a virtual register that is used
87     //  for the function live-in value of register Reg)
88     SDValue Value = OutVals[I];
89     if (Value->getOpcode() != ISD::CopyFromReg)
90       return false;
91     unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
92     if (MRI.getLiveInPhysReg(ArgReg) != Reg)
93       return false;
94   }
95   return true;
96 }
97 
/// \brief Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
///
/// Each boolean flag mirrors the corresponding IR parameter attribute at
/// index \p AttrIdx of the call site \p CS, so argument lowering does not
/// have to consult the IR again.
void TargetLoweringBase::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                                     unsigned AttrIdx) {
  IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
  IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
  IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
  IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
  IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
  IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
  IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
  IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
  IsSwiftSelf = CS->paramHasAttr(AttrIdx, Attribute::SwiftSelf);
  IsSwiftError = CS->paramHasAttr(AttrIdx, Attribute::SwiftError);
  // Explicit parameter alignment from the call site.
  Alignment  = CS->getParamAlignment(AttrIdx);
}
114 
115 /// Generate a libcall taking the given operands as arguments and returning a
116 /// result of type RetVT.
117 std::pair<SDValue, SDValue>
118 TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
119                             ArrayRef<SDValue> Ops, bool isSigned,
120                             const SDLoc &dl, bool doesNotReturn,
121                             bool isReturnValueUsed) const {
122   TargetLowering::ArgListTy Args;
123   Args.reserve(Ops.size());
124 
125   TargetLowering::ArgListEntry Entry;
126   for (SDValue Op : Ops) {
127     Entry.Node = Op;
128     Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
129     Entry.IsSExt = shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
130     Entry.IsZExt = !shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
131     Args.push_back(Entry);
132   }
133 
134   if (LC == RTLIB::UNKNOWN_LIBCALL)
135     report_fatal_error("Unsupported library call operation!");
136   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
137                                          getPointerTy(DAG.getDataLayout()));
138 
139   Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
140   TargetLowering::CallLoweringInfo CLI(DAG);
141   bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
142   CLI.setDebugLoc(dl)
143       .setChain(DAG.getEntryNode())
144       .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
145       .setNoReturn(doesNotReturn)
146       .setDiscardResult(!isReturnValueUsed)
147       .setSExtResult(signExtend)
148       .setZExtResult(!signExtend);
149   return LowerCallTo(CLI);
150 }
151 
152 /// Soften the operands of a comparison. This code is shared among BR_CC,
153 /// SELECT_CC, and SETCC handlers.
154 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
155                                          SDValue &NewLHS, SDValue &NewRHS,
156                                          ISD::CondCode &CCCode,
157                                          const SDLoc &dl) const {
158   assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
159          && "Unsupported setcc type!");
160 
161   // Expand into one or more soft-fp libcall(s).
162   RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
163   bool ShouldInvertCC = false;
164   switch (CCCode) {
165   case ISD::SETEQ:
166   case ISD::SETOEQ:
167     LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
168           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
169           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
170     break;
171   case ISD::SETNE:
172   case ISD::SETUNE:
173     LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
174           (VT == MVT::f64) ? RTLIB::UNE_F64 :
175           (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
176     break;
177   case ISD::SETGE:
178   case ISD::SETOGE:
179     LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
180           (VT == MVT::f64) ? RTLIB::OGE_F64 :
181           (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
182     break;
183   case ISD::SETLT:
184   case ISD::SETOLT:
185     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
186           (VT == MVT::f64) ? RTLIB::OLT_F64 :
187           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
188     break;
189   case ISD::SETLE:
190   case ISD::SETOLE:
191     LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
192           (VT == MVT::f64) ? RTLIB::OLE_F64 :
193           (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
194     break;
195   case ISD::SETGT:
196   case ISD::SETOGT:
197     LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
198           (VT == MVT::f64) ? RTLIB::OGT_F64 :
199           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
200     break;
201   case ISD::SETUO:
202     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
203           (VT == MVT::f64) ? RTLIB::UO_F64 :
204           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
205     break;
206   case ISD::SETO:
207     LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
208           (VT == MVT::f64) ? RTLIB::O_F64 :
209           (VT == MVT::f128) ? RTLIB::O_F128 : RTLIB::O_PPCF128;
210     break;
211   case ISD::SETONE:
212     // SETONE = SETOLT | SETOGT
213     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
214           (VT == MVT::f64) ? RTLIB::OLT_F64 :
215           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
216     LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
217           (VT == MVT::f64) ? RTLIB::OGT_F64 :
218           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
219     break;
220   case ISD::SETUEQ:
221     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
222           (VT == MVT::f64) ? RTLIB::UO_F64 :
223           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
224     LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
225           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
226           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
227     break;
228   default:
229     // Invert CC for unordered comparisons
230     ShouldInvertCC = true;
231     switch (CCCode) {
232     case ISD::SETULT:
233       LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
234             (VT == MVT::f64) ? RTLIB::OGE_F64 :
235             (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
236       break;
237     case ISD::SETULE:
238       LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
239             (VT == MVT::f64) ? RTLIB::OGT_F64 :
240             (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
241       break;
242     case ISD::SETUGT:
243       LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
244             (VT == MVT::f64) ? RTLIB::OLE_F64 :
245             (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
246       break;
247     case ISD::SETUGE:
248       LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
249             (VT == MVT::f64) ? RTLIB::OLT_F64 :
250             (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
251       break;
252     default: llvm_unreachable("Do not know how to soften this setcc!");
253     }
254   }
255 
256   // Use the target specific return value for comparions lib calls.
257   EVT RetVT = getCmpLibcallReturnType();
258   SDValue Ops[2] = {NewLHS, NewRHS};
259   NewLHS = makeLibCall(DAG, LC1, RetVT, Ops, false /*sign irrelevant*/,
260                        dl).first;
261   NewRHS = DAG.getConstant(0, dl, RetVT);
262 
263   CCCode = getCmpLibcallCC(LC1);
264   if (ShouldInvertCC)
265     CCCode = getSetCCInverse(CCCode, /*isInteger=*/true);
266 
267   if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
268     SDValue Tmp = DAG.getNode(
269         ISD::SETCC, dl,
270         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
271         NewLHS, NewRHS, DAG.getCondCode(CCCode));
272     NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, false/*sign irrelevant*/,
273                          dl).first;
274     NewLHS = DAG.getNode(
275         ISD::SETCC, dl,
276         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
277         NewLHS, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
278     NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
279     NewRHS = SDValue();
280   }
281 }
282 
283 /// Return the entry encoding for a jump table in the current function. The
284 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
285 unsigned TargetLowering::getJumpTableEncoding() const {
286   // In non-pic modes, just use the address of a block.
287   if (!isPositionIndependent())
288     return MachineJumpTableInfo::EK_BlockAddress;
289 
290   // In PIC mode, if the target supports a GPRel32 directive, use it.
291   if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
292     return MachineJumpTableInfo::EK_GPRel32BlockAddress;
293 
294   // Otherwise, use a label difference.
295   return MachineJumpTableInfo::EK_LabelDifference32;
296 }
297 
298 SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
299                                                  SelectionDAG &DAG) const {
300   // If our PIC model is GP relative, use the global offset table as the base.
301   unsigned JTEncoding = getJumpTableEncoding();
302 
303   if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
304       (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
305     return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));
306 
307   return Table;
308 }
309 
310 /// This returns the relocation base for the given PIC jumptable, the same as
311 /// getPICJumpTableRelocBase, but as an MCExpr.
312 const MCExpr *
313 TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
314                                              unsigned JTI,MCContext &Ctx) const{
315   // The normal PIC reloc base is the label at the start of the jump table.
316   return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
317 }
318 
319 bool
320 TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
321   const TargetMachine &TM = getTargetMachine();
322   const GlobalValue *GV = GA->getGlobal();
323 
324   // If the address is not even local to this DSO we will have to load it from
325   // a got and then add the offset.
326   if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
327     return false;
328 
329   // If the code is position independent we will have to add a base register.
330   if (isPositionIndependent())
331     return false;
332 
333   // Otherwise we can do it.
334   return true;
335 }
336 
337 //===----------------------------------------------------------------------===//
338 //  Optimization Methods
339 //===----------------------------------------------------------------------===//
340 
341 /// If the specified instruction has a constant integer operand and there are
342 /// bits set in that constant that are not demanded, then clear those bits and
343 /// return true.
344 bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(
345     SDValue Op, const APInt &Demanded) {
346   SDLoc DL(Op);
347   unsigned Opcode = Op.getOpcode();
348 
349   // FIXME: ISD::SELECT, ISD::SELECT_CC
350   switch (Opcode) {
351   default:
352     break;
353   case ISD::XOR:
354   case ISD::AND:
355   case ISD::OR: {
356     auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
357     if (!Op1C)
358       return false;
359 
360     // If this is a 'not' op, don't touch it because that's a canonical form.
361     const APInt &C = Op1C->getAPIntValue();
362     if (Opcode == ISD::XOR && (C | ~Demanded).isAllOnesValue())
363       return false;
364 
365     if (C.intersects(~Demanded)) {
366       EVT VT = Op.getValueType();
367       SDValue NewC = DAG.getConstant(Demanded & C, DL, VT);
368       SDValue NewOp = DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
369       return CombineTo(Op, NewOp);
370     }
371 
372     break;
373   }
374   }
375 
376   return false;
377 }
378 
/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
                                                         unsigned BitWidth,
                                                         const APInt &Demanded,
                                                         const SDLoc &dl) {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // Highest demanded bit position determines the narrowest usable width.
  unsigned DemandedSize = BitWidth - Demanded.countLeadingZeros();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      // Truncate both operands, perform the operation in the narrow type,
      // then widen the result back to the original type.
      SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(0)),
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(1)));
      // Only zero-extend when demanded bits reach past the narrow width;
      // otherwise an any-extend leaves the backend more freedom.
      bool NeedZext = DemandedSize > SmallVTBits;
      SDValue Z = DAG.getNode(NeedZext ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND,
                              dl, Op.getValueType(), X);
      return CombineTo(Op, Z);
    }
  }
  return false;
}
425 
/// Try to simplify operand OpIdx of User under the given demanded-bits mask.
/// On success, either commits the whole replacement (when the old value has a
/// single use) or splices the new value into User's operand list only.
bool
TargetLowering::TargetLoweringOpt::SimplifyDemandedBits(SDNode *User,
                                                        unsigned OpIdx,
                                                        const APInt &Demanded,
                                                        DAGCombinerInfo &DCI) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Op = User->getOperand(OpIdx);
  APInt KnownZero, KnownOne;

  // Run the simplifier with AssumeSingleUse = true; on success it records the
  // pending replacement in this object's Old/New members.
  if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne,
                                *this, 0, true))
    return false;


  // Old will not always be the same as Op.  For example:
  //
  // Demanded = 0xffffff
  // Op = i64 truncate (i32 and x, 0xffffff)
  // In this case simplify demand bits will want to replace the 'and' node
  // with the value 'x', which will give us:
  // Old = i32 and x, 0xffffff
  // New = x
  if (Old.hasOneUse()) {
    // For the one use case, we just commit the change.
    DCI.CommitTargetLoweringOpt(*this);
    return true;
  }

  // If Old has more than one use then it must be Op, because the
  // AssumeSingleUse flag is not propagated to recursive calls of
  // SimplifyDemandedBits, so the only node with multiple use that
  // it will attempt to combine will be Op.
  assert(Old == Op);

  // Rebuild User's operand list with the simplified value replacing Op.
  SmallVector <SDValue, 4> NewOps;
  for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
    if (i == OpIdx) {
      NewOps.push_back(New);
      continue;
    }
    NewOps.push_back(User->getOperand(i));
  }
  DAG.UpdateNodeOperands(User, NewOps);
  // Op has less users now, so we may be able to perform additional combines
  // with it.
  DCI.AddToWorklist(Op.getNode());
  // User's operands have been updated, so we may be able to do new combines
  // with it.
  DCI.AddToWorklist(User);
  return true;
}
477 
478 bool TargetLowering::SimplifyDemandedBits(SDValue Op, APInt &DemandedMask,
479                                           DAGCombinerInfo &DCI) const {
480 
481   SelectionDAG &DAG = DCI.DAG;
482   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
483                         !DCI.isBeforeLegalizeOps());
484   APInt KnownZero, KnownOne;
485 
486   bool Simplified = SimplifyDemandedBits(Op, DemandedMask, KnownZero, KnownOne,
487                                          TLO);
488   if (Simplified)
489     DCI.CommitTargetLoweringOpt(TLO);
490   return Simplified;
491 }
492 
493 /// Look at Op. At this point, we know that only the DemandedMask bits of the
494 /// result of Op are ever used downstream. If we can use this information to
495 /// simplify Op, create a new simplified DAG node and return true, returning the
496 /// original and new nodes in Old and New. Otherwise, analyze the expression and
497 /// return a mask of KnownOne and KnownZero bits for the expression (used to
498 /// simplify the caller).  The KnownZero/One bits may only be accurate for those
499 /// bits in the DemandedMask.
500 bool TargetLowering::SimplifyDemandedBits(SDValue Op,
501                                           const APInt &DemandedMask,
502                                           APInt &KnownZero,
503                                           APInt &KnownOne,
504                                           TargetLoweringOpt &TLO,
505                                           unsigned Depth,
506                                           bool AssumeSingleUse) const {
507   unsigned BitWidth = DemandedMask.getBitWidth();
508   assert(Op.getScalarValueSizeInBits() == BitWidth &&
509          "Mask size mismatches value type size!");
510   APInt NewMask = DemandedMask;
511   SDLoc dl(Op);
512   auto &DL = TLO.DAG.getDataLayout();
513 
514   // Don't know anything.
515   KnownZero = KnownOne = APInt(BitWidth, 0);
516 
517   // Other users may use these bits.
518   if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
519     if (Depth != 0) {
520       // If not at the root, Just compute the KnownZero/KnownOne bits to
521       // simplify things downstream.
522       TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
523       return false;
524     }
525     // If this is the root being simplified, allow it to have multiple uses,
526     // just set the NewMask to all bits.
527     NewMask = APInt::getAllOnesValue(BitWidth);
528   } else if (DemandedMask == 0) {
529     // Not demanding any bits from Op.
530     if (!Op.isUndef())
531       return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
532     return false;
533   } else if (Depth == 6) {        // Limit search depth.
534     return false;
535   }
536 
537   APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
538   switch (Op.getOpcode()) {
539   case ISD::Constant:
540     // We know all of the bits for a constant!
541     KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
542     KnownZero = ~KnownOne;
543     return false;   // Don't fall through, will infinitely loop.
544   case ISD::BUILD_VECTOR:
545     // Collect the known bits that are shared by every constant vector element.
546     KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
547     for (SDValue SrcOp : Op->ops()) {
548       if (!isa<ConstantSDNode>(SrcOp)) {
549         // We can only handle all constant values - bail out with no known bits.
550         KnownZero = KnownOne = APInt(BitWidth, 0);
551         return false;
552       }
553       KnownOne2 = cast<ConstantSDNode>(SrcOp)->getAPIntValue();
554       KnownZero2 = ~KnownOne2;
555 
556       // BUILD_VECTOR can implicitly truncate sources, we must handle this.
557       if (KnownOne2.getBitWidth() != BitWidth) {
558         assert(KnownOne2.getBitWidth() > BitWidth &&
559                KnownZero2.getBitWidth() > BitWidth &&
560                "Expected BUILD_VECTOR implicit truncation");
561         KnownOne2 = KnownOne2.trunc(BitWidth);
562         KnownZero2 = KnownZero2.trunc(BitWidth);
563       }
564 
565       // Known bits are the values that are shared by every element.
566       // TODO: support per-element known bits.
567       KnownOne &= KnownOne2;
568       KnownZero &= KnownZero2;
569     }
570     return false;   // Don't fall through, will infinitely loop.
571   case ISD::AND:
572     // If the RHS is a constant, check to see if the LHS would be zero without
573     // using the bits from the RHS.  Below, we use knowledge about the RHS to
574     // simplify the LHS, here we're using information from the LHS to simplify
575     // the RHS.
576     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
577       SDValue Op0 = Op.getOperand(0);
578       APInt LHSZero, LHSOne;
579       // Do not increment Depth here; that can cause an infinite loop.
580       TLO.DAG.computeKnownBits(Op0, LHSZero, LHSOne, Depth);
581       // If the LHS already has zeros where RHSC does, this and is dead.
582       if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
583         return TLO.CombineTo(Op, Op0);
584 
585       // If any of the set bits in the RHS are known zero on the LHS, shrink
586       // the constant.
587       if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
588         return true;
589 
590       // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
591       // constant, but if this 'and' is only clearing bits that were just set by
592       // the xor, then this 'and' can be eliminated by shrinking the mask of
593       // the xor. For example, for a 32-bit X:
594       // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
595       if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
596           LHSOne == ~RHSC->getAPIntValue()) {
597         SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, Op.getValueType(),
598                                       Op0.getOperand(0), Op.getOperand(1));
599         return TLO.CombineTo(Op, Xor);
600       }
601     }
602 
603     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
604                              KnownOne, TLO, Depth+1))
605       return true;
606     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
607     if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
608                              KnownZero2, KnownOne2, TLO, Depth+1))
609       return true;
610     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
611 
612     // If all of the demanded bits are known one on one side, return the other.
613     // These bits cannot contribute to the result of the 'and'.
614     if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
615       return TLO.CombineTo(Op, Op.getOperand(0));
616     if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
617       return TLO.CombineTo(Op, Op.getOperand(1));
618     // If all of the demanded bits in the inputs are known zeros, return zero.
619     if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
620       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, Op.getValueType()));
621     // If the RHS is a constant, see if we can simplify it.
622     if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
623       return true;
624     // If the operation can be done in a smaller type, do so.
625     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
626       return true;
627 
628     // Output known-1 bits are only known if set in both the LHS & RHS.
629     KnownOne &= KnownOne2;
630     // Output known-0 are known to be clear if zero in either the LHS | RHS.
631     KnownZero |= KnownZero2;
632     break;
633   case ISD::OR:
634     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
635                              KnownOne, TLO, Depth+1))
636       return true;
637     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
638     if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
639                              KnownZero2, KnownOne2, TLO, Depth+1))
640       return true;
641     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
642 
643     // If all of the demanded bits are known zero on one side, return the other.
644     // These bits cannot contribute to the result of the 'or'.
645     if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
646       return TLO.CombineTo(Op, Op.getOperand(0));
647     if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
648       return TLO.CombineTo(Op, Op.getOperand(1));
649     // If all of the potentially set bits on one side are known to be set on
650     // the other side, just use the 'other' side.
651     if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
652       return TLO.CombineTo(Op, Op.getOperand(0));
653     if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
654       return TLO.CombineTo(Op, Op.getOperand(1));
655     // If the RHS is a constant, see if we can simplify it.
656     if (TLO.ShrinkDemandedConstant(Op, NewMask))
657       return true;
658     // If the operation can be done in a smaller type, do so.
659     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
660       return true;
661 
662     // Output known-0 bits are only known if clear in both the LHS & RHS.
663     KnownZero &= KnownZero2;
664     // Output known-1 are known to be set if set in either the LHS | RHS.
665     KnownOne |= KnownOne2;
666     break;
667   case ISD::XOR:
668     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
669                              KnownOne, TLO, Depth+1))
670       return true;
671     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
672     if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
673                              KnownOne2, TLO, Depth+1))
674       return true;
675     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
676 
677     // If all of the demanded bits are known zero on one side, return the other.
678     // These bits cannot contribute to the result of the 'xor'.
679     if ((KnownZero & NewMask) == NewMask)
680       return TLO.CombineTo(Op, Op.getOperand(0));
681     if ((KnownZero2 & NewMask) == NewMask)
682       return TLO.CombineTo(Op, Op.getOperand(1));
683     // If the operation can be done in a smaller type, do so.
684     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
685       return true;
686 
687     // If all of the unknown bits are known to be zero on one side or the other
688     // (but not both) turn this into an *inclusive* or.
689     //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
690     if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
691       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
692                                                Op.getOperand(0),
693                                                Op.getOperand(1)));
694 
695     // Output known-0 bits are known if clear or set in both the LHS & RHS.
696     KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
697     // Output known-1 are known to be set if set in only one of the LHS, RHS.
698     KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
699 
700     // If all of the demanded bits on one side are known, and all of the set
701     // bits on that side are also known to be set on the other side, turn this
702     // into an AND, as we know the bits will be cleared.
703     //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
704     // NB: it is okay if more bits are known than are requested
705     if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
706       if (KnownOne == KnownOne2) { // set bits are the same on both sides
707         EVT VT = Op.getValueType();
708         SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, dl, VT);
709         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
710                                                  Op.getOperand(0), ANDC));
711       }
712     }
713 
714     // If the RHS is a constant, see if we can simplify it.
715     // for XOR, we prefer to force bits to 1 if they will make a -1.
716     // If we can't force bits, try to shrink the constant.
717     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
718       APInt Expanded = C->getAPIntValue() | (~NewMask);
719       // If we can expand it to have all bits set, do it.
720       if (Expanded.isAllOnesValue()) {
721         if (Expanded != C->getAPIntValue()) {
722           EVT VT = Op.getValueType();
723           SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
724                                         TLO.DAG.getConstant(Expanded, dl, VT));
725           return TLO.CombineTo(Op, New);
726         }
727         // If it already has all the bits set, nothing to change
728         // but don't shrink either!
729       } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
730         return true;
731       }
732     }
733 
734     KnownZero = KnownZeroOut;
735     KnownOne  = KnownOneOut;
736     break;
737   case ISD::SELECT:
738     if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
739                              KnownOne, TLO, Depth+1))
740       return true;
741     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
742                              KnownOne2, TLO, Depth+1))
743       return true;
744     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
745     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
746 
747     // If the operands are constants, see if we can simplify them.
748     if (TLO.ShrinkDemandedConstant(Op, NewMask))
749       return true;
750 
751     // Only known if known in both the LHS and RHS.
752     KnownOne &= KnownOne2;
753     KnownZero &= KnownZero2;
754     break;
755   case ISD::SELECT_CC:
756     if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
757                              KnownOne, TLO, Depth+1))
758       return true;
759     if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
760                              KnownOne2, TLO, Depth+1))
761       return true;
762     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
763     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
764 
765     // If the operands are constants, see if we can simplify them.
766     if (TLO.ShrinkDemandedConstant(Op, NewMask))
767       return true;
768 
769     // Only known if known in both the LHS and RHS.
770     KnownOne &= KnownOne2;
771     KnownZero &= KnownZero2;
772     break;
773   case ISD::SETCC: {
774     SDValue Op0 = Op.getOperand(0);
775     SDValue Op1 = Op.getOperand(1);
776     ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
777     // If (1) we only need the sign-bit, (2) the setcc operands are the same
778     // width as the setcc result, and (3) the result of a setcc conforms to 0 or
779     // -1, we may be able to bypass the setcc.
780     if (NewMask.isSignBit() && Op0.getScalarValueSizeInBits() == BitWidth &&
781         getBooleanContents(Op.getValueType()) ==
782             BooleanContent::ZeroOrNegativeOneBooleanContent) {
783       // If we're testing X < 0, then this compare isn't needed - just use X!
784       // FIXME: We're limiting to integer types here, but this should also work
785       // if we don't care about FP signed-zero. The use of SETLT with FP means
786       // that we don't care about NaNs.
787       if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
788           (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
789         return TLO.CombineTo(Op, Op0);
790 
791       // TODO: Should we check for other forms of sign-bit comparisons?
792       // Examples: X <= -1, X >= 0
793     }
794     break;
795   }
796   case ISD::SHL:
797     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
798       unsigned ShAmt = SA->getZExtValue();
799       SDValue InOp = Op.getOperand(0);
800 
801       // If the shift count is an invalid immediate, don't do anything.
802       if (ShAmt >= BitWidth)
803         break;
804 
805       // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
806       // single shift.  We can do this if the bottom bits (which are shifted
807       // out) are never demanded.
808       if (InOp.getOpcode() == ISD::SRL &&
809           isa<ConstantSDNode>(InOp.getOperand(1))) {
810         if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
811           unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
812           unsigned Opc = ISD::SHL;
813           int Diff = ShAmt-C1;
814           if (Diff < 0) {
815             Diff = -Diff;
816             Opc = ISD::SRL;
817           }
818 
819           SDValue NewSA =
820             TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
821           EVT VT = Op.getValueType();
822           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
823                                                    InOp.getOperand(0), NewSA));
824         }
825       }
826 
827       if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
828                                KnownZero, KnownOne, TLO, Depth+1))
829         return true;
830 
831       // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
832       // are not demanded. This will likely allow the anyext to be folded away.
833       if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
834         SDValue InnerOp = InOp.getNode()->getOperand(0);
835         EVT InnerVT = InnerOp.getValueType();
836         unsigned InnerBits = InnerVT.getSizeInBits();
837         if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
838             isTypeDesirableForOp(ISD::SHL, InnerVT)) {
839           EVT ShTy = getShiftAmountTy(InnerVT, DL);
840           if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
841             ShTy = InnerVT;
842           SDValue NarrowShl =
843             TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
844                             TLO.DAG.getConstant(ShAmt, dl, ShTy));
845           return
846             TLO.CombineTo(Op,
847                           TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(),
848                                           NarrowShl));
849         }
850         // Repeat the SHL optimization above in cases where an extension
851         // intervenes: (shl (anyext (shr x, c1)), c2) to
852         // (shl (anyext x), c2-c1).  This requires that the bottom c1 bits
853         // aren't demanded (as above) and that the shifted upper c1 bits of
854         // x aren't demanded.
855         if (InOp.hasOneUse() &&
856             InnerOp.getOpcode() == ISD::SRL &&
857             InnerOp.hasOneUse() &&
858             isa<ConstantSDNode>(InnerOp.getOperand(1))) {
859           uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
860             ->getZExtValue();
861           if (InnerShAmt < ShAmt &&
862               InnerShAmt < InnerBits &&
863               NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 &&
864               NewMask.trunc(ShAmt) == 0) {
865             SDValue NewSA =
866               TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
867                                   Op.getOperand(1).getValueType());
868             EVT VT = Op.getValueType();
869             SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
870                                              InnerOp.getOperand(0));
871             return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT,
872                                                      NewExt, NewSA));
873           }
874         }
875       }
876 
877       KnownZero <<= SA->getZExtValue();
878       KnownOne  <<= SA->getZExtValue();
879       // low bits known zero.
880       KnownZero.setLowBits(SA->getZExtValue());
881     }
882     break;
883   case ISD::SRL:
884     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
885       EVT VT = Op.getValueType();
886       unsigned ShAmt = SA->getZExtValue();
887       unsigned VTSize = VT.getSizeInBits();
888       SDValue InOp = Op.getOperand(0);
889 
890       // If the shift count is an invalid immediate, don't do anything.
891       if (ShAmt >= BitWidth)
892         break;
893 
894       APInt InDemandedMask = (NewMask << ShAmt);
895 
896       // If the shift is exact, then it does demand the low bits (and knows that
897       // they are zero).
898       if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
899         InDemandedMask.setLowBits(ShAmt);
900 
901       // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
902       // single shift.  We can do this if the top bits (which are shifted out)
903       // are never demanded.
904       if (InOp.getOpcode() == ISD::SHL &&
905           isa<ConstantSDNode>(InOp.getOperand(1))) {
906         if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
907           unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
908           unsigned Opc = ISD::SRL;
909           int Diff = ShAmt-C1;
910           if (Diff < 0) {
911             Diff = -Diff;
912             Opc = ISD::SHL;
913           }
914 
915           SDValue NewSA =
916             TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
917           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
918                                                    InOp.getOperand(0), NewSA));
919         }
920       }
921 
922       // Compute the new bits that are at the top now.
923       if (SimplifyDemandedBits(InOp, InDemandedMask,
924                                KnownZero, KnownOne, TLO, Depth+1))
925         return true;
926       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
927       KnownZero = KnownZero.lshr(ShAmt);
928       KnownOne  = KnownOne.lshr(ShAmt);
929 
930       KnownZero.setHighBits(ShAmt);  // High bits known zero.
931     }
932     break;
933   case ISD::SRA:
934     // If this is an arithmetic shift right and only the low-bit is set, we can
935     // always convert this into a logical shr, even if the shift amount is
936     // variable.  The low bit of the shift cannot be an input sign bit unless
937     // the shift amount is >= the size of the datatype, which is undefined.
938     if (NewMask == 1)
939       return TLO.CombineTo(Op,
940                            TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
941                                            Op.getOperand(0), Op.getOperand(1)));
942 
943     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
944       EVT VT = Op.getValueType();
945       unsigned ShAmt = SA->getZExtValue();
946 
947       // If the shift count is an invalid immediate, don't do anything.
948       if (ShAmt >= BitWidth)
949         break;
950 
951       APInt InDemandedMask = (NewMask << ShAmt);
952 
953       // If the shift is exact, then it does demand the low bits (and knows that
954       // they are zero).
955       if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
956         InDemandedMask.setLowBits(ShAmt);
957 
958       // If any of the demanded bits are produced by the sign extension, we also
959       // demand the input sign bit.
960       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
961       if (HighBits.intersects(NewMask))
962         InDemandedMask |= APInt::getSignBit(VT.getScalarSizeInBits());
963 
964       if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
965                                KnownZero, KnownOne, TLO, Depth+1))
966         return true;
967       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
968       KnownZero = KnownZero.lshr(ShAmt);
969       KnownOne  = KnownOne.lshr(ShAmt);
970 
971       // Handle the sign bit, adjusted to where it is now in the mask.
972       APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
973 
974       // If the input sign bit is known to be zero, or if none of the top bits
975       // are demanded, turn this into an unsigned shift right.
976       if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
977         SDNodeFlags Flags;
978         Flags.setExact(cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact());
979         return TLO.CombineTo(Op,
980                              TLO.DAG.getNode(ISD::SRL, dl, VT, Op.getOperand(0),
981                                              Op.getOperand(1), &Flags));
982       }
983 
984       int Log2 = NewMask.exactLogBase2();
985       if (Log2 >= 0) {
986         // The bit must come from the sign.
987         SDValue NewSA =
988           TLO.DAG.getConstant(BitWidth - 1 - Log2, dl,
989                               Op.getOperand(1).getValueType());
990         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
991                                                  Op.getOperand(0), NewSA));
992       }
993 
994       if (KnownOne.intersects(SignBit))
995         // New bits are known one.
996         KnownOne |= HighBits;
997     }
998     break;
999   case ISD::SIGN_EXTEND_INREG: {
1000     EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1001 
1002     APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
1003     // If we only care about the highest bit, don't bother shifting right.
1004     if (MsbMask == NewMask) {
1005       unsigned ShAmt = ExVT.getScalarSizeInBits();
1006       SDValue InOp = Op.getOperand(0);
1007       unsigned VTBits = Op->getValueType(0).getScalarSizeInBits();
1008       bool AlreadySignExtended =
1009         TLO.DAG.ComputeNumSignBits(InOp) >= VTBits-ShAmt+1;
1010       // However if the input is already sign extended we expect the sign
1011       // extension to be dropped altogether later and do not simplify.
1012       if (!AlreadySignExtended) {
1013         // Compute the correct shift amount type, which must be getShiftAmountTy
1014         // for scalar types after legalization.
1015         EVT ShiftAmtTy = Op.getValueType();
1016         if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
1017           ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
1018 
1019         SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, dl,
1020                                                ShiftAmtTy);
1021         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
1022                                                  Op.getValueType(), InOp,
1023                                                  ShiftAmt));
1024       }
1025     }
1026 
1027     // Sign extension.  Compute the demanded bits in the result that are not
1028     // present in the input.
1029     APInt NewBits =
1030       APInt::getHighBitsSet(BitWidth,
1031                             BitWidth - ExVT.getScalarSizeInBits());
1032 
1033     // If none of the extended bits are demanded, eliminate the sextinreg.
1034     if ((NewBits & NewMask) == 0)
1035       return TLO.CombineTo(Op, Op.getOperand(0));
1036 
1037     APInt InSignBit =
1038       APInt::getSignBit(ExVT.getScalarSizeInBits()).zext(BitWidth);
1039     APInt InputDemandedBits =
1040       APInt::getLowBitsSet(BitWidth,
1041                            ExVT.getScalarSizeInBits()) &
1042       NewMask;
1043 
1044     // Since the sign extended bits are demanded, we know that the sign
1045     // bit is demanded.
1046     InputDemandedBits |= InSignBit;
1047 
1048     if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
1049                              KnownZero, KnownOne, TLO, Depth+1))
1050       return true;
1051     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1052 
1053     // If the sign bit of the input is known set or clear, then we know the
1054     // top bits of the result.
1055 
1056     // If the input sign bit is known zero, convert this into a zero extension.
1057     if (KnownZero.intersects(InSignBit))
1058       return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(
1059                                    Op.getOperand(0), dl, ExVT.getScalarType()));
1060 
1061     if (KnownOne.intersects(InSignBit)) {    // Input sign bit known set
1062       KnownOne |= NewBits;
1063       KnownZero &= ~NewBits;
1064     } else {                       // Input sign bit unknown
1065       KnownZero &= ~NewBits;
1066       KnownOne &= ~NewBits;
1067     }
1068     break;
1069   }
1070   case ISD::BUILD_PAIR: {
1071     EVT HalfVT = Op.getOperand(0).getValueType();
1072     unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
1073 
1074     APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
1075     APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
1076 
1077     APInt KnownZeroLo, KnownOneLo;
1078     APInt KnownZeroHi, KnownOneHi;
1079 
1080     if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
1081                              KnownOneLo, TLO, Depth + 1))
1082       return true;
1083 
1084     if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
1085                              KnownOneHi, TLO, Depth + 1))
1086       return true;
1087 
1088     KnownZero = KnownZeroLo.zext(BitWidth) |
1089                 KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);
1090 
1091     KnownOne = KnownOneLo.zext(BitWidth) |
1092                KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
1093     break;
1094   }
1095   case ISD::ZERO_EXTEND: {
1096     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
1097     APInt InMask = NewMask.trunc(OperandBitWidth);
1098 
1099     // If none of the top bits are demanded, convert this into an any_extend.
1100     APInt NewBits =
1101       APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
1102     if (!NewBits.intersects(NewMask))
1103       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
1104                                                Op.getValueType(),
1105                                                Op.getOperand(0)));
1106 
1107     if (SimplifyDemandedBits(Op.getOperand(0), InMask,
1108                              KnownZero, KnownOne, TLO, Depth+1))
1109       return true;
1110     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1111     KnownZero = KnownZero.zext(BitWidth);
1112     KnownOne = KnownOne.zext(BitWidth);
1113     KnownZero |= NewBits;
1114     break;
1115   }
1116   case ISD::SIGN_EXTEND: {
1117     EVT InVT = Op.getOperand(0).getValueType();
1118     unsigned InBits = InVT.getScalarSizeInBits();
1119     APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
1120     APInt InSignBit = APInt::getOneBitSet(BitWidth, InBits - 1);
1121     APInt NewBits   = ~InMask & NewMask;
1122 
1123     // If none of the top bits are demanded, convert this into an any_extend.
1124     if (NewBits == 0)
1125       return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
1126                                               Op.getValueType(),
1127                                               Op.getOperand(0)));
1128 
1129     // Since some of the sign extended bits are demanded, we know that the sign
1130     // bit is demanded.
1131     APInt InDemandedBits = InMask & NewMask;
1132     InDemandedBits |= InSignBit;
1133     InDemandedBits = InDemandedBits.trunc(InBits);
1134 
1135     if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
1136                              KnownOne, TLO, Depth+1))
1137       return true;
1138     KnownZero = KnownZero.zext(BitWidth);
1139     KnownOne = KnownOne.zext(BitWidth);
1140 
1141     // If the sign bit is known zero, convert this to a zero extend.
1142     if (KnownZero.intersects(InSignBit))
1143       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
1144                                                Op.getValueType(),
1145                                                Op.getOperand(0)));
1146 
1147     // If the sign bit is known one, the top bits match.
1148     if (KnownOne.intersects(InSignBit)) {
1149       KnownOne |= NewBits;
1150       assert((KnownZero & NewBits) == 0);
1151     } else {   // Otherwise, top bits aren't known.
1152       assert((KnownOne & NewBits) == 0);
1153       assert((KnownZero & NewBits) == 0);
1154     }
1155     break;
1156   }
1157   case ISD::ANY_EXTEND: {
1158     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
1159     APInt InMask = NewMask.trunc(OperandBitWidth);
1160     if (SimplifyDemandedBits(Op.getOperand(0), InMask,
1161                              KnownZero, KnownOne, TLO, Depth+1))
1162       return true;
1163     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1164     KnownZero = KnownZero.zext(BitWidth);
1165     KnownOne = KnownOne.zext(BitWidth);
1166     break;
1167   }
1168   case ISD::TRUNCATE: {
1169     // Simplify the input, using demanded bit information, and compute the known
1170     // zero/one bits live out.
1171     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
1172     APInt TruncMask = NewMask.zext(OperandBitWidth);
1173     if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
1174                              KnownZero, KnownOne, TLO, Depth+1))
1175       return true;
1176     KnownZero = KnownZero.trunc(BitWidth);
1177     KnownOne = KnownOne.trunc(BitWidth);
1178 
1179     // If the input is only used by this truncate, see if we can shrink it based
1180     // on the known demanded bits.
1181     if (Op.getOperand(0).getNode()->hasOneUse()) {
1182       SDValue In = Op.getOperand(0);
1183       switch (In.getOpcode()) {
1184       default: break;
1185       case ISD::SRL:
1186         // Shrink SRL by a constant if none of the high bits shifted in are
1187         // demanded.
1188         if (TLO.LegalTypes() &&
1189             !isTypeDesirableForOp(ISD::SRL, Op.getValueType()))
1190           // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
1191           // undesirable.
1192           break;
1193         ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
1194         if (!ShAmt)
1195           break;
1196         SDValue Shift = In.getOperand(1);
1197         if (TLO.LegalTypes()) {
1198           uint64_t ShVal = ShAmt->getZExtValue();
1199           Shift = TLO.DAG.getConstant(ShVal, dl,
1200                                       getShiftAmountTy(Op.getValueType(), DL));
1201         }
1202 
1203         APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
1204                                                OperandBitWidth - BitWidth);
1205         HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
1206 
1207         if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
1208           // None of the shifted in bits are needed.  Add a truncate of the
1209           // shift input, then shift it.
1210           SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
1211                                              Op.getValueType(),
1212                                              In.getOperand(0));
1213           return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
1214                                                    Op.getValueType(),
1215                                                    NewTrunc,
1216                                                    Shift));
1217         }
1218         break;
1219       }
1220     }
1221 
1222     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1223     break;
1224   }
1225   case ISD::AssertZext: {
1226     // AssertZext demands all of the high bits, plus any of the low bits
1227     // demanded by its users.
1228     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1229     APInt InMask = APInt::getLowBitsSet(BitWidth,
1230                                         VT.getSizeInBits());
1231     if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | NewMask,
1232                              KnownZero, KnownOne, TLO, Depth+1))
1233       return true;
1234     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1235 
1236     KnownZero |= ~InMask & NewMask;
1237     break;
1238   }
1239   case ISD::BITCAST:
1240     // If this is an FP->Int bitcast and if the sign bit is the only
1241     // thing demanded, turn this into a FGETSIGN.
1242     if (!TLO.LegalOperations() &&
1243         !Op.getValueType().isVector() &&
1244         !Op.getOperand(0).getValueType().isVector() &&
1245         NewMask == APInt::getSignBit(Op.getValueSizeInBits()) &&
1246         Op.getOperand(0).getValueType().isFloatingPoint()) {
1247       bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
1248       bool i32Legal  = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
1249       if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple() &&
1250            Op.getOperand(0).getValueType() != MVT::f128) {
1251         // Cannot eliminate/lower SHL for f128 yet.
1252         EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32;
1253         // Make a FGETSIGN + SHL to move the sign bit into the appropriate
1254         // place.  We expect the SHL to be eliminated by other optimizations.
1255         SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
1256         unsigned OpVTSizeInBits = Op.getValueSizeInBits();
1257         if (!OpVTLegal && OpVTSizeInBits > 32)
1258           Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign);
1259         unsigned ShVal = Op.getValueSizeInBits() - 1;
1260         SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, Op.getValueType());
1261         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
1262                                                  Op.getValueType(),
1263                                                  Sign, ShAmt));
1264       }
1265     }
1266     break;
1267   case ISD::ADD:
1268   case ISD::MUL:
1269   case ISD::SUB: {
1270     // Add, Sub, and Mul don't demand any bits in positions beyond that
1271     // of the highest bit demanded of them.
1272     APInt LoMask = APInt::getLowBitsSet(BitWidth,
1273                                         BitWidth - NewMask.countLeadingZeros());
1274     if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2,
1275                              KnownOne2, TLO, Depth+1) ||
1276         SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2,
1277                              KnownOne2, TLO, Depth+1) ||
1278         // See if the operation should be performed at a smaller bit width.
1279         TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) {
1280       const SDNodeFlags *Flags = Op.getNode()->getFlags();
1281       if (Flags->hasNoSignedWrap() || Flags->hasNoUnsignedWrap()) {
1282         // Disable the nsw and nuw flags. We can no longer guarantee that we
1283         // won't wrap after simplification.
1284         SDNodeFlags NewFlags = *Flags;
1285         NewFlags.setNoSignedWrap(false);
1286         NewFlags.setNoUnsignedWrap(false);
1287         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, Op.getValueType(),
1288                                         Op.getOperand(0), Op.getOperand(1),
1289                                         &NewFlags);
1290         return TLO.CombineTo(Op, NewOp);
1291       }
1292       return true;
1293     }
1294     LLVM_FALLTHROUGH;
1295   }
1296   default:
1297     // Just use computeKnownBits to compute output bits.
1298     TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
1299     break;
1300   }
1301 
1302   // If we know the value of all of the demanded bits, return this as a
1303   // constant.
1304   if ((NewMask & (KnownZero|KnownOne)) == NewMask) {
1305     // Avoid folding to a constant if any OpaqueConstant is involved.
1306     const SDNode *N = Op.getNode();
1307     for (SDNodeIterator I = SDNodeIterator::begin(N),
1308          E = SDNodeIterator::end(N); I != E; ++I) {
1309       SDNode *Op = *I;
1310       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
1311         if (C->isOpaque())
1312           return false;
1313     }
1314     return TLO.CombineTo(Op,
1315                          TLO.DAG.getConstant(KnownOne, dl, Op.getValueType()));
1316   }
1317 
1318   return false;
1319 }
1320 
1321 /// Determine which of the bits specified in Mask are known to be either zero or
1322 /// one and return them in the KnownZero/KnownOne bitsets.
1323 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1324                                                    APInt &KnownZero,
1325                                                    APInt &KnownOne,
1326                                                    const APInt &DemandedElts,
1327                                                    const SelectionDAG &DAG,
1328                                                    unsigned Depth) const {
1329   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1330           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1331           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1332           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1333          "Should use MaskedValueIsZero if you don't know whether Op"
1334          " is a target node!");
1335   KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
1336 }
1337 
1338 /// This method can be implemented by targets that want to expose additional
1339 /// information about sign bits to the DAG Combiner.
1340 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
1341                                                          const APInt &,
1342                                                          const SelectionDAG &,
1343                                                          unsigned Depth) const {
1344   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1345           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1346           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1347           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1348          "Should use ComputeNumSignBits if you don't know whether Op"
1349          " is a target node!");
1350   return 1;
1351 }
1352 
1353 bool TargetLowering::isConstTrueVal(const SDNode *N) const {
1354   if (!N)
1355     return false;
1356 
1357   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1358   if (!CN) {
1359     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1360     if (!BV)
1361       return false;
1362 
1363     // Only interested in constant splats, we don't care about undef
1364     // elements in identifying boolean constants and getConstantSplatNode
1365     // returns NULL if all ops are undef;
1366     CN = BV->getConstantSplatNode();
1367     if (!CN)
1368       return false;
1369   }
1370 
1371   switch (getBooleanContents(N->getValueType(0))) {
1372   case UndefinedBooleanContent:
1373     return CN->getAPIntValue()[0];
1374   case ZeroOrOneBooleanContent:
1375     return CN->isOne();
1376   case ZeroOrNegativeOneBooleanContent:
1377     return CN->isAllOnesValue();
1378   }
1379 
1380   llvm_unreachable("Invalid boolean contents");
1381 }
1382 
1383 SDValue TargetLowering::getConstTrueVal(SelectionDAG &DAG, EVT VT,
1384                                         const SDLoc &DL) const {
1385   unsigned ElementWidth = VT.getScalarSizeInBits();
1386   APInt TrueInt =
1387       getBooleanContents(VT) == TargetLowering::ZeroOrOneBooleanContent
1388           ? APInt(ElementWidth, 1)
1389           : APInt::getAllOnesValue(ElementWidth);
1390   return DAG.getConstant(TrueInt, DL, VT);
1391 }
1392 
1393 bool TargetLowering::isConstFalseVal(const SDNode *N) const {
1394   if (!N)
1395     return false;
1396 
1397   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1398   if (!CN) {
1399     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1400     if (!BV)
1401       return false;
1402 
1403     // Only interested in constant splats, we don't care about undef
1404     // elements in identifying boolean constants and getConstantSplatNode
1405     // returns NULL if all ops are undef;
1406     CN = BV->getConstantSplatNode();
1407     if (!CN)
1408       return false;
1409   }
1410 
1411   if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
1412     return !CN->getAPIntValue()[0];
1413 
1414   return CN->isNullValue();
1415 }
1416 
1417 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
1418                                        bool SExt) const {
1419   if (VT == MVT::i1)
1420     return N->isOne();
1421 
1422   TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
1423   switch (Cnt) {
1424   case TargetLowering::ZeroOrOneBooleanContent:
1425     // An extended value of 1 is always true, unless its original type is i1,
1426     // in which case it will be sign extended to -1.
1427     return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
1428   case TargetLowering::UndefinedBooleanContent:
1429   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1430     return N->isAllOnesValue() && SExt;
1431   }
1432   llvm_unreachable("Unexpected enumeration.");
1433 }
1434 
1435 /// This helper function of SimplifySetCC tries to optimize the comparison when
1436 /// either operand of the SetCC node is a bitwise-and instruction.
1437 SDValue TargetLowering::simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
1438                                              ISD::CondCode Cond,
1439                                              DAGCombinerInfo &DCI,
1440                                              const SDLoc &DL) const {
1441   // Match these patterns in any of their permutations:
1442   // (X & Y) == Y
1443   // (X & Y) != Y
1444   if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
1445     std::swap(N0, N1);
1446 
1447   EVT OpVT = N0.getValueType();
1448   if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
1449       (Cond != ISD::SETEQ && Cond != ISD::SETNE))
1450     return SDValue();
1451 
1452   SDValue X, Y;
1453   if (N0.getOperand(0) == N1) {
1454     X = N0.getOperand(1);
1455     Y = N0.getOperand(0);
1456   } else if (N0.getOperand(1) == N1) {
1457     X = N0.getOperand(0);
1458     Y = N0.getOperand(1);
1459   } else {
1460     return SDValue();
1461   }
1462 
1463   SelectionDAG &DAG = DCI.DAG;
1464   SDValue Zero = DAG.getConstant(0, DL, OpVT);
1465   if (DAG.isKnownToBeAPowerOfTwo(Y)) {
1466     // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
1467     // Note that where Y is variable and is known to have at most one bit set
1468     // (for example, if it is Z & 1) we cannot do this; the expressions are not
1469     // equivalent when Y == 0.
1470     Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
1471     if (DCI.isBeforeLegalizeOps() ||
1472         isCondCodeLegal(Cond, N0.getSimpleValueType()))
1473       return DAG.getSetCC(DL, VT, N0, Zero, Cond);
1474   } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
1475     // If the target supports an 'and-not' or 'and-complement' logic operation,
1476     // try to use that to make a comparison operation more efficient.
1477     // But don't do this transform if the mask is a single bit because there are
1478     // more efficient ways to deal with that case (for example, 'bt' on x86 or
1479     // 'rlwinm' on PPC).
1480 
1481     // Bail out if the compare operand that we want to turn into a zero is
1482     // already a zero (otherwise, infinite loop).
1483     auto *YConst = dyn_cast<ConstantSDNode>(Y);
1484     if (YConst && YConst->isNullValue())
1485       return SDValue();
1486 
1487     // Transform this into: ~X & Y == 0.
1488     SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
1489     SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
1490     return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
1491   }
1492 
1493   return SDValue();
1494 }
1495 
1496 /// Try to simplify a setcc built with the specified operands and cc. If it is
1497 /// unable to simplify it, return a null SDValue.
1498 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
1499                                       ISD::CondCode Cond, bool foldBooleans,
1500                                       DAGCombinerInfo &DCI,
1501                                       const SDLoc &dl) const {
1502   SelectionDAG &DAG = DCI.DAG;
1503 
1504   // These setcc operations always fold.
1505   switch (Cond) {
1506   default: break;
1507   case ISD::SETFALSE:
1508   case ISD::SETFALSE2: return DAG.getConstant(0, dl, VT);
1509   case ISD::SETTRUE:
1510   case ISD::SETTRUE2: {
1511     TargetLowering::BooleanContent Cnt =
1512         getBooleanContents(N0->getValueType(0));
1513     return DAG.getConstant(
1514         Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1515         VT);
1516   }
1517   }
1518 
1519   // Ensure that the constant occurs on the RHS, and fold constant
1520   // comparisons.
1521   ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
1522   if (isa<ConstantSDNode>(N0.getNode()) &&
1523       (DCI.isBeforeLegalizeOps() ||
1524        isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
1525     return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
1526 
1527   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1528     const APInt &C1 = N1C->getAPIntValue();
1529 
1530     // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
1531     // equality comparison, then we're just comparing whether X itself is
1532     // zero.
1533     if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
1534         N0.getOperand(0).getOpcode() == ISD::CTLZ &&
1535         N0.getOperand(1).getOpcode() == ISD::Constant) {
1536       const APInt &ShAmt
1537         = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1538       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1539           ShAmt == Log2_32(N0.getValueSizeInBits())) {
1540         if ((C1 == 0) == (Cond == ISD::SETEQ)) {
1541           // (srl (ctlz x), 5) == 0  -> X != 0
1542           // (srl (ctlz x), 5) != 1  -> X != 0
1543           Cond = ISD::SETNE;
1544         } else {
1545           // (srl (ctlz x), 5) != 0  -> X == 0
1546           // (srl (ctlz x), 5) == 1  -> X == 0
1547           Cond = ISD::SETEQ;
1548         }
1549         SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
1550         return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
1551                             Zero, Cond);
1552       }
1553     }
1554 
1555     SDValue CTPOP = N0;
1556     // Look through truncs that don't change the value of a ctpop.
1557     if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
1558       CTPOP = N0.getOperand(0);
1559 
1560     if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
1561         (N0 == CTPOP ||
1562          N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) {
1563       EVT CTVT = CTPOP.getValueType();
1564       SDValue CTOp = CTPOP.getOperand(0);
1565 
1566       // (ctpop x) u< 2 -> (x & x-1) == 0
1567       // (ctpop x) u> 1 -> (x & x-1) != 0
1568       if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
1569         SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
1570                                   DAG.getConstant(1, dl, CTVT));
1571         SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
1572         ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
1573         return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
1574       }
1575 
1576       // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
1577     }
1578 
1579     // (zext x) == C --> x == (trunc C)
1580     // (sext x) == C --> x == (trunc C)
1581     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1582         DCI.isBeforeLegalize() && N0->hasOneUse()) {
1583       unsigned MinBits = N0.getValueSizeInBits();
1584       SDValue PreExt;
1585       bool Signed = false;
1586       if (N0->getOpcode() == ISD::ZERO_EXTEND) {
1587         // ZExt
1588         MinBits = N0->getOperand(0).getValueSizeInBits();
1589         PreExt = N0->getOperand(0);
1590       } else if (N0->getOpcode() == ISD::AND) {
1591         // DAGCombine turns costly ZExts into ANDs
1592         if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
1593           if ((C->getAPIntValue()+1).isPowerOf2()) {
1594             MinBits = C->getAPIntValue().countTrailingOnes();
1595             PreExt = N0->getOperand(0);
1596           }
1597       } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
1598         // SExt
1599         MinBits = N0->getOperand(0).getValueSizeInBits();
1600         PreExt = N0->getOperand(0);
1601         Signed = true;
1602       } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
1603         // ZEXTLOAD / SEXTLOAD
1604         if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
1605           MinBits = LN0->getMemoryVT().getSizeInBits();
1606           PreExt = N0;
1607         } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
1608           Signed = true;
1609           MinBits = LN0->getMemoryVT().getSizeInBits();
1610           PreExt = N0;
1611         }
1612       }
1613 
1614       // Figure out how many bits we need to preserve this constant.
1615       unsigned ReqdBits = Signed ?
1616         C1.getBitWidth() - C1.getNumSignBits() + 1 :
1617         C1.getActiveBits();
1618 
1619       // Make sure we're not losing bits from the constant.
1620       if (MinBits > 0 &&
1621           MinBits < C1.getBitWidth() &&
1622           MinBits >= ReqdBits) {
1623         EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
1624         if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
1625           // Will get folded away.
1626           SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
1627           if (MinBits == 1 && C1 == 1)
1628             // Invert the condition.
1629             return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1),
1630                                 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1631           SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
1632           return DAG.getSetCC(dl, VT, Trunc, C, Cond);
1633         }
1634 
1635         // If truncating the setcc operands is not desirable, we can still
1636         // simplify the expression in some cases:
1637         // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc)
1638         // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc))
1639         // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc))
1640         // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc)
1641         // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc))
1642         // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc)
1643         SDValue TopSetCC = N0->getOperand(0);
1644         unsigned N0Opc = N0->getOpcode();
1645         bool SExt = (N0Opc == ISD::SIGN_EXTEND);
1646         if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
1647             TopSetCC.getOpcode() == ISD::SETCC &&
1648             (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
1649             (isConstFalseVal(N1C) ||
1650              isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
1651 
1652           bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
1653                          (!N1C->isNullValue() && Cond == ISD::SETNE);
1654 
1655           if (!Inverse)
1656             return TopSetCC;
1657 
1658           ISD::CondCode InvCond = ISD::getSetCCInverse(
1659               cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
1660               TopSetCC.getOperand(0).getValueType().isInteger());
1661           return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
1662                                       TopSetCC.getOperand(1),
1663                                       InvCond);
1664 
1665         }
1666       }
1667     }
1668 
1669     // If the LHS is '(and load, const)', the RHS is 0,
1670     // the test is for equality or unsigned, and all 1 bits of the const are
1671     // in the same partial word, see if we can shorten the load.
1672     if (DCI.isBeforeLegalize() &&
1673         !ISD::isSignedIntSetCC(Cond) &&
1674         N0.getOpcode() == ISD::AND && C1 == 0 &&
1675         N0.getNode()->hasOneUse() &&
1676         isa<LoadSDNode>(N0.getOperand(0)) &&
1677         N0.getOperand(0).getNode()->hasOneUse() &&
1678         isa<ConstantSDNode>(N0.getOperand(1))) {
1679       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
1680       APInt bestMask;
1681       unsigned bestWidth = 0, bestOffset = 0;
1682       if (!Lod->isVolatile() && Lod->isUnindexed()) {
1683         unsigned origWidth = N0.getValueSizeInBits();
1684         unsigned maskWidth = origWidth;
1685         // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
1686         // 8 bits, but have to be careful...
1687         if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
1688           origWidth = Lod->getMemoryVT().getSizeInBits();
1689         const APInt &Mask =
1690           cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1691         for (unsigned width = origWidth / 2; width>=8; width /= 2) {
1692           APInt newMask = APInt::getLowBitsSet(maskWidth, width);
1693           for (unsigned offset=0; offset<origWidth/width; offset++) {
1694             if ((newMask & Mask) == Mask) {
1695               if (!DAG.getDataLayout().isLittleEndian())
1696                 bestOffset = (origWidth/width - offset - 1) * (width/8);
1697               else
1698                 bestOffset = (uint64_t)offset * (width/8);
1699               bestMask = Mask.lshr(offset * (width/8) * 8);
1700               bestWidth = width;
1701               break;
1702             }
1703             newMask = newMask << width;
1704           }
1705         }
1706       }
1707       if (bestWidth) {
1708         EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
1709         if (newVT.isRound()) {
1710           EVT PtrType = Lod->getOperand(1).getValueType();
1711           SDValue Ptr = Lod->getBasePtr();
1712           if (bestOffset != 0)
1713             Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
1714                               DAG.getConstant(bestOffset, dl, PtrType));
1715           unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
1716           SDValue NewLoad = DAG.getLoad(
1717               newVT, dl, Lod->getChain(), Ptr,
1718               Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign);
1719           return DAG.getSetCC(dl, VT,
1720                               DAG.getNode(ISD::AND, dl, newVT, NewLoad,
1721                                       DAG.getConstant(bestMask.trunc(bestWidth),
1722                                                       dl, newVT)),
1723                               DAG.getConstant(0LL, dl, newVT), Cond);
1724         }
1725       }
1726     }
1727 
1728     // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
1729     if (N0.getOpcode() == ISD::ZERO_EXTEND) {
1730       unsigned InSize = N0.getOperand(0).getValueSizeInBits();
1731 
1732       // If the comparison constant has bits in the upper part, the
1733       // zero-extended value could never match.
1734       if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
1735                                               C1.getBitWidth() - InSize))) {
1736         switch (Cond) {
1737         case ISD::SETUGT:
1738         case ISD::SETUGE:
1739         case ISD::SETEQ: return DAG.getConstant(0, dl, VT);
1740         case ISD::SETULT:
1741         case ISD::SETULE:
1742         case ISD::SETNE: return DAG.getConstant(1, dl, VT);
1743         case ISD::SETGT:
1744         case ISD::SETGE:
1745           // True if the sign bit of C1 is set.
1746           return DAG.getConstant(C1.isNegative(), dl, VT);
1747         case ISD::SETLT:
1748         case ISD::SETLE:
1749           // True if the sign bit of C1 isn't set.
1750           return DAG.getConstant(C1.isNonNegative(), dl, VT);
1751         default:
1752           break;
1753         }
1754       }
1755 
1756       // Otherwise, we can perform the comparison with the low bits.
1757       switch (Cond) {
1758       case ISD::SETEQ:
1759       case ISD::SETNE:
1760       case ISD::SETUGT:
1761       case ISD::SETUGE:
1762       case ISD::SETULT:
1763       case ISD::SETULE: {
1764         EVT newVT = N0.getOperand(0).getValueType();
1765         if (DCI.isBeforeLegalizeOps() ||
1766             (isOperationLegal(ISD::SETCC, newVT) &&
1767              getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
1768           EVT NewSetCCVT =
1769               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), newVT);
1770           SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
1771 
1772           SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
1773                                           NewConst, Cond);
1774           return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
1775         }
1776         break;
1777       }
1778       default:
1779         break;   // todo, be more careful with signed comparisons
1780       }
1781     } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1782                (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1783       EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
1784       unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
1785       EVT ExtDstTy = N0.getValueType();
1786       unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
1787 
1788       // If the constant doesn't fit into the number of bits for the source of
1789       // the sign extension, it is impossible for both sides to be equal.
1790       if (C1.getMinSignedBits() > ExtSrcTyBits)
1791         return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
1792 
1793       SDValue ZextOp;
1794       EVT Op0Ty = N0.getOperand(0).getValueType();
1795       if (Op0Ty == ExtSrcTy) {
1796         ZextOp = N0.getOperand(0);
1797       } else {
1798         APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
1799         ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
1800                               DAG.getConstant(Imm, dl, Op0Ty));
1801       }
1802       if (!DCI.isCalledByLegalizer())
1803         DCI.AddToWorklist(ZextOp.getNode());
1804       // Otherwise, make this a use of a zext.
1805       return DAG.getSetCC(dl, VT, ZextOp,
1806                           DAG.getConstant(C1 & APInt::getLowBitsSet(
1807                                                               ExtDstTyBits,
1808                                                               ExtSrcTyBits),
1809                                           dl, ExtDstTy),
1810                           Cond);
1811     } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
1812                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1813       // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
1814       if (N0.getOpcode() == ISD::SETCC &&
1815           isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
1816         bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1);
1817         if (TrueWhenTrue)
1818           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
1819         // Invert the condition.
1820         ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
1821         CC = ISD::getSetCCInverse(CC,
1822                                   N0.getOperand(0).getValueType().isInteger());
1823         if (DCI.isBeforeLegalizeOps() ||
1824             isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
1825           return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
1826       }
1827 
1828       if ((N0.getOpcode() == ISD::XOR ||
1829            (N0.getOpcode() == ISD::AND &&
1830             N0.getOperand(0).getOpcode() == ISD::XOR &&
1831             N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
1832           isa<ConstantSDNode>(N0.getOperand(1)) &&
1833           cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
1834         // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
1835         // can only do this if the top bits are known zero.
1836         unsigned BitWidth = N0.getValueSizeInBits();
1837         if (DAG.MaskedValueIsZero(N0,
1838                                   APInt::getHighBitsSet(BitWidth,
1839                                                         BitWidth-1))) {
1840           // Okay, get the un-inverted input value.
1841           SDValue Val;
1842           if (N0.getOpcode() == ISD::XOR)
1843             Val = N0.getOperand(0);
1844           else {
1845             assert(N0.getOpcode() == ISD::AND &&
1846                     N0.getOperand(0).getOpcode() == ISD::XOR);
1847             // ((X^1)&1)^1 -> X & 1
1848             Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
1849                               N0.getOperand(0).getOperand(0),
1850                               N0.getOperand(1));
1851           }
1852 
1853           return DAG.getSetCC(dl, VT, Val, N1,
1854                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1855         }
1856       } else if (N1C->getAPIntValue() == 1 &&
1857                  (VT == MVT::i1 ||
1858                   getBooleanContents(N0->getValueType(0)) ==
1859                       ZeroOrOneBooleanContent)) {
1860         SDValue Op0 = N0;
1861         if (Op0.getOpcode() == ISD::TRUNCATE)
1862           Op0 = Op0.getOperand(0);
1863 
1864         if ((Op0.getOpcode() == ISD::XOR) &&
1865             Op0.getOperand(0).getOpcode() == ISD::SETCC &&
1866             Op0.getOperand(1).getOpcode() == ISD::SETCC) {
1867           // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
1868           Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
1869           return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1),
1870                               Cond);
1871         }
1872         if (Op0.getOpcode() == ISD::AND &&
1873             isa<ConstantSDNode>(Op0.getOperand(1)) &&
1874             cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) {
1875           // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
1876           if (Op0.getValueType().bitsGT(VT))
1877             Op0 = DAG.getNode(ISD::AND, dl, VT,
1878                           DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
1879                           DAG.getConstant(1, dl, VT));
1880           else if (Op0.getValueType().bitsLT(VT))
1881             Op0 = DAG.getNode(ISD::AND, dl, VT,
1882                         DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
1883                         DAG.getConstant(1, dl, VT));
1884 
1885           return DAG.getSetCC(dl, VT, Op0,
1886                               DAG.getConstant(0, dl, Op0.getValueType()),
1887                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1888         }
1889         if (Op0.getOpcode() == ISD::AssertZext &&
1890             cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
1891           return DAG.getSetCC(dl, VT, Op0,
1892                               DAG.getConstant(0, dl, Op0.getValueType()),
1893                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1894       }
1895     }
1896 
1897     APInt MinVal, MaxVal;
1898     unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
1899     if (ISD::isSignedIntSetCC(Cond)) {
1900       MinVal = APInt::getSignedMinValue(OperandBitSize);
1901       MaxVal = APInt::getSignedMaxValue(OperandBitSize);
1902     } else {
1903       MinVal = APInt::getMinValue(OperandBitSize);
1904       MaxVal = APInt::getMaxValue(OperandBitSize);
1905     }
1906 
1907     // Canonicalize GE/LE comparisons to use GT/LT comparisons.
1908     if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
1909       if (C1 == MinVal) return DAG.getConstant(1, dl, VT);  // X >= MIN --> true
1910       // X >= C0 --> X > (C0 - 1)
1911       APInt C = C1 - 1;
1912       ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
1913       if ((DCI.isBeforeLegalizeOps() ||
1914            isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1915           (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1916                                 isLegalICmpImmediate(C.getSExtValue())))) {
1917         return DAG.getSetCC(dl, VT, N0,
1918                             DAG.getConstant(C, dl, N1.getValueType()),
1919                             NewCC);
1920       }
1921     }
1922 
1923     if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
1924       if (C1 == MaxVal) return DAG.getConstant(1, dl, VT);  // X <= MAX --> true
1925       // X <= C0 --> X < (C0 + 1)
1926       APInt C = C1 + 1;
1927       ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
1928       if ((DCI.isBeforeLegalizeOps() ||
1929            isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1930           (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1931                                 isLegalICmpImmediate(C.getSExtValue())))) {
1932         return DAG.getSetCC(dl, VT, N0,
1933                             DAG.getConstant(C, dl, N1.getValueType()),
1934                             NewCC);
1935       }
1936     }
1937 
1938     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
1939       return DAG.getConstant(0, dl, VT);      // X < MIN --> false
1940     if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
1941       return DAG.getConstant(1, dl, VT);      // X >= MIN --> true
1942     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
1943       return DAG.getConstant(0, dl, VT);      // X > MAX --> false
1944     if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
1945       return DAG.getConstant(1, dl, VT);      // X <= MAX --> true
1946 
1947     // Canonicalize setgt X, Min --> setne X, Min
1948     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
1949       return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1950     // Canonicalize setlt X, Max --> setne X, Max
1951     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
1952       return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1953 
1954     // If we have setult X, 1, turn it into seteq X, 0
1955     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
1956       return DAG.getSetCC(dl, VT, N0,
1957                           DAG.getConstant(MinVal, dl, N0.getValueType()),
1958                           ISD::SETEQ);
1959     // If we have setugt X, Max-1, turn it into seteq X, Max
1960     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
1961       return DAG.getSetCC(dl, VT, N0,
1962                           DAG.getConstant(MaxVal, dl, N0.getValueType()),
1963                           ISD::SETEQ);
1964 
1965     // If we have "setcc X, C0", check to see if we can shrink the immediate
1966     // by changing cc.
1967 
1968     // SETUGT X, SINTMAX  -> SETLT X, 0
1969     if (Cond == ISD::SETUGT &&
1970         C1 == APInt::getSignedMaxValue(OperandBitSize))
1971       return DAG.getSetCC(dl, VT, N0,
1972                           DAG.getConstant(0, dl, N1.getValueType()),
1973                           ISD::SETLT);
1974 
1975     // SETULT X, SINTMIN  -> SETGT X, -1
1976     if (Cond == ISD::SETULT &&
1977         C1 == APInt::getSignedMinValue(OperandBitSize)) {
1978       SDValue ConstMinusOne =
1979           DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
1980                           N1.getValueType());
1981       return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
1982     }
1983 
1984     // Fold bit comparisons when we can.
1985     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1986         (VT == N0.getValueType() ||
1987          (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
1988         N0.getOpcode() == ISD::AND) {
1989       auto &DL = DAG.getDataLayout();
1990       if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1991         EVT ShiftTy = DCI.isBeforeLegalize()
1992                           ? getPointerTy(DL)
1993                           : getShiftAmountTy(N0.getValueType(), DL);
1994         if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
1995           // Perform the xform if the AND RHS is a single bit.
1996           if (AndRHS->getAPIntValue().isPowerOf2()) {
1997             return DAG.getNode(ISD::TRUNCATE, dl, VT,
1998                               DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1999                    DAG.getConstant(AndRHS->getAPIntValue().logBase2(), dl,
2000                                    ShiftTy)));
2001           }
2002         } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
2003           // (X & 8) == 8  -->  (X & 8) >> 3
2004           // Perform the xform if C1 is a single bit.
2005           if (C1.isPowerOf2()) {
2006             return DAG.getNode(ISD::TRUNCATE, dl, VT,
2007                                DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
2008                                       DAG.getConstant(C1.logBase2(), dl,
2009                                                       ShiftTy)));
2010           }
2011         }
2012       }
2013     }
2014 
2015     if (C1.getMinSignedBits() <= 64 &&
2016         !isLegalICmpImmediate(C1.getSExtValue())) {
2017       // (X & -256) == 256 -> (X >> 8) == 1
2018       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2019           N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
2020         if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2021           const APInt &AndRHSC = AndRHS->getAPIntValue();
2022           if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
2023             unsigned ShiftBits = AndRHSC.countTrailingZeros();
2024             auto &DL = DAG.getDataLayout();
2025             EVT ShiftTy = DCI.isBeforeLegalize()
2026                               ? getPointerTy(DL)
2027                               : getShiftAmountTy(N0.getValueType(), DL);
2028             EVT CmpTy = N0.getValueType();
2029             SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
2030                                         DAG.getConstant(ShiftBits, dl,
2031                                                         ShiftTy));
2032             SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, CmpTy);
2033             return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
2034           }
2035         }
2036       } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
2037                  Cond == ISD::SETULE || Cond == ISD::SETUGT) {
2038         bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
2039         // X <  0x100000000 -> (X >> 32) <  1
2040         // X >= 0x100000000 -> (X >> 32) >= 1
2041         // X <= 0x0ffffffff -> (X >> 32) <  1
2042         // X >  0x0ffffffff -> (X >> 32) >= 1
2043         unsigned ShiftBits;
2044         APInt NewC = C1;
2045         ISD::CondCode NewCond = Cond;
2046         if (AdjOne) {
2047           ShiftBits = C1.countTrailingOnes();
2048           NewC = NewC + 1;
2049           NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2050         } else {
2051           ShiftBits = C1.countTrailingZeros();
2052         }
2053         NewC = NewC.lshr(ShiftBits);
2054         if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
2055           isLegalICmpImmediate(NewC.getSExtValue())) {
2056           auto &DL = DAG.getDataLayout();
2057           EVT ShiftTy = DCI.isBeforeLegalize()
2058                             ? getPointerTy(DL)
2059                             : getShiftAmountTy(N0.getValueType(), DL);
2060           EVT CmpTy = N0.getValueType();
2061           SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
2062                                       DAG.getConstant(ShiftBits, dl, ShiftTy));
2063           SDValue CmpRHS = DAG.getConstant(NewC, dl, CmpTy);
2064           return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
2065         }
2066       }
2067     }
2068   }
2069 
2070   if (isa<ConstantFPSDNode>(N0.getNode())) {
2071     // Constant fold or commute setcc.
2072     SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
2073     if (O.getNode()) return O;
2074   } else if (auto *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
2075     // If the RHS of an FP comparison is a constant, simplify it away in
2076     // some cases.
2077     if (CFP->getValueAPF().isNaN()) {
2078       // If an operand is known to be a nan, we can fold it.
2079       switch (ISD::getUnorderedFlavor(Cond)) {
2080       default: llvm_unreachable("Unknown flavor!");
2081       case 0:  // Known false.
2082         return DAG.getConstant(0, dl, VT);
2083       case 1:  // Known true.
2084         return DAG.getConstant(1, dl, VT);
2085       case 2:  // Undefined.
2086         return DAG.getUNDEF(VT);
2087       }
2088     }
2089 
2090     // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
2091     // constant if knowing that the operand is non-nan is enough.  We prefer to
2092     // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
2093     // materialize 0.0.
2094     if (Cond == ISD::SETO || Cond == ISD::SETUO)
2095       return DAG.getSetCC(dl, VT, N0, N0, Cond);
2096 
2097     // setcc (fneg x), C -> setcc swap(pred) x, -C
2098     if (N0.getOpcode() == ISD::FNEG) {
2099       ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond);
2100       if (DCI.isBeforeLegalizeOps() ||
2101           isCondCodeLegal(SwapCond, N0.getSimpleValueType())) {
2102         SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1);
2103         return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond);
2104       }
2105     }
2106 
2107     // If the condition is not legal, see if we can find an equivalent one
2108     // which is legal.
2109     if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
2110       // If the comparison was an awkward floating-point == or != and one of
2111       // the comparison operands is infinity or negative infinity, convert the
2112       // condition to a less-awkward <= or >=.
2113       if (CFP->getValueAPF().isInfinity()) {
2114         if (CFP->getValueAPF().isNegative()) {
2115           if (Cond == ISD::SETOEQ &&
2116               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
2117             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
2118           if (Cond == ISD::SETUEQ &&
2119               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
2120             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
2121           if (Cond == ISD::SETUNE &&
2122               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
2123             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
2124           if (Cond == ISD::SETONE &&
2125               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
2126             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
2127         } else {
2128           if (Cond == ISD::SETOEQ &&
2129               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
2130             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
2131           if (Cond == ISD::SETUEQ &&
2132               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
2133             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
2134           if (Cond == ISD::SETUNE &&
2135               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
2136             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
2137           if (Cond == ISD::SETONE &&
2138               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
2139             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
2140         }
2141       }
2142     }
2143   }
2144 
2145   if (N0 == N1) {
2146     // The sext(setcc()) => setcc() optimization relies on the appropriate
2147     // constant being emitted.
2148     uint64_t EqVal = 0;
2149     switch (getBooleanContents(N0.getValueType())) {
2150     case UndefinedBooleanContent:
2151     case ZeroOrOneBooleanContent:
2152       EqVal = ISD::isTrueWhenEqual(Cond);
2153       break;
2154     case ZeroOrNegativeOneBooleanContent:
2155       EqVal = ISD::isTrueWhenEqual(Cond) ? -1 : 0;
2156       break;
2157     }
2158 
2159     // We can always fold X == X for integer setcc's.
2160     if (N0.getValueType().isInteger()) {
2161       return DAG.getConstant(EqVal, dl, VT);
2162     }
2163     unsigned UOF = ISD::getUnorderedFlavor(Cond);
2164     if (UOF == 2)   // FP operators that are undefined on NaNs.
2165       return DAG.getConstant(EqVal, dl, VT);
2166     if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
2167       return DAG.getConstant(EqVal, dl, VT);
2168     // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
2169     // if it is not already.
2170     ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
2171     if (NewCond != Cond && (DCI.isBeforeLegalizeOps() ||
2172           getCondCodeAction(NewCond, N0.getSimpleValueType()) == Legal))
2173       return DAG.getSetCC(dl, VT, N0, N1, NewCond);
2174   }
2175 
2176   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2177       N0.getValueType().isInteger()) {
2178     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
2179         N0.getOpcode() == ISD::XOR) {
2180       // Simplify (X+Y) == (X+Z) -->  Y == Z
2181       if (N0.getOpcode() == N1.getOpcode()) {
2182         if (N0.getOperand(0) == N1.getOperand(0))
2183           return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
2184         if (N0.getOperand(1) == N1.getOperand(1))
2185           return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
2186         if (DAG.isCommutativeBinOp(N0.getOpcode())) {
2187           // If X op Y == Y op X, try other combinations.
2188           if (N0.getOperand(0) == N1.getOperand(1))
2189             return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
2190                                 Cond);
2191           if (N0.getOperand(1) == N1.getOperand(0))
2192             return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
2193                                 Cond);
2194         }
2195       }
2196 
2197       // If RHS is a legal immediate value for a compare instruction, we need
2198       // to be careful about increasing register pressure needlessly.
2199       bool LegalRHSImm = false;
2200 
2201       if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
2202         if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2203           // Turn (X+C1) == C2 --> X == C2-C1
2204           if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
2205             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2206                                 DAG.getConstant(RHSC->getAPIntValue()-
2207                                                 LHSR->getAPIntValue(),
2208                                 dl, N0.getValueType()), Cond);
2209           }
2210 
2211           // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
2212           if (N0.getOpcode() == ISD::XOR)
2213             // If we know that all of the inverted bits are zero, don't bother
2214             // performing the inversion.
2215             if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
2216               return
2217                 DAG.getSetCC(dl, VT, N0.getOperand(0),
2218                              DAG.getConstant(LHSR->getAPIntValue() ^
2219                                                RHSC->getAPIntValue(),
2220                                              dl, N0.getValueType()),
2221                              Cond);
2222         }
2223 
2224         // Turn (C1-X) == C2 --> X == C1-C2
2225         if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
2226           if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
2227             return
2228               DAG.getSetCC(dl, VT, N0.getOperand(1),
2229                            DAG.getConstant(SUBC->getAPIntValue() -
2230                                              RHSC->getAPIntValue(),
2231                                            dl, N0.getValueType()),
2232                            Cond);
2233           }
2234         }
2235 
2236         // Could RHSC fold directly into a compare?
2237         if (RHSC->getValueType(0).getSizeInBits() <= 64)
2238           LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
2239       }
2240 
2241       // Simplify (X+Z) == X -->  Z == 0
2242       // Don't do this if X is an immediate that can fold into a cmp
2243       // instruction and X+Z has other uses. It could be an induction variable
2244       // chain, and the transform would increase register pressure.
2245       if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
2246         if (N0.getOperand(0) == N1)
2247           return DAG.getSetCC(dl, VT, N0.getOperand(1),
2248                               DAG.getConstant(0, dl, N0.getValueType()), Cond);
2249         if (N0.getOperand(1) == N1) {
2250           if (DAG.isCommutativeBinOp(N0.getOpcode()))
2251             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2252                                 DAG.getConstant(0, dl, N0.getValueType()),
2253                                 Cond);
2254           if (N0.getNode()->hasOneUse()) {
2255             assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
2256             auto &DL = DAG.getDataLayout();
2257             // (Z-X) == X  --> Z == X<<1
2258             SDValue SH = DAG.getNode(
2259                 ISD::SHL, dl, N1.getValueType(), N1,
2260                 DAG.getConstant(1, dl,
2261                                 getShiftAmountTy(N1.getValueType(), DL)));
2262             if (!DCI.isCalledByLegalizer())
2263               DCI.AddToWorklist(SH.getNode());
2264             return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
2265           }
2266         }
2267       }
2268     }
2269 
2270     if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
2271         N1.getOpcode() == ISD::XOR) {
2272       // Simplify  X == (X+Z) -->  Z == 0
2273       if (N1.getOperand(0) == N0)
2274         return DAG.getSetCC(dl, VT, N1.getOperand(1),
2275                         DAG.getConstant(0, dl, N1.getValueType()), Cond);
2276       if (N1.getOperand(1) == N0) {
2277         if (DAG.isCommutativeBinOp(N1.getOpcode()))
2278           return DAG.getSetCC(dl, VT, N1.getOperand(0),
2279                           DAG.getConstant(0, dl, N1.getValueType()), Cond);
2280         if (N1.getNode()->hasOneUse()) {
2281           assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
2282           auto &DL = DAG.getDataLayout();
2283           // X == (Z-X)  --> X<<1 == Z
2284           SDValue SH = DAG.getNode(
2285               ISD::SHL, dl, N1.getValueType(), N0,
2286               DAG.getConstant(1, dl, getShiftAmountTy(N0.getValueType(), DL)));
2287           if (!DCI.isCalledByLegalizer())
2288             DCI.AddToWorklist(SH.getNode());
2289           return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
2290         }
2291       }
2292     }
2293 
2294     if (SDValue V = simplifySetCCWithAnd(VT, N0, N1, Cond, DCI, dl))
2295       return V;
2296   }
2297 
2298   // Fold away ALL boolean setcc's.
2299   SDValue Temp;
2300   if (N0.getValueType() == MVT::i1 && foldBooleans) {
2301     switch (Cond) {
2302     default: llvm_unreachable("Unknown integer setcc!");
2303     case ISD::SETEQ:  // X == Y  -> ~(X^Y)
2304       Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
2305       N0 = DAG.getNOT(dl, Temp, MVT::i1);
2306       if (!DCI.isCalledByLegalizer())
2307         DCI.AddToWorklist(Temp.getNode());
2308       break;
2309     case ISD::SETNE:  // X != Y   -->  (X^Y)
2310       N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
2311       break;
2312     case ISD::SETGT:  // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
2313     case ISD::SETULT: // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
2314       Temp = DAG.getNOT(dl, N0, MVT::i1);
2315       N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
2316       if (!DCI.isCalledByLegalizer())
2317         DCI.AddToWorklist(Temp.getNode());
2318       break;
2319     case ISD::SETLT:  // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
2320     case ISD::SETUGT: // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
2321       Temp = DAG.getNOT(dl, N1, MVT::i1);
2322       N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
2323       if (!DCI.isCalledByLegalizer())
2324         DCI.AddToWorklist(Temp.getNode());
2325       break;
2326     case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
2327     case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
2328       Temp = DAG.getNOT(dl, N0, MVT::i1);
2329       N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
2330       if (!DCI.isCalledByLegalizer())
2331         DCI.AddToWorklist(Temp.getNode());
2332       break;
2333     case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
2334     case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
2335       Temp = DAG.getNOT(dl, N1, MVT::i1);
2336       N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
2337       break;
2338     }
2339     if (VT != MVT::i1) {
2340       if (!DCI.isCalledByLegalizer())
2341         DCI.AddToWorklist(N0.getNode());
2342       // FIXME: If running after legalize, we probably can't do this.
2343       N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0);
2344     }
2345     return N0;
2346   }
2347 
2348   // Could not fold it.
2349   return SDValue();
2350 }
2351 
2352 /// Returns true (and the GlobalValue and the offset) if the node is a
2353 /// GlobalAddress + offset.
2354 bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
2355                                     int64_t &Offset) const {
2356   if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
2357     GA = GASD->getGlobal();
2358     Offset += GASD->getOffset();
2359     return true;
2360   }
2361 
2362   if (N->getOpcode() == ISD::ADD) {
2363     SDValue N1 = N->getOperand(0);
2364     SDValue N2 = N->getOperand(1);
2365     if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
2366       if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
2367         Offset += V->getSExtValue();
2368         return true;
2369       }
2370     } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
2371       if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
2372         Offset += V->getSExtValue();
2373         return true;
2374       }
2375     }
2376   }
2377 
2378   return false;
2379 }
2380 
2381 SDValue TargetLowering::PerformDAGCombine(SDNode *N,
2382                                           DAGCombinerInfo &DCI) const {
2383   // Default implementation: no optimization.
2384   return SDValue();
2385 }
2386 
2387 //===----------------------------------------------------------------------===//
2388 //  Inline Assembler Implementation Methods
2389 //===----------------------------------------------------------------------===//
2390 
2391 TargetLowering::ConstraintType
2392 TargetLowering::getConstraintType(StringRef Constraint) const {
2393   unsigned S = Constraint.size();
2394 
2395   if (S == 1) {
2396     switch (Constraint[0]) {
2397     default: break;
2398     case 'r': return C_RegisterClass;
2399     case 'm':    // memory
2400     case 'o':    // offsetable
2401     case 'V':    // not offsetable
2402       return C_Memory;
2403     case 'i':    // Simple Integer or Relocatable Constant
2404     case 'n':    // Simple Integer
2405     case 'E':    // Floating Point Constant
2406     case 'F':    // Floating Point Constant
2407     case 's':    // Relocatable Constant
2408     case 'p':    // Address.
2409     case 'X':    // Allow ANY value.
2410     case 'I':    // Target registers.
2411     case 'J':
2412     case 'K':
2413     case 'L':
2414     case 'M':
2415     case 'N':
2416     case 'O':
2417     case 'P':
2418     case '<':
2419     case '>':
2420       return C_Other;
2421     }
2422   }
2423 
2424   if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
2425     if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
2426       return C_Memory;
2427     return C_Register;
2428   }
2429   return C_Unknown;
2430 }
2431 
2432 /// Try to replace an X constraint, which matches anything, with another that
2433 /// has more specific requirements based on the type of the corresponding
2434 /// operand.
2435 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{
2436   if (ConstraintVT.isInteger())
2437     return "r";
2438   if (ConstraintVT.isFloatingPoint())
2439     return "f";      // works for many targets
2440   return nullptr;
2441 }
2442 
2443 /// Lower the specified operand into the Ops vector.
2444 /// If it is invalid, don't add anything to Ops.
2445 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2446                                                   std::string &Constraint,
2447                                                   std::vector<SDValue> &Ops,
2448                                                   SelectionDAG &DAG) const {
2449 
2450   if (Constraint.length() > 1) return;
2451 
2452   char ConstraintLetter = Constraint[0];
2453   switch (ConstraintLetter) {
2454   default: break;
2455   case 'X':     // Allows any operand; labels (basic block) use this.
2456     if (Op.getOpcode() == ISD::BasicBlock) {
2457       Ops.push_back(Op);
2458       return;
2459     }
2460     LLVM_FALLTHROUGH;
2461   case 'i':    // Simple Integer or Relocatable Constant
2462   case 'n':    // Simple Integer
2463   case 's': {  // Relocatable Constant
2464     // These operands are interested in values of the form (GV+C), where C may
2465     // be folded in as an offset of GV, or it may be explicitly added.  Also, it
2466     // is possible and fine if either GV or C are missing.
2467     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2468     GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
2469 
2470     // If we have "(add GV, C)", pull out GV/C
2471     if (Op.getOpcode() == ISD::ADD) {
2472       C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2473       GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
2474       if (!C || !GA) {
2475         C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
2476         GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
2477       }
2478       if (!C || !GA) {
2479         C = nullptr;
2480         GA = nullptr;
2481       }
2482     }
2483 
2484     // If we find a valid operand, map to the TargetXXX version so that the
2485     // value itself doesn't get selected.
2486     if (GA) {   // Either &GV   or   &GV+C
2487       if (ConstraintLetter != 'n') {
2488         int64_t Offs = GA->getOffset();
2489         if (C) Offs += C->getZExtValue();
2490         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
2491                                                  C ? SDLoc(C) : SDLoc(),
2492                                                  Op.getValueType(), Offs));
2493       }
2494       return;
2495     }
2496     if (C) {   // just C, no GV.
2497       // Simple constants are not allowed for 's'.
2498       if (ConstraintLetter != 's') {
2499         // gcc prints these as sign extended.  Sign extend value to 64 bits
2500         // now; without this it would get ZExt'd later in
2501         // ScheduleDAGSDNodes::EmitNode, which is very generic.
2502         Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
2503                                             SDLoc(C), MVT::i64));
2504       }
2505       return;
2506     }
2507     break;
2508   }
2509   }
2510 }
2511 
2512 std::pair<unsigned, const TargetRegisterClass *>
2513 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
2514                                              StringRef Constraint,
2515                                              MVT VT) const {
2516   if (Constraint.empty() || Constraint[0] != '{')
2517     return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
2518   assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
2519 
2520   // Remove the braces from around the name.
2521   StringRef RegName(Constraint.data()+1, Constraint.size()-2);
2522 
2523   std::pair<unsigned, const TargetRegisterClass*> R =
2524     std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
2525 
2526   // Figure out which register class contains this reg.
2527   for (const TargetRegisterClass *RC : RI->regclasses()) {
2528     // If none of the value types for this register class are valid, we
2529     // can't use it.  For example, 64-bit reg classes on 32-bit targets.
2530     if (!isLegalRC(RC))
2531       continue;
2532 
2533     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
2534          I != E; ++I) {
2535       if (RegName.equals_lower(RI->getRegAsmName(*I))) {
2536         std::pair<unsigned, const TargetRegisterClass*> S =
2537           std::make_pair(*I, RC);
2538 
2539         // If this register class has the requested value type, return it,
2540         // otherwise keep searching and return the first class found
2541         // if no other is found which explicitly has the requested type.
2542         if (RC->hasType(VT))
2543           return S;
2544         else if (!R.second)
2545           R = S;
2546       }
2547     }
2548   }
2549 
2550   return R;
2551 }
2552 
2553 //===----------------------------------------------------------------------===//
2554 // Constraint Selection.
2555 
2556 /// Return true of this is an input operand that is a matching constraint like
2557 /// "4".
2558 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
2559   assert(!ConstraintCode.empty() && "No known constraint!");
2560   return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
2561 }
2562 
2563 /// If this is an input matching constraint, this method returns the output
2564 /// operand it matches.
2565 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
2566   assert(!ConstraintCode.empty() && "No known constraint!");
2567   return atoi(ConstraintCode.c_str());
2568 }
2569 
/// Split up the constraint string from the inline assembly value into the
/// specific constraints and their prefixes, and also tie in the associated
/// operand values.
/// If this returns an empty vector, and if the constraint string itself
/// isn't empty, there was an error parsing.
TargetLowering::AsmOperandInfoVector
TargetLowering::ParseConstraints(const DataLayout &DL,
                                 const TargetRegisterInfo *TRI,
                                 ImmutableCallSite CS) const {
  /// Information about all of the constraints.
  AsmOperandInfoVector ConstraintOperands;
  const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
  unsigned maCount = 0; // Largest number of multiple alternative constraints.

  // Do a prepass over the constraints, canonicalizing them, and building up the
  // ConstraintOperands list.
  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0;   // ResNo - The result number of the next output.

  for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    ConstraintOperands.emplace_back(std::move(CI));
    AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Update multiple alternative constraint count.
    if (OpInfo.multipleAlternatives.size() > maCount)
      maCount = OpInfo.multipleAlternatives.size();

    // Default until we can compute a concrete type below.
    OpInfo.ConstraintVT = MVT::Other;

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      // Indirect outputs just consume an argument.
      if (OpInfo.isIndirect) {
        OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
        break;
      }

      // The return value of the call is this value.  As such, there is no
      // corresponding argument.
      assert(!CS.getType()->isVoidTy() &&
             "Bad inline asm!");
      // Multiple direct outputs are returned as elements of a struct; a
      // single output is the call's result type itself.
      if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
        OpInfo.ConstraintVT =
            getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
      }
      ++ResNo;
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    if (OpInfo.CallOperandVal) {
      llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
      if (OpInfo.isIndirect) {
        // Indirect operands are passed by pointer; the constraint applies to
        // the pointee type.
        llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
        if (!PtrTy)
          report_fatal_error("Indirect operand for inline asm not a pointer!");
        OpTy = PtrTy->getElementType();
      }

      // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
      if (StructType *STy = dyn_cast<StructType>(OpTy))
        if (STy->getNumElements() == 1)
          OpTy = STy->getElementType(0);

      // If OpTy is not a single value, it may be a struct/union that we
      // can tile with integers.
      if (!OpTy->isSingleValueType() && OpTy->isSized()) {
        unsigned BitSize = DL.getTypeSizeInBits(OpTy);
        // Only the listed bit-widths have a matching integer MVT; anything
        // else keeps MVT::Other.
        switch (BitSize) {
        default: break;
        case 1:
        case 8:
        case 16:
        case 32:
        case 64:
        case 128:
          OpInfo.ConstraintVT =
            MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
          break;
        }
      } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
        // Model pointers as integers of pointer width for their address space.
        unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
        OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
      } else {
        OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
      }
    }
  }

  // If we have multiple alternative constraints, select the best alternative.
  if (!ConstraintOperands.empty()) {
    if (maCount) {
      unsigned bestMAIndex = 0;
      int bestWeight = -1;
      // weight:  -1 = invalid match, and 0 = so-so match to 5 = good match.
      int weight = -1;
      unsigned maIndex;
      // Compute the sums of the weights for each alternative, keeping track
      // of the best (highest weight) one so far.
      for (maIndex = 0; maIndex < maCount; ++maIndex) {
        int weightSum = 0;
        for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
            cIndex != eIndex; ++cIndex) {
          AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
          // Clobbers don't participate in alternative selection.
          if (OpInfo.Type == InlineAsm::isClobber)
            continue;

          // If this is an output operand with a matching input operand,
          // look up the matching input. If their types mismatch, e.g. one
          // is an integer, the other is floating point, or their sizes are
          // different, flag it as an maCantMatch.
          if (OpInfo.hasMatchingInput()) {
            AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
            if (OpInfo.ConstraintVT != Input.ConstraintVT) {
              if ((OpInfo.ConstraintVT.isInteger() !=
                   Input.ConstraintVT.isInteger()) ||
                  (OpInfo.ConstraintVT.getSizeInBits() !=
                   Input.ConstraintVT.getSizeInBits())) {
                weightSum = -1;  // Can't match.
                break;
              }
            }
          }
          weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
          // Any single unmatchable operand disqualifies this alternative.
          if (weight == -1) {
            weightSum = -1;
            break;
          }
          weightSum += weight;
        }
        // Update best.
        if (weightSum > bestWeight) {
          bestWeight = weightSum;
          bestMAIndex = maIndex;
        }
      }

      // Now select chosen alternative in each constraint.
      for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
          cIndex != eIndex; ++cIndex) {
        AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
        if (cInfo.Type == InlineAsm::isClobber)
          continue;
        cInfo.selectAlternative(bestMAIndex);
      }
    }
  }

  // Check and hook up tied operands, choose constraint code to use.
  for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
      cIndex != eIndex; ++cIndex) {
    AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];

    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];

      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
        // Differing VTs are still acceptable if both constraints resolve to
        // the same register class (e.g. subreg-sized values of one class).
        std::pair<unsigned, const TargetRegisterClass *> MatchRC =
            getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                         OpInfo.ConstraintVT);
        std::pair<unsigned, const TargetRegisterClass *> InputRC =
            getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
                                         Input.ConstraintVT);
        if ((OpInfo.ConstraintVT.isInteger() !=
             Input.ConstraintVT.isInteger()) ||
            (MatchRC.second != InputRC.second)) {
          report_fatal_error("Unsupported asm: input constraint"
                             " with a matching output constraint of"
                             " incompatible type!");
        }
      }
    }
  }

  return ConstraintOperands;
}
2759 
2760 /// Return an integer indicating how general CT is.
2761 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
2762   switch (CT) {
2763   case TargetLowering::C_Other:
2764   case TargetLowering::C_Unknown:
2765     return 0;
2766   case TargetLowering::C_Register:
2767     return 1;
2768   case TargetLowering::C_RegisterClass:
2769     return 2;
2770   case TargetLowering::C_Memory:
2771     return 3;
2772   }
2773   llvm_unreachable("Invalid constraint type");
2774 }
2775 
2776 /// Examine constraint type and operand type and determine a weight value.
2777 /// This object must already have been set up with the operand type
2778 /// and the current alternative constraint selected.
2779 TargetLowering::ConstraintWeight
2780   TargetLowering::getMultipleConstraintMatchWeight(
2781     AsmOperandInfo &info, int maIndex) const {
2782   InlineAsm::ConstraintCodeVector *rCodes;
2783   if (maIndex >= (int)info.multipleAlternatives.size())
2784     rCodes = &info.Codes;
2785   else
2786     rCodes = &info.multipleAlternatives[maIndex].Codes;
2787   ConstraintWeight BestWeight = CW_Invalid;
2788 
2789   // Loop over the options, keeping track of the most general one.
2790   for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
2791     ConstraintWeight weight =
2792       getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
2793     if (weight > BestWeight)
2794       BestWeight = weight;
2795   }
2796 
2797   return BestWeight;
2798 }
2799 
2800 /// Examine constraint type and operand type and determine a weight value.
2801 /// This object must already have been set up with the operand type
2802 /// and the current alternative constraint selected.
2803 TargetLowering::ConstraintWeight
2804   TargetLowering::getSingleConstraintMatchWeight(
2805     AsmOperandInfo &info, const char *constraint) const {
2806   ConstraintWeight weight = CW_Invalid;
2807   Value *CallOperandVal = info.CallOperandVal;
2808     // If we don't have a value, we can't do a match,
2809     // but allow it at the lowest weight.
2810   if (!CallOperandVal)
2811     return CW_Default;
2812   // Look at the constraint type.
2813   switch (*constraint) {
2814     case 'i': // immediate integer.
2815     case 'n': // immediate integer with a known value.
2816       if (isa<ConstantInt>(CallOperandVal))
2817         weight = CW_Constant;
2818       break;
2819     case 's': // non-explicit intregal immediate.
2820       if (isa<GlobalValue>(CallOperandVal))
2821         weight = CW_Constant;
2822       break;
2823     case 'E': // immediate float if host format.
2824     case 'F': // immediate float.
2825       if (isa<ConstantFP>(CallOperandVal))
2826         weight = CW_Constant;
2827       break;
2828     case '<': // memory operand with autodecrement.
2829     case '>': // memory operand with autoincrement.
2830     case 'm': // memory operand.
2831     case 'o': // offsettable memory operand
2832     case 'V': // non-offsettable memory operand
2833       weight = CW_Memory;
2834       break;
2835     case 'r': // general register.
2836     case 'g': // general register, memory operand or immediate integer.
2837               // note: Clang converts "g" to "imr".
2838       if (CallOperandVal->getType()->isIntegerTy())
2839         weight = CW_Register;
2840       break;
2841     case 'X': // any operand.
2842     default:
2843       weight = CW_Default;
2844       break;
2845   }
2846   return weight;
2847 }
2848 
2849 /// If there are multiple different constraints that we could pick for this
2850 /// operand (e.g. "imr") try to pick the 'best' one.
2851 /// This is somewhat tricky: constraints fall into four classes:
2852 ///    Other         -> immediates and magic values
2853 ///    Register      -> one specific register
2854 ///    RegisterClass -> a group of regs
2855 ///    Memory        -> memory
2856 /// Ideally, we would pick the most specific constraint possible: if we have
2857 /// something that fits into a register, we would pick it.  The problem here
2858 /// is that if we have something that could either be in a register or in
2859 /// memory that use of the register could cause selection of *other*
2860 /// operands to fail: they might only succeed if we pick memory.  Because of
2861 /// this the heuristic we use is:
2862 ///
2863 ///  1) If there is an 'other' constraint, and if the operand is valid for
2864 ///     that constraint, use it.  This makes us take advantage of 'i'
2865 ///     constraints when available.
2866 ///  2) Otherwise, pick the most general constraint present.  This prefers
2867 ///     'm' over 'r', for example.
2868 ///
2869 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
2870                              const TargetLowering &TLI,
2871                              SDValue Op, SelectionDAG *DAG) {
2872   assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
2873   unsigned BestIdx = 0;
2874   TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
2875   int BestGenerality = -1;
2876 
2877   // Loop over the options, keeping track of the most general one.
2878   for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
2879     TargetLowering::ConstraintType CType =
2880       TLI.getConstraintType(OpInfo.Codes[i]);
2881 
2882     // If this is an 'other' constraint, see if the operand is valid for it.
2883     // For example, on X86 we might have an 'rI' constraint.  If the operand
2884     // is an integer in the range [0..31] we want to use I (saving a load
2885     // of a register), otherwise we must use 'r'.
2886     if (CType == TargetLowering::C_Other && Op.getNode()) {
2887       assert(OpInfo.Codes[i].size() == 1 &&
2888              "Unhandled multi-letter 'other' constraint");
2889       std::vector<SDValue> ResultOps;
2890       TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
2891                                        ResultOps, *DAG);
2892       if (!ResultOps.empty()) {
2893         BestType = CType;
2894         BestIdx = i;
2895         break;
2896       }
2897     }
2898 
2899     // Things with matching constraints can only be registers, per gcc
2900     // documentation.  This mainly affects "g" constraints.
2901     if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
2902       continue;
2903 
2904     // This constraint letter is more general than the previous one, use it.
2905     int Generality = getConstraintGenerality(CType);
2906     if (Generality > BestGenerality) {
2907       BestType = CType;
2908       BestIdx = i;
2909       BestGenerality = Generality;
2910     }
2911   }
2912 
2913   OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
2914   OpInfo.ConstraintType = BestType;
2915 }
2916 
2917 /// Determines the constraint code and constraint type to use for the specific
2918 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2919 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2920                                             SDValue Op,
2921                                             SelectionDAG *DAG) const {
2922   assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
2923 
2924   // Single-letter constraints ('r') are very common.
2925   if (OpInfo.Codes.size() == 1) {
2926     OpInfo.ConstraintCode = OpInfo.Codes[0];
2927     OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2928   } else {
2929     ChooseConstraint(OpInfo, *this, Op, DAG);
2930   }
2931 
2932   // 'X' matches anything.
2933   if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
2934     // Labels and constants are handled elsewhere ('X' is the only thing
2935     // that matches labels).  For Functions, the type here is the type of
2936     // the result, which is not what we want to look at; leave them alone.
2937     Value *v = OpInfo.CallOperandVal;
2938     if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
2939       OpInfo.CallOperandVal = v;
2940       return;
2941     }
2942 
2943     // Otherwise, try to resolve it to something we know about by looking at
2944     // the actual operand type.
2945     if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
2946       OpInfo.ConstraintCode = Repl;
2947       OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2948     }
2949   }
2950 }
2951 
/// \brief Given an exact SDIV by a constant, create a multiplication
/// with the multiplicative inverse of the constant.
///
/// Because the division is known to be exact, the quotient can be computed
/// as Op1 * d^-1 (mod 2^bitwidth) once any common power-of-two factor has
/// been stripped with an exact arithmetic shift.
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
                              const SDLoc &dl, SelectionDAG &DAG,
                              std::vector<SDNode *> &Created) {
  assert(d != 0 && "Division by zero!");

  // Shift the value upfront if it is even, so the LSB is one.
  unsigned ShAmt = d.countTrailingZeros();
  if (ShAmt) {
    // TODO: For UDIV use SRL instead of SRA.
    SDValue Amt =
        DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
                                                        DAG.getDataLayout()));
    SDNodeFlags Flags;
    // The shift is exact (no bits are lost) because the division is exact.
    Flags.setExact(true);
    Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
    Created.push_back(Op1.getNode());
    d = d.ashr(ShAmt);
  }

  // Calculate the multiplicative inverse, using Newton's method.
  // d is odd here, so the inverse mod 2^bitwidth exists; each iteration
  // doubles the number of correct low-order bits in xn.
  APInt t, xn = d;
  while ((t = d*xn) != 1)
    xn *= APInt(d.getBitWidth(), 2) - t;

  SDValue Op2 = DAG.getConstant(xn, dl, Op1.getValueType());
  SDValue Mul = DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
  Created.push_back(Mul.getNode());
  return Mul;
}
2983 
2984 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
2985                                       SelectionDAG &DAG,
2986                                       std::vector<SDNode *> *Created) const {
2987   AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2988   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2989   if (TLI.isIntDivCheap(N->getValueType(0), Attr))
2990     return SDValue(N,0); // Lower SDIV as SDIV
2991   return SDValue();
2992 }
2993 
/// \brief Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
                                  SelectionDAG &DAG, bool IsAfterLegalization,
                                  std::vector<SDNode *> *Created) const {
  assert(Created && "No vector to hold sdiv ops.");

  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  // If the sdiv has an 'exact' bit we can use a simpler lowering.
  if (cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact())
    return BuildExactSDIV(*this, N->getOperand(0), Divisor, dl, DAG, *Created);

  // Magic multiplier m and post-shift s such that
  // n/d == (mulhs(n, m) [+/- n]) >> s, plus a sign-bit correction.
  APInt::ms magics = Divisor.magic();

  // Multiply the numerator (operand 0) by the magic value
  // FIXME: We should support doing a MUL in a wider type
  SDValue Q;
  if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
                            isOperationLegalOrCustom(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, dl, VT));
  else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
                                 isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
    // Take the high half (result #1) of the widening multiply.
    Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
                              N->getOperand(0),
                              DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhs or equivalent
  // If d > 0 and m < 0, add the numerator
  if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
    Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
    Created->push_back(Q.getNode());
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
    Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
    Created->push_back(Q.getNode());
  }
  auto &DL = DAG.getDataLayout();
  // Shift right algebraic if shift value is nonzero
  if (magics.s > 0) {
    Q = DAG.getNode(
        ISD::SRA, dl, VT, Q,
        DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
    Created->push_back(Q.getNode());
  }
  // Extract the sign bit and add it to the quotient; this corrects the
  // truncation direction for negative dividends.
  SDValue T =
      DAG.getNode(ISD::SRL, dl, VT, Q,
                  DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
                                  getShiftAmountTy(Q.getValueType(), DL)));
  Created->push_back(T.getNode());
  return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}
3057 
/// \brief Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
                                  SelectionDAG &DAG, bool IsAfterLegalization,
                                  std::vector<SDNode *> *Created) const {
  assert(Created && "No vector to hold udiv ops.");

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  auto &DL = DAG.getDataLayout();

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  // FIXME: We should use a narrower constant when the upper
  // bits are known to be zero.
  // magics.m is the multiplier, magics.s the post-shift; magics.a != 0 means
  // the "add" fixup path (the m value overflowed) is required.
  APInt::mu magics = Divisor.magicu();

  SDValue Q = N->getOperand(0);

  // If the divisor is even, we can avoid using the expensive fixup by shifting
  // the divided value upfront.
  if (magics.a != 0 && !Divisor[0]) {
    unsigned Shift = Divisor.countTrailingZeros();
    Q = DAG.getNode(
        ISD::SRL, dl, VT, Q,
        DAG.getConstant(Shift, dl, getShiftAmountTy(Q.getValueType(), DL)));
    Created->push_back(Q.getNode());

    // Get magic number for the shifted divisor.
    magics = Divisor.lshr(Shift).magicu(Shift);
    assert(magics.a == 0 && "Should use cheap fixup now");
  }

  // Multiply the numerator (operand 0) by the magic value
  // FIXME: We should support doing a MUL in a wider type
  if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
                            isOperationLegalOrCustom(ISD::MULHU, VT))
    Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, dl, VT));
  else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
                                 isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
    // Take the high half (result #1) of the widening multiply.
    Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
                            DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhu or equivalent

  Created->push_back(Q.getNode());

  if (magics.a == 0) {
    // Simple case: quotient is just the high product shifted right.
    assert(magics.s < Divisor.getBitWidth() &&
           "We shouldn't generate an undefined shift!");
    return DAG.getNode(
        ISD::SRL, dl, VT, Q,
        DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
  } else {
    // Fixup path: compute q + ((n - q) >> 1), then shift by s - 1; this
    // avoids overflow in the intermediate n + q sum.
    SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
    Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(
        ISD::SRL, dl, VT, NPQ,
        DAG.getConstant(1, dl, getShiftAmountTy(NPQ.getValueType(), DL)));
    Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
    Created->push_back(NPQ.getNode());
    return DAG.getNode(
        ISD::SRL, dl, VT, NPQ,
        DAG.getConstant(magics.s - 1, dl,
                        getShiftAmountTy(NPQ.getValueType(), DL)));
  }
}
3131 
3132 bool TargetLowering::
3133 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
3134   if (!isa<ConstantSDNode>(Op.getOperand(0))) {
3135     DAG.getContext()->emitError("argument to '__builtin_return_address' must "
3136                                 "be a constant integer");
3137     return true;
3138   }
3139 
3140   return false;
3141 }
3142 
3143 //===----------------------------------------------------------------------===//
3144 // Legalization Utilities
3145 //===----------------------------------------------------------------------===//
3146 
// Expand a multiply (ISD::MUL / ISD::UMUL_LOHI / ISD::SMUL_LOHI) on type VT
// into operations on the half-width type HiLoVT. On success, Result holds the
// product split into HiLoVT pieces, low part first: two pieces for MUL, four
// for the LOHI opcodes. LL/LH/RL/RH optionally supply pre-split halves of
// LHS/RHS; either all four are provided or none. Returns false when the
// target lacks the half-width multiply operations needed.
bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl,
                                    SDValue LHS, SDValue RHS,
                                    SmallVectorImpl<SDValue> &Result,
                                    EVT HiLoVT, SelectionDAG &DAG,
                                    MulExpansionKind Kind, SDValue LL,
                                    SDValue LH, SDValue RL, SDValue RH) const {
  assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
         Opcode == ISD::SMUL_LOHI);

  // Determine which half-width widening-multiply forms are usable. With
  // MulExpansionKind::Always we use them unconditionally.
  bool HasMULHS = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
  bool HasMULHU = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
  bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
  bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);

  if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
    return false;

  unsigned OuterBitSize = VT.getScalarSizeInBits();
  unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();
  unsigned LHSSB = DAG.ComputeNumSignBits(LHS);
  unsigned RHSSB = DAG.ComputeNumSignBits(RHS);

  // LL, LH, RL, and RH must be either all NULL or all set to a value.
  assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
         (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));

  SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
  // Emit a half-width widening multiply L*R producing {Lo, Hi}, preferring a
  // single [SU]MUL_LOHI node over a MUL+MULH[SU] pair. Returns false if
  // neither form is available for the requested signedness.
  auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
                          bool Signed) -> bool {
    if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
      Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R);
      Hi = SDValue(Lo.getNode(), 1);
      return true;
    }
    if ((Signed && HasMULHS) || (!Signed && HasMULHU)) {
      Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R);
      Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R);
      return true;
    }
    return false;
  };

  SDValue Lo, Hi;

  // Split out the low halves of the operands if not provided.
  if (!LL.getNode() && !RL.getNode() &&
      isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
    LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS);
    RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS);
  }

  if (!LL.getNode())
    return false;

  APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
  if (DAG.MaskedValueIsZero(LHS, HighMask) &&
      DAG.MaskedValueIsZero(RHS, HighMask)) {
    // The inputs are both zero-extended. A single unsigned half-width
    // multiply of the low halves gives the full product.
    if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) {
      Result.push_back(Lo);
      Result.push_back(Hi);
      if (Opcode != ISD::MUL) {
        // LOHI callers expect four pieces; the upper two are zero.
        SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
        Result.push_back(Zero);
        Result.push_back(Zero);
      }
      return true;
    }
  }

  if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize &&
      RHSSB > InnerBitSize) {
    // The input values are both sign-extended.
    // TODO non-MUL case?
    if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) {
      Result.push_back(Lo);
      Result.push_back(Hi);
      return true;
    }
  }

  unsigned ShiftAmount = OuterBitSize - InnerBitSize;
  EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout());
  if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) {
    // FIXME getShiftAmountTy does not always return a sensible result when VT
    // is an illegal type, and so the type may be too small to fit the shift
    // amount. Override it with i32. The shift will have to be legalized.
    ShiftAmountTy = MVT::i32;
  }
  SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy);

  // Split out the high halves of the operands if not provided.
  if (!LH.getNode() && !RH.getNode() &&
      isOperationLegalOrCustom(ISD::SRL, VT) &&
      isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
    LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift);
    LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
    RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift);
    RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
  }

  if (!LH.getNode())
    return false;

  // General schoolbook expansion. First partial product: LL * RL.
  if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false))
    return false;

  Result.push_back(Lo);

  if (Opcode == ISD::MUL) {
    // Only the low VT bits are needed: Hi += LL*RH + LH*RL (the LH*RH term
    // only affects bits above VT).
    RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
    LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
    Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
    Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
    Result.push_back(Hi);
    return true;
  }

  // Compute the full width result.
  auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue {
    Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
    Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
    Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
    return DAG.getNode(ISD::OR, dl, VT, Lo, Hi);
  };

  SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
  if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false))
    return false;

  // This is effectively the add part of a multiply-add of half-sized operands,
  // so it cannot overflow.
  Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));

  if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false))
    return false;

  // This add CAN carry; capture the carry out via ADDC's glue result.
  Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next,
                     Merge(Lo, Hi));

  SDValue Carry = Next.getValue(1);
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);

  // High partial product LH * RH; signed only for SMUL_LOHI.
  if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI))
    return false;

  // Propagate the carry from the middle sum into the high half via ADDE.
  SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
  Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero,
                   Carry);
  Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));

  if (Opcode == ISD::SMUL_LOHI) {
    // Signed fixup: when an operand's high half is negative, the unsigned
    // partial products over-count by the other operand's low half.
    SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
                                  DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL));
    Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT);

    NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
                          DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL));
    Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT);
  }

  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  return true;
}
3316 
3317 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
3318                                SelectionDAG &DAG, MulExpansionKind Kind,
3319                                SDValue LL, SDValue LH, SDValue RL,
3320                                SDValue RH) const {
3321   SmallVector<SDValue, 2> Result;
3322   bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), N,
3323                            N->getOperand(0), N->getOperand(1), Result, HiLoVT,
3324                            DAG, Kind, LL, LH, RL, RH);
3325   if (Ok) {
3326     assert(Result.size() == 2);
3327     Lo = Result[0];
3328     Hi = Result[1];
3329   }
3330   return Ok;
3331 }
3332 
// Expand an FP_TO_SINT node in terms of integer bit operations, writing the
// result to Result and returning true on success. Currently only handles the
// f32 -> i64 case; returns false otherwise.
bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
                               SelectionDAG &DAG) const {
  EVT VT = Node->getOperand(0).getValueType();
  EVT NVT = Node->getValueType(0);
  SDLoc dl(SDValue(Node, 0));

  // FIXME: Only f32 to i64 conversions are supported.
  if (VT != MVT::f32 || NVT != MVT::i64)
    return false;

  // Expand f32 -> i64 conversion
  // This algorithm comes from compiler-rt's implementation of fixsfdi:
  // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c
  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(),
                                VT.getSizeInBits());
  // IEEE-754 single-precision layout: 1 sign bit, 8 exponent bits (biased by
  // 127) starting at bit 23, 23 mantissa bits.
  SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
  SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
  SDValue Bias = DAG.getConstant(127, dl, IntVT);
  SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()), dl,
                                     IntVT);
  SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT);
  SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);

  // Reinterpret the float as raw bits so the fields can be extracted.
  SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));

  auto &DL = DAG.getDataLayout();
  // Unbiased exponent = ((bits & ExponentMask) >> 23) - 127.
  SDValue ExponentBits = DAG.getNode(
      ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
      DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT, DL)));
  SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);

  // Arithmetic shift of the isolated sign bit yields all-ones for negative
  // inputs and zero for non-negative ones.
  SDValue Sign = DAG.getNode(
      ISD::SRA, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
      DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT, DL)));
  Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);

  // Significand with the implicit leading 1 restored (bit 23).
  SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
      DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
      DAG.getConstant(0x00800000, dl, IntVT));

  R = DAG.getZExtOrTrunc(R, dl, NVT);

  // Scale the significand by 2^(Exponent - 23): shift left when the exponent
  // exceeds the mantissa width, right otherwise.
  R = DAG.getSelectCC(
      dl, Exponent, ExponentLoBit,
      DAG.getNode(ISD::SHL, dl, NVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
                      dl, getShiftAmountTy(IntVT, DL))),
      DAG.getNode(ISD::SRL, dl, NVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
                      dl, getShiftAmountTy(IntVT, DL))),
      ISD::SETGT);

  // Apply the sign: (R ^ Sign) - Sign negates R when Sign is all-ones.
  SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
      DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
      Sign);

  // A negative (unbiased) exponent means |input| < 1, so the result is 0.
  Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
      DAG.getConstant(0, dl, NVT), Ret, ISD::SETLT);
  return true;
}
3395 
// Turn a vector load into a sequence of scalar extending loads, one per
// element, returning merged values {result vector, new chain}.
SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
                                            SelectionDAG &DAG) const {
  SDLoc SL(LD);
  SDValue Chain = LD->getChain();
  SDValue BasePTR = LD->getBasePtr();
  EVT SrcVT = LD->getMemoryVT();
  ISD::LoadExtType ExtType = LD->getExtensionType();

  unsigned NumElem = SrcVT.getVectorNumElements();

  EVT SrcEltVT = SrcVT.getScalarType();
  EVT DstEltVT = LD->getValueType(0).getScalarType();

  // Per-element stride in bytes; elements must be byte-sized for the
  // pointer arithmetic below to be meaningful.
  unsigned Stride = SrcEltVT.getSizeInBits() / 8;
  assert(SrcEltVT.isByteSized());

  EVT PtrVT = BasePTR.getValueType();

  SmallVector<SDValue, 8> Vals;
  SmallVector<SDValue, 8> LoadChains;

  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    // Each scalar load extends from the in-memory element type to the
    // result's element type, preserving the original extension kind.
    SDValue ScalarLoad =
        DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
                       LD->getPointerInfo().getWithOffset(Idx * Stride),
                       SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride),
                       LD->getMemOperand()->getFlags(), LD->getAAInfo());

    // Advance the pointer to the next element.
    BasePTR = DAG.getNode(ISD::ADD, SL, PtrVT, BasePTR,
                          DAG.getConstant(Stride, SL, PtrVT));

    Vals.push_back(ScalarLoad.getValue(0));
    LoadChains.push_back(ScalarLoad.getValue(1));
  }

  // Join all load chains so later users depend on every element load.
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
  SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals);

  return DAG.getMergeValues({ Value, NewChain }, SL);
}
3436 
3437 // FIXME: This relies on each element having a byte size, otherwise the stride
3438 // is 0 and just overwrites the same location. ExpandStore currently expects
3439 // this broken behavior.
// Turn a vector store into a sequence of scalar (truncating) stores, one per
// element, returning a TokenFactor chain over all of them.
SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  SDLoc SL(ST);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  EVT StVT = ST->getMemoryVT();

  // The type of the data we want to save
  EVT RegVT = Value.getValueType();
  EVT RegSclVT = RegVT.getScalarType();

  // The type of data as saved in memory.
  EVT MemSclVT = StVT.getScalarType();

  EVT PtrVT = BasePtr.getValueType();

  // Store Stride in bytes
  unsigned Stride = MemSclVT.getSizeInBits() / 8;
  EVT IdxVT = getVectorIdxTy(DAG.getDataLayout());
  unsigned NumElem = StVT.getVectorNumElements();

  // Extract each of the elements from the original vector and save them into
  // memory individually.
  SmallVector<SDValue, 8> Stores;
  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
                              DAG.getConstant(Idx, SL, IdxVT));

    // Address of element Idx: base + Idx * Stride.
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(Idx * Stride, SL, PtrVT));

    // This scalar TruncStore may be illegal, but we legalize it later.
    SDValue Store = DAG.getTruncStore(
        Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
        MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride),
        ST->getMemOperand()->getFlags(), ST->getAAInfo());

    Stores.push_back(Store);
  }

  // All element stores are independent; TokenFactor joins their chains.
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
}
3484 
/// Expand an unaligned load that the target cannot perform natively.
///
/// FP/vector loads are handled by one of three strategies, tried in order:
///  1. If a same-sized integer load is legal, load as an integer and
///     BITCAST back to the original type.
///  2. If the integer type is legal but integer loads of that width are
///     not legal/custom, scalarize the vector load into per-element loads.
///  3. Otherwise, copy the data with unaligned register-width integer
///     loads into an aligned stack slot, then reload the value from the
///     slot with its original type.
/// Plain integer loads are split into two half-width loads whose results
/// are combined with shift+or.
///
/// \returns a pair of (loaded value, output chain).
std::pair<SDValue, SDValue>
TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
  assert(LD->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed loads not implemented!");
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);      // Type the load's users expect.
  EVT LoadedVT = LD->getMemoryVT();  // Type as it exists in memory.
  SDLoc dl(LD);
  if (VT.isFloatingPoint() || VT.isVector()) {
    // Integer type with the same total width as the in-memory type.
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
      if (!isOperationLegalOrCustom(ISD::LOAD, intVT)) {
        // Scalarize the load and let the individual components be handled.
        SDValue Scalarized = scalarizeVectorLoad(LD, DAG);
        return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
      }

      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
                                    LD->getMemOperand());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (LoadedVT != VT)
        // This was an extending load; widen the bitcast result up to VT.
        Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
                             ISD::ANY_EXTEND, dl, VT, Result);

      return std::make_pair(Result, newLoad.getValue(1));
    }

    // Copy the value to a (aligned) stack slot using (unaligned) integer
    // loads and stores, then do a (aligned) load from the stack slot.
    MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    // Number of register-width pieces needed, rounding up.
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    EVT PtrVT = Ptr.getValueType();
    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(
          RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
          MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(),
          LD->getAAInfo());
      // Follow the load with a store to the stack slot.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                    MachinePointerInfo()));
      // Increment the pointers.
      Offset += RegBytes;
      Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT, StackPtr,
                             StackPtrIncrement);
    }

    // The last copy may be partial.  Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load =
        DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                       LD->getPointerInfo().getWithOffset(Offset), MemVT,
                       MinAlign(LD->getAlignment(), Offset),
                       LD->getMemOperand()->getFlags(), LD->getAAInfo());
    // Follow the load with a store to the stack slot.  Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                       MachinePointerInfo(), MemVT));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo(), LoadedVT);

    // Callers expect a MERGE_VALUES node.
    return std::make_pair(Load, TF);
  }

  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one.  This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts
  SDValue Lo, Hi;
  if (DAG.getDataLayout().isLittleEndian()) {
    // Little-endian: the low half lives at the base address.
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, MinAlign(Alignment, IncrementSize),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
  } else {
    // Big-endian: the high half lives at the base address.
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, MinAlign(Alignment, IncrementSize),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
  }

  // aggregate the two parts: Result = (Hi << NumBits) | Lo.
  SDValue ShiftAmount =
      DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
                                                    DAG.getDataLayout()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                             Hi.getValue(1));

  return std::make_pair(Result, TF);
}
3631 
/// Expand an unaligned store that the target cannot perform natively.
///
/// FP/vector stores are handled by one of three strategies, tried in order:
///  1. If a same-sized integer type is legal and integer stores of that
///     width are supported, BITCAST the value and emit a (misaligned)
///     integer store.
///  2. If integer stores of that width are not legal/custom, scalarize
///     the vector store into per-element stores.
///  3. Otherwise, spill the value to an aligned stack slot with a
///     truncating store, then copy it out to the destination using
///     unaligned register-width integer loads and stores.
/// Plain integer stores are split into two half-width truncating stores.
///
/// \returns the output chain of the expanded store(s).
SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  assert(ST->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed stores not implemented!");
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();

  SDLoc dl(ST);
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    // Integer type with the same total width as the value being stored.
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (isTypeLegal(intVT)) {
      if (!isOperationLegalOrCustom(ISD::STORE, intVT)) {
        // Scalarize the store and let the individual components be handled.
        SDValue Result = scalarizeVectorStore(ST, DAG);

        return Result;
      }
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                            Alignment, ST->getMemOperand()->getFlags());
      return Result;
    }
    // Do a (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    EVT StoredVT = ST->getMemoryVT();
    MVT RegVT =
      getRegisterType(*DAG.getContext(),
                      EVT::getIntegerVT(*DAG.getContext(),
                                        StoredVT.getSizeInBits()));
    EVT PtrVT = Ptr.getValueType();
    unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    // Number of register-width pieces needed, rounding up.
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(Chain, dl, Val, StackPtr,
                                      MachinePointerInfo(), StoredVT);

    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load =
          DAG.getLoad(RegVT, dl, Store, StackPtr, MachinePointerInfo());
      // Store it to the final location.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                    MinAlign(ST->getAlignment(), Offset),
                                    ST->getMemOperand()->getFlags()));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT,
                             StackPtr, StackPtrIncrement);
      Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
    }

    // The last store may be partial.  Do a truncating store.  On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                  MachinePointerInfo(), MemVT);

    Stores.push_back(
        DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                          ST->getPointerInfo().getWithOffset(Offset), MemVT,
                          MinAlign(ST->getAlignment(), Offset),
                          ST->getMemOperand()->getFlags(), ST->getAAInfo()));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    return Result;
  }

  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT
  EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
  SDValue ShiftAmount =
      DAG.getConstant(NumBits, dl, getShiftAmountTy(Val.getValueType(),
                                                    DAG.getDataLayout()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts; endianness decides which half goes at the base
  // address and which at base + IncrementSize.
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl,
                             DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
                             Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
                             ST->getMemOperand()->getFlags());

  EVT PtrVT = Ptr.getValueType();
  Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
                    DAG.getConstant(IncrementSize, dl, PtrVT));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(
      Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
      ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
      ST->getMemOperand()->getFlags(), ST->getAAInfo());

  SDValue Result =
    DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
  return Result;
}
3759 
3760 SDValue
3761 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
3762                                        const SDLoc &DL, EVT DataVT,
3763                                        SelectionDAG &DAG,
3764                                        bool IsCompressedMemory) const {
3765   SDValue Increment;
3766   EVT AddrVT = Addr.getValueType();
3767   EVT MaskVT = Mask.getValueType();
3768   assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() &&
3769          "Incompatible types of Data and Mask");
3770   if (IsCompressedMemory) {
3771     // Incrementing the pointer according to number of '1's in the mask.
3772     EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
3773     SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
3774     if (MaskIntVT.getSizeInBits() < 32) {
3775       MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
3776       MaskIntVT = MVT::i32;
3777     }
3778 
3779     // Count '1's with POPCNT.
3780     Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
3781     Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
3782     // Scale is an element size in bytes.
3783     SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
3784                                     AddrVT);
3785     Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
3786   } else
3787     Increment = DAG.getConstant(DataVT.getSizeInBits() / 8, DL, AddrVT);
3788 
3789   return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
3790 }
3791 
3792 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG,
3793                                        SDValue Idx,
3794                                        EVT VecVT,
3795                                        const SDLoc &dl) {
3796   if (isa<ConstantSDNode>(Idx))
3797     return Idx;
3798 
3799   EVT IdxVT = Idx.getValueType();
3800   unsigned NElts = VecVT.getVectorNumElements();
3801   if (isPowerOf2_32(NElts)) {
3802     APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
3803                                      Log2_32(NElts));
3804     return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
3805                        DAG.getConstant(Imm, dl, IdxVT));
3806   }
3807 
3808   return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
3809                      DAG.getConstant(NElts - 1, dl, IdxVT));
3810 }
3811 
3812 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
3813                                                 SDValue VecPtr, EVT VecVT,
3814                                                 SDValue Index) const {
3815   SDLoc dl(Index);
3816   // Make sure the index type is big enough to compute in.
3817   Index = DAG.getZExtOrTrunc(Index, dl, getPointerTy(DAG.getDataLayout()));
3818 
3819   EVT EltVT = VecVT.getVectorElementType();
3820 
3821   // Calculate the element offset and add it to the pointer.
3822   unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
3823   assert(EltSize * 8 == EltVT.getSizeInBits() &&
3824          "Converting bits to bytes lost precision");
3825 
3826   Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);
3827 
3828   EVT IdxVT = Index.getValueType();
3829 
3830   Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
3831                       DAG.getConstant(EltSize, dl, IdxVT));
3832   return DAG.getNode(ISD::ADD, dl, IdxVT, Index, VecPtr);
3833 }
3834 
3835 //===----------------------------------------------------------------------===//
3836 // Implementation of Emulated TLS Model
3837 //===----------------------------------------------------------------------===//
3838 
3839 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
3840                                                 SelectionDAG &DAG) const {
3841   // Access to address of TLS varialbe xyz is lowered to a function call:
3842   //   __emutls_get_address( address of global variable named "__emutls_v.xyz" )
3843   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3844   PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
3845   SDLoc dl(GA);
3846 
3847   ArgListTy Args;
3848   ArgListEntry Entry;
3849   std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
3850   Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
3851   StringRef EmuTlsVarName(NameString);
3852   GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
3853   assert(EmuTlsVar && "Cannot find EmuTlsVar ");
3854   Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
3855   Entry.Ty = VoidPtrType;
3856   Args.push_back(Entry);
3857 
3858   SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
3859 
3860   TargetLowering::CallLoweringInfo CLI(DAG);
3861   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
3862   CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
3863   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3864 
3865   // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
3866   // At last for X86 targets, maybe good for other targets too?
3867   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3868   MFI.setAdjustsStack(true);  // Is this only for X86 target?
3869   MFI.setHasCalls(true);
3870 
3871   assert((GA->getOffset() == 0) &&
3872          "Emulated TLS must have zero offset in GlobalAddressSDNode");
3873   return CallResult.first;
3874 }
3875 
3876 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
3877                                                 SelectionDAG &DAG) const {
3878   assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node.");
3879   if (!isCtlzFast())
3880     return SDValue();
3881   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3882   SDLoc dl(Op);
3883   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3884     if (C->isNullValue() && CC == ISD::SETEQ) {
3885       EVT VT = Op.getOperand(0).getValueType();
3886       SDValue Zext = Op.getOperand(0);
3887       if (VT.bitsLT(MVT::i32)) {
3888         VT = MVT::i32;
3889         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
3890       }
3891       unsigned Log2b = Log2_32(VT.getSizeInBits());
3892       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
3893       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
3894                                 DAG.getConstant(Log2b, dl, MVT::i32));
3895       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
3896     }
3897   }
3898   return SDValue();
3899 }
3900