1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Target/TargetLowering.h"
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/CodeGen/CallingConvLower.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineJumpTableInfo.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/GlobalVariable.h"
26 #include "llvm/IR/LLVMContext.h"
27 #include "llvm/MC/MCAsmInfo.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/MathExtras.h"
31 #include "llvm/Target/TargetLoweringObjectFile.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetRegisterInfo.h"
34 #include "llvm/Target/TargetSubtargetInfo.h"
35 #include <cctype>
36 using namespace llvm;
37 
38 /// NOTE: The TargetMachine owns TLOF.
39 TargetLowering::TargetLowering(const TargetMachine &tm)
40   : TargetLoweringBase(tm) {}
41 
42 const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
43   return nullptr;
44 }
45 
46 bool TargetLowering::isPositionIndependent() const {
47   return getTargetMachine().isPositionIndependent();
48 }
49 
50 /// Check whether a given call node is in tail position within its function. If
51 /// so, it sets Chain to the input chain of the tail call.
52 bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
53                                           SDValue &Chain) const {
54   const Function *F = DAG.getMachineFunction().getFunction();
55 
56   // Conservatively require the attributes of the call to match those of
57   // the return. Ignore noalias because it doesn't affect the call sequence.
58   AttributeSet CallerAttrs = F->getAttributes();
59   if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex)
60       .removeAttribute(Attribute::NoAlias).hasAttributes())
61     return false;
62 
63   // It's not safe to eliminate the sign / zero extension of the return value.
64   if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
65       CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
66     return false;
67 
68   // Check if the only use is a function return node.
69   return isUsedByReturnOnly(Node, Chain);
70 }
71 
72 bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
73     const uint32_t *CallerPreservedMask,
74     const SmallVectorImpl<CCValAssign> &ArgLocs,
75     const SmallVectorImpl<SDValue> &OutVals) const {
76   for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
77     const CCValAssign &ArgLoc = ArgLocs[I];
78     if (!ArgLoc.isRegLoc())
79       continue;
80     unsigned Reg = ArgLoc.getLocReg();
81     // Only look at callee saved registers.
82     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
83       continue;
84     // Check that we pass the value used for the caller.
85     // (We look for a CopyFromReg reading a virtual register that is used
86     //  for the function live-in value of register Reg)
87     SDValue Value = OutVals[I];
88     if (Value->getOpcode() != ISD::CopyFromReg)
89       return false;
90     unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
91     if (MRI.getLiveInPhysReg(ArgReg) != Reg)
92       return false;
93   }
94   return true;
95 }
96 
97 /// \brief Set CallLoweringInfo attribute flags based on a call instruction
98 /// and called function attributes.
99 void TargetLowering::ArgListEntry::setAttributes(ImmutableCallSite *CS,
100                                                  unsigned AttrIdx) {
101   isSExt     = CS->paramHasAttr(AttrIdx, Attribute::SExt);
102   isZExt     = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
103   isInReg    = CS->paramHasAttr(AttrIdx, Attribute::InReg);
104   isSRet     = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
105   isNest     = CS->paramHasAttr(AttrIdx, Attribute::Nest);
106   isByVal    = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
107   isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
108   isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
109   isSwiftSelf = CS->paramHasAttr(AttrIdx, Attribute::SwiftSelf);
110   isSwiftError = CS->paramHasAttr(AttrIdx, Attribute::SwiftError);
111   Alignment  = CS->getParamAlignment(AttrIdx);
112 }
113 
114 /// Generate a libcall taking the given operands as arguments and returning a
115 /// result of type RetVT.
116 std::pair<SDValue, SDValue>
117 TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
118                             ArrayRef<SDValue> Ops, bool isSigned,
119                             const SDLoc &dl, bool doesNotReturn,
120                             bool isReturnValueUsed) const {
121   TargetLowering::ArgListTy Args;
122   Args.reserve(Ops.size());
123 
124   TargetLowering::ArgListEntry Entry;
125   for (SDValue Op : Ops) {
126     Entry.Node = Op;
127     Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
128     Entry.isSExt = shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
129     Entry.isZExt = !shouldSignExtendTypeInLibCall(Op.getValueType(), isSigned);
130     Args.push_back(Entry);
131   }
132 
133   if (LC == RTLIB::UNKNOWN_LIBCALL)
134     report_fatal_error("Unsupported library call operation!");
135   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
136                                          getPointerTy(DAG.getDataLayout()));
137 
138   Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
139   TargetLowering::CallLoweringInfo CLI(DAG);
140   bool signExtend = shouldSignExtendTypeInLibCall(RetVT, isSigned);
141   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
142     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
143     .setNoReturn(doesNotReturn).setDiscardResult(!isReturnValueUsed)
144     .setSExtResult(signExtend).setZExtResult(!signExtend);
145   return LowerCallTo(CLI);
146 }
147 
148 /// Soften the operands of a comparison. This code is shared among BR_CC,
149 /// SELECT_CC, and SETCC handlers.
150 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
151                                          SDValue &NewLHS, SDValue &NewRHS,
152                                          ISD::CondCode &CCCode,
153                                          const SDLoc &dl) const {
154   assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
155          && "Unsupported setcc type!");
156 
157   // Expand into one or more soft-fp libcall(s).
158   RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
159   bool ShouldInvertCC = false;
160   switch (CCCode) {
161   case ISD::SETEQ:
162   case ISD::SETOEQ:
163     LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
164           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
165           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
166     break;
167   case ISD::SETNE:
168   case ISD::SETUNE:
169     LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
170           (VT == MVT::f64) ? RTLIB::UNE_F64 :
171           (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
172     break;
173   case ISD::SETGE:
174   case ISD::SETOGE:
175     LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
176           (VT == MVT::f64) ? RTLIB::OGE_F64 :
177           (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
178     break;
179   case ISD::SETLT:
180   case ISD::SETOLT:
181     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
182           (VT == MVT::f64) ? RTLIB::OLT_F64 :
183           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
184     break;
185   case ISD::SETLE:
186   case ISD::SETOLE:
187     LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
188           (VT == MVT::f64) ? RTLIB::OLE_F64 :
189           (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
190     break;
191   case ISD::SETGT:
192   case ISD::SETOGT:
193     LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
194           (VT == MVT::f64) ? RTLIB::OGT_F64 :
195           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
196     break;
197   case ISD::SETUO:
198     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
199           (VT == MVT::f64) ? RTLIB::UO_F64 :
200           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
201     break;
202   case ISD::SETO:
203     LC1 = (VT == MVT::f32) ? RTLIB::O_F32 :
204           (VT == MVT::f64) ? RTLIB::O_F64 :
205           (VT == MVT::f128) ? RTLIB::O_F128 : RTLIB::O_PPCF128;
206     break;
207   case ISD::SETONE:
208     // SETONE = SETOLT | SETOGT
209     LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
210           (VT == MVT::f64) ? RTLIB::OLT_F64 :
211           (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
212     LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
213           (VT == MVT::f64) ? RTLIB::OGT_F64 :
214           (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
215     break;
216   case ISD::SETUEQ:
217     LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
218           (VT == MVT::f64) ? RTLIB::UO_F64 :
219           (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
220     LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
221           (VT == MVT::f64) ? RTLIB::OEQ_F64 :
222           (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
223     break;
224   default:
225     // Invert CC for unordered comparisons
226     ShouldInvertCC = true;
227     switch (CCCode) {
228     case ISD::SETULT:
229       LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
230             (VT == MVT::f64) ? RTLIB::OGE_F64 :
231             (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
232       break;
233     case ISD::SETULE:
234       LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
235             (VT == MVT::f64) ? RTLIB::OGT_F64 :
236             (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
237       break;
238     case ISD::SETUGT:
239       LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
240             (VT == MVT::f64) ? RTLIB::OLE_F64 :
241             (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
242       break;
243     case ISD::SETUGE:
244       LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
245             (VT == MVT::f64) ? RTLIB::OLT_F64 :
246             (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
247       break;
248     default: llvm_unreachable("Do not know how to soften this setcc!");
249     }
250   }
251 
252   // Use the target specific return value for comparions lib calls.
253   EVT RetVT = getCmpLibcallReturnType();
254   SDValue Ops[2] = {NewLHS, NewRHS};
255   NewLHS = makeLibCall(DAG, LC1, RetVT, Ops, false /*sign irrelevant*/,
256                        dl).first;
257   NewRHS = DAG.getConstant(0, dl, RetVT);
258 
259   CCCode = getCmpLibcallCC(LC1);
260   if (ShouldInvertCC)
261     CCCode = getSetCCInverse(CCCode, /*isInteger=*/true);
262 
263   if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
264     SDValue Tmp = DAG.getNode(
265         ISD::SETCC, dl,
266         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
267         NewLHS, NewRHS, DAG.getCondCode(CCCode));
268     NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, false/*sign irrelevant*/,
269                          dl).first;
270     NewLHS = DAG.getNode(
271         ISD::SETCC, dl,
272         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
273         NewLHS, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
274     NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
275     NewRHS = SDValue();
276   }
277 }
278 
279 /// Return the entry encoding for a jump table in the current function. The
280 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
281 unsigned TargetLowering::getJumpTableEncoding() const {
282   // In non-pic modes, just use the address of a block.
283   if (!isPositionIndependent())
284     return MachineJumpTableInfo::EK_BlockAddress;
285 
286   // In PIC mode, if the target supports a GPRel32 directive, use it.
287   if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
288     return MachineJumpTableInfo::EK_GPRel32BlockAddress;
289 
290   // Otherwise, use a label difference.
291   return MachineJumpTableInfo::EK_LabelDifference32;
292 }
293 
294 SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
295                                                  SelectionDAG &DAG) const {
296   // If our PIC model is GP relative, use the global offset table as the base.
297   unsigned JTEncoding = getJumpTableEncoding();
298 
299   if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
300       (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
301     return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));
302 
303   return Table;
304 }
305 
306 /// This returns the relocation base for the given PIC jumptable, the same as
307 /// getPICJumpTableRelocBase, but as an MCExpr.
308 const MCExpr *
309 TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
310                                              unsigned JTI,MCContext &Ctx) const{
311   // The normal PIC reloc base is the label at the start of the jump table.
312   return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
313 }
314 
315 bool
316 TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
317   const TargetMachine &TM = getTargetMachine();
318   const GlobalValue *GV = GA->getGlobal();
319 
320   // If the address is not even local to this DSO we will have to load it from
321   // a got and then add the offset.
322   if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
323     return false;
324 
325   // If the code is position independent we will have to add a base register.
326   if (isPositionIndependent())
327     return false;
328 
329   // Otherwise we can do it.
330   return true;
331 }
332 
333 //===----------------------------------------------------------------------===//
334 //  Optimization Methods
335 //===----------------------------------------------------------------------===//
336 
337 /// Check to see if the specified operand of the specified instruction is a
338 /// constant integer. If so, check to see if there are any bits set in the
339 /// constant that are not demanded. If so, shrink the constant and return true.
340 bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
341                                                         const APInt &Demanded) {
342   SDLoc dl(Op);
343 
344   // FIXME: ISD::SELECT, ISD::SELECT_CC
345   switch (Op.getOpcode()) {
346   default: break;
347   case ISD::XOR:
348   case ISD::AND:
349   case ISD::OR: {
350     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
351     if (!C) return false;
352 
353     if (Op.getOpcode() == ISD::XOR &&
354         (C->getAPIntValue() | (~Demanded)).isAllOnesValue())
355       return false;
356 
357     // if we can expand it to have all bits set, do it
358     if (C->getAPIntValue().intersects(~Demanded)) {
359       EVT VT = Op.getValueType();
360       SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
361                                 DAG.getConstant(Demanded &
362                                                 C->getAPIntValue(),
363                                                 dl, VT));
364       return CombineTo(Op, New);
365     }
366 
367     break;
368   }
369   }
370 
371   return false;
372 }
373 
374 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
375 /// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
376 /// generalized for targets with other types of implicit widening casts.
377 bool TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
378                                                          unsigned BitWidth,
379                                                          const APInt &Demanded,
380                                                          const SDLoc &dl) {
381   assert(Op.getNumOperands() == 2 &&
382          "ShrinkDemandedOp only supports binary operators!");
383   assert(Op.getNode()->getNumValues() == 1 &&
384          "ShrinkDemandedOp only supports nodes with one result!");
385 
386   // Early return, as this function cannot handle vector types.
387   if (Op.getValueType().isVector())
388     return false;
389 
390   // Don't do this if the node has another user, which may require the
391   // full value.
392   if (!Op.getNode()->hasOneUse())
393     return false;
394 
395   // Search for the smallest integer type with free casts to and from
396   // Op's type. For expedience, just check power-of-2 integer types.
397   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
398   unsigned DemandedSize = BitWidth - Demanded.countLeadingZeros();
399   unsigned SmallVTBits = DemandedSize;
400   if (!isPowerOf2_32(SmallVTBits))
401     SmallVTBits = NextPowerOf2(SmallVTBits);
402   for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
403     EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
404     if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
405         TLI.isZExtFree(SmallVT, Op.getValueType())) {
406       // We found a type with free casts.
407       SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
408                               DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
409                                           Op.getNode()->getOperand(0)),
410                               DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
411                                           Op.getNode()->getOperand(1)));
412       bool NeedZext = DemandedSize > SmallVTBits;
413       SDValue Z = DAG.getNode(NeedZext ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND,
414                               dl, Op.getValueType(), X);
415       return CombineTo(Op, Z);
416     }
417   }
418   return false;
419 }
420 
421 bool
422 TargetLowering::TargetLoweringOpt::SimplifyDemandedBits(SDNode *User,
423                                                         unsigned OpIdx,
424                                                         const APInt &Demanded,
425                                                         DAGCombinerInfo &DCI) {
426   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
427   SDValue Op = User->getOperand(OpIdx);
428   APInt KnownZero, KnownOne;
429 
430   if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne,
431                                 *this, 0, true))
432     return false;
433 
434 
435   // Old will not always be the same as Op.  For example:
436   //
437   // Demanded = 0xffffff
438   // Op = i64 truncate (i32 and x, 0xffffff)
439   // In this case simplify demand bits will want to replace the 'and' node
440   // with the value 'x', which will give us:
441   // Old = i32 and x, 0xffffff
442   // New = x
443   if (Old.hasOneUse()) {
444     // For the one use case, we just commit the change.
445     DCI.CommitTargetLoweringOpt(*this);
446     return true;
447   }
448 
449   // If Old has more than one use then it must be Op, because the
450   // AssumeSingleUse flag is not propogated to recursive calls of
451   // SimplifyDemanded bits, so the only node with multiple use that
452   // it will attempt to combine will be opt.
453   assert(Old == Op);
454 
455   SmallVector <SDValue, 4> NewOps;
456   for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
457     if (i == OpIdx) {
458       NewOps.push_back(New);
459       continue;
460     }
461     NewOps.push_back(User->getOperand(i));
462   }
463   DAG.UpdateNodeOperands(User, NewOps);
464   // Op has less users now, so we may be able to perform additional combines
465   // with it.
466   DCI.AddToWorklist(Op.getNode());
467   // User's operands have been updated, so we may be able to do new combines
468   // with it.
469   DCI.AddToWorklist(User);
470   return true;
471 }
472 
473 /// Look at Op. At this point, we know that only the DemandedMask bits of the
474 /// result of Op are ever used downstream. If we can use this information to
475 /// simplify Op, create a new simplified DAG node and return true, returning the
476 /// original and new nodes in Old and New. Otherwise, analyze the expression and
477 /// return a mask of KnownOne and KnownZero bits for the expression (used to
478 /// simplify the caller).  The KnownZero/One bits may only be accurate for those
479 /// bits in the DemandedMask.
480 bool TargetLowering::SimplifyDemandedBits(SDValue Op,
481                                           const APInt &DemandedMask,
482                                           APInt &KnownZero,
483                                           APInt &KnownOne,
484                                           TargetLoweringOpt &TLO,
485                                           unsigned Depth,
486                                           bool AssumeSingleUse) const {
487   unsigned BitWidth = DemandedMask.getBitWidth();
488   assert(Op.getScalarValueSizeInBits() == BitWidth &&
489          "Mask size mismatches value type size!");
490   APInt NewMask = DemandedMask;
491   SDLoc dl(Op);
492   auto &DL = TLO.DAG.getDataLayout();
493 
494   // Don't know anything.
495   KnownZero = KnownOne = APInt(BitWidth, 0);
496 
497   // Other users may use these bits.
498   if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
499     if (Depth != 0) {
500       // If not at the root, Just compute the KnownZero/KnownOne bits to
501       // simplify things downstream.
502       TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
503       return false;
504     }
505     // If this is the root being simplified, allow it to have multiple uses,
506     // just set the NewMask to all bits.
507     NewMask = APInt::getAllOnesValue(BitWidth);
508   } else if (DemandedMask == 0) {
509     // Not demanding any bits from Op.
510     if (!Op.isUndef())
511       return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
512     return false;
513   } else if (Depth == 6) {        // Limit search depth.
514     return false;
515   }
516 
517   APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
518   switch (Op.getOpcode()) {
519   case ISD::Constant:
520     // We know all of the bits for a constant!
521     KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
522     KnownZero = ~KnownOne;
523     return false;   // Don't fall through, will infinitely loop.
524   case ISD::BUILD_VECTOR:
525     // Collect the known bits that are shared by every constant vector element.
526     KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
527     for (SDValue SrcOp : Op->ops()) {
528       if (!isa<ConstantSDNode>(SrcOp)) {
529         // We can only handle all constant values - bail out with no known bits.
530         KnownZero = KnownOne = APInt(BitWidth, 0);
531         return false;
532       }
533       KnownOne2 = cast<ConstantSDNode>(SrcOp)->getAPIntValue();
534       KnownZero2 = ~KnownOne2;
535 
536       // BUILD_VECTOR can implicitly truncate sources, we must handle this.
537       if (KnownOne2.getBitWidth() != BitWidth) {
538         assert(KnownOne2.getBitWidth() > BitWidth &&
539                KnownZero2.getBitWidth() > BitWidth &&
540                "Expected BUILD_VECTOR implicit truncation");
541         KnownOne2 = KnownOne2.trunc(BitWidth);
542         KnownZero2 = KnownZero2.trunc(BitWidth);
543       }
544 
545       // Known bits are the values that are shared by every element.
546       // TODO: support per-element known bits.
547       KnownOne &= KnownOne2;
548       KnownZero &= KnownZero2;
549     }
550     return false;   // Don't fall through, will infinitely loop.
551   case ISD::AND:
552     // If the RHS is a constant, check to see if the LHS would be zero without
553     // using the bits from the RHS.  Below, we use knowledge about the RHS to
554     // simplify the LHS, here we're using information from the LHS to simplify
555     // the RHS.
556     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
557       SDValue Op0 = Op.getOperand(0);
558       APInt LHSZero, LHSOne;
559       // Do not increment Depth here; that can cause an infinite loop.
560       TLO.DAG.computeKnownBits(Op0, LHSZero, LHSOne, Depth);
561       // If the LHS already has zeros where RHSC does, this and is dead.
562       if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
563         return TLO.CombineTo(Op, Op0);
564 
565       // If any of the set bits in the RHS are known zero on the LHS, shrink
566       // the constant.
567       if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
568         return true;
569 
570       // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
571       // constant, but if this 'and' is only clearing bits that were just set by
572       // the xor, then this 'and' can be eliminated by shrinking the mask of
573       // the xor. For example, for a 32-bit X:
574       // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
575       if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
576           LHSOne == ~RHSC->getAPIntValue()) {
577         SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, Op.getValueType(),
578                                       Op0.getOperand(0), Op.getOperand(1));
579         return TLO.CombineTo(Op, Xor);
580       }
581     }
582 
583     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
584                              KnownOne, TLO, Depth+1))
585       return true;
586     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
587     if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
588                              KnownZero2, KnownOne2, TLO, Depth+1))
589       return true;
590     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
591 
592     // If all of the demanded bits are known one on one side, return the other.
593     // These bits cannot contribute to the result of the 'and'.
594     if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
595       return TLO.CombineTo(Op, Op.getOperand(0));
596     if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
597       return TLO.CombineTo(Op, Op.getOperand(1));
598     // If all of the demanded bits in the inputs are known zeros, return zero.
599     if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
600       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, Op.getValueType()));
601     // If the RHS is a constant, see if we can simplify it.
602     if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
603       return true;
604     // If the operation can be done in a smaller type, do so.
605     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
606       return true;
607 
608     // Output known-1 bits are only known if set in both the LHS & RHS.
609     KnownOne &= KnownOne2;
610     // Output known-0 are known to be clear if zero in either the LHS | RHS.
611     KnownZero |= KnownZero2;
612     break;
613   case ISD::OR:
614     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
615                              KnownOne, TLO, Depth+1))
616       return true;
617     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
618     if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
619                              KnownZero2, KnownOne2, TLO, Depth+1))
620       return true;
621     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
622 
623     // If all of the demanded bits are known zero on one side, return the other.
624     // These bits cannot contribute to the result of the 'or'.
625     if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
626       return TLO.CombineTo(Op, Op.getOperand(0));
627     if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
628       return TLO.CombineTo(Op, Op.getOperand(1));
629     // If all of the potentially set bits on one side are known to be set on
630     // the other side, just use the 'other' side.
631     if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
632       return TLO.CombineTo(Op, Op.getOperand(0));
633     if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
634       return TLO.CombineTo(Op, Op.getOperand(1));
635     // If the RHS is a constant, see if we can simplify it.
636     if (TLO.ShrinkDemandedConstant(Op, NewMask))
637       return true;
638     // If the operation can be done in a smaller type, do so.
639     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
640       return true;
641 
642     // Output known-0 bits are only known if clear in both the LHS & RHS.
643     KnownZero &= KnownZero2;
644     // Output known-1 are known to be set if set in either the LHS | RHS.
645     KnownOne |= KnownOne2;
646     break;
647   case ISD::XOR:
648     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
649                              KnownOne, TLO, Depth+1))
650       return true;
651     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
652     if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
653                              KnownOne2, TLO, Depth+1))
654       return true;
655     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
656 
657     // If all of the demanded bits are known zero on one side, return the other.
658     // These bits cannot contribute to the result of the 'xor'.
659     if ((KnownZero & NewMask) == NewMask)
660       return TLO.CombineTo(Op, Op.getOperand(0));
661     if ((KnownZero2 & NewMask) == NewMask)
662       return TLO.CombineTo(Op, Op.getOperand(1));
663     // If the operation can be done in a smaller type, do so.
664     if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
665       return true;
666 
667     // If all of the unknown bits are known to be zero on one side or the other
668     // (but not both) turn this into an *inclusive* or.
669     //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
670     if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
671       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
672                                                Op.getOperand(0),
673                                                Op.getOperand(1)));
674 
675     // Output known-0 bits are known if clear or set in both the LHS & RHS.
676     KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
677     // Output known-1 are known to be set if set in only one of the LHS, RHS.
678     KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
679 
680     // If all of the demanded bits on one side are known, and all of the set
681     // bits on that side are also known to be set on the other side, turn this
682     // into an AND, as we know the bits will be cleared.
683     //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
684     // NB: it is okay if more bits are known than are requested
685     if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
686       if (KnownOne == KnownOne2) { // set bits are the same on both sides
687         EVT VT = Op.getValueType();
688         SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, dl, VT);
689         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
690                                                  Op.getOperand(0), ANDC));
691       }
692     }
693 
694     // If the RHS is a constant, see if we can simplify it.
695     // for XOR, we prefer to force bits to 1 if they will make a -1.
696     // If we can't force bits, try to shrink the constant.
697     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
698       APInt Expanded = C->getAPIntValue() | (~NewMask);
699       // If we can expand it to have all bits set, do it.
700       if (Expanded.isAllOnesValue()) {
701         if (Expanded != C->getAPIntValue()) {
702           EVT VT = Op.getValueType();
703           SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
704                                         TLO.DAG.getConstant(Expanded, dl, VT));
705           return TLO.CombineTo(Op, New);
706         }
707         // If it already has all the bits set, nothing to change
708         // but don't shrink either!
709       } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
710         return true;
711       }
712     }
713 
714     KnownZero = KnownZeroOut;
715     KnownOne  = KnownOneOut;
716     break;
717   case ISD::SELECT:
718     if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
719                              KnownOne, TLO, Depth+1))
720       return true;
721     if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
722                              KnownOne2, TLO, Depth+1))
723       return true;
724     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
725     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
726 
727     // If the operands are constants, see if we can simplify them.
728     if (TLO.ShrinkDemandedConstant(Op, NewMask))
729       return true;
730 
731     // Only known if known in both the LHS and RHS.
732     KnownOne &= KnownOne2;
733     KnownZero &= KnownZero2;
734     break;
735   case ISD::SELECT_CC:
736     if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
737                              KnownOne, TLO, Depth+1))
738       return true;
739     if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
740                              KnownOne2, TLO, Depth+1))
741       return true;
742     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
743     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
744 
745     // If the operands are constants, see if we can simplify them.
746     if (TLO.ShrinkDemandedConstant(Op, NewMask))
747       return true;
748 
749     // Only known if known in both the LHS and RHS.
750     KnownOne &= KnownOne2;
751     KnownZero &= KnownZero2;
752     break;
753   case ISD::SHL:
754     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
755       unsigned ShAmt = SA->getZExtValue();
756       SDValue InOp = Op.getOperand(0);
757 
758       // If the shift count is an invalid immediate, don't do anything.
759       if (ShAmt >= BitWidth)
760         break;
761 
762       // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
763       // single shift.  We can do this if the bottom bits (which are shifted
764       // out) are never demanded.
765       if (InOp.getOpcode() == ISD::SRL &&
766           isa<ConstantSDNode>(InOp.getOperand(1))) {
767         if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
768           unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
769           unsigned Opc = ISD::SHL;
770           int Diff = ShAmt-C1;
771           if (Diff < 0) {
772             Diff = -Diff;
773             Opc = ISD::SRL;
774           }
775 
776           SDValue NewSA =
777             TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
778           EVT VT = Op.getValueType();
779           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
780                                                    InOp.getOperand(0), NewSA));
781         }
782       }
783 
784       if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
785                                KnownZero, KnownOne, TLO, Depth+1))
786         return true;
787 
788       // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
789       // are not demanded. This will likely allow the anyext to be folded away.
790       if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
791         SDValue InnerOp = InOp.getNode()->getOperand(0);
792         EVT InnerVT = InnerOp.getValueType();
793         unsigned InnerBits = InnerVT.getSizeInBits();
794         if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
795             isTypeDesirableForOp(ISD::SHL, InnerVT)) {
796           EVT ShTy = getShiftAmountTy(InnerVT, DL);
797           if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
798             ShTy = InnerVT;
799           SDValue NarrowShl =
800             TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
801                             TLO.DAG.getConstant(ShAmt, dl, ShTy));
802           return
803             TLO.CombineTo(Op,
804                           TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(),
805                                           NarrowShl));
806         }
807         // Repeat the SHL optimization above in cases where an extension
808         // intervenes: (shl (anyext (shr x, c1)), c2) to
809         // (shl (anyext x), c2-c1).  This requires that the bottom c1 bits
810         // aren't demanded (as above) and that the shifted upper c1 bits of
811         // x aren't demanded.
812         if (InOp.hasOneUse() &&
813             InnerOp.getOpcode() == ISD::SRL &&
814             InnerOp.hasOneUse() &&
815             isa<ConstantSDNode>(InnerOp.getOperand(1))) {
816           uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
817             ->getZExtValue();
818           if (InnerShAmt < ShAmt &&
819               InnerShAmt < InnerBits &&
820               NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 &&
821               NewMask.trunc(ShAmt) == 0) {
822             SDValue NewSA =
823               TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
824                                   Op.getOperand(1).getValueType());
825             EVT VT = Op.getValueType();
826             SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
827                                              InnerOp.getOperand(0));
828             return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT,
829                                                      NewExt, NewSA));
830           }
831         }
832       }
833 
834       KnownZero <<= SA->getZExtValue();
835       KnownOne  <<= SA->getZExtValue();
836       // low bits known zero.
837       KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue());
838     }
839     break;
840   case ISD::SRL:
841     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
842       EVT VT = Op.getValueType();
843       unsigned ShAmt = SA->getZExtValue();
844       unsigned VTSize = VT.getSizeInBits();
845       SDValue InOp = Op.getOperand(0);
846 
847       // If the shift count is an invalid immediate, don't do anything.
848       if (ShAmt >= BitWidth)
849         break;
850 
851       APInt InDemandedMask = (NewMask << ShAmt);
852 
853       // If the shift is exact, then it does demand the low bits (and knows that
854       // they are zero).
855       if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
856         InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);
857 
858       // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
859       // single shift.  We can do this if the top bits (which are shifted out)
860       // are never demanded.
861       if (InOp.getOpcode() == ISD::SHL &&
862           isa<ConstantSDNode>(InOp.getOperand(1))) {
863         if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
864           unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
865           unsigned Opc = ISD::SRL;
866           int Diff = ShAmt-C1;
867           if (Diff < 0) {
868             Diff = -Diff;
869             Opc = ISD::SHL;
870           }
871 
872           SDValue NewSA =
873             TLO.DAG.getConstant(Diff, dl, Op.getOperand(1).getValueType());
874           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
875                                                    InOp.getOperand(0), NewSA));
876         }
877       }
878 
879       // Compute the new bits that are at the top now.
880       if (SimplifyDemandedBits(InOp, InDemandedMask,
881                                KnownZero, KnownOne, TLO, Depth+1))
882         return true;
883       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
884       KnownZero = KnownZero.lshr(ShAmt);
885       KnownOne  = KnownOne.lshr(ShAmt);
886 
887       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
888       KnownZero |= HighBits;  // High bits known zero.
889     }
890     break;
891   case ISD::SRA:
892     // If this is an arithmetic shift right and only the low-bit is set, we can
893     // always convert this into a logical shr, even if the shift amount is
894     // variable.  The low bit of the shift cannot be an input sign bit unless
895     // the shift amount is >= the size of the datatype, which is undefined.
896     if (NewMask == 1)
897       return TLO.CombineTo(Op,
898                            TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
899                                            Op.getOperand(0), Op.getOperand(1)));
900 
901     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
902       EVT VT = Op.getValueType();
903       unsigned ShAmt = SA->getZExtValue();
904 
905       // If the shift count is an invalid immediate, don't do anything.
906       if (ShAmt >= BitWidth)
907         break;
908 
909       APInt InDemandedMask = (NewMask << ShAmt);
910 
911       // If the shift is exact, then it does demand the low bits (and knows that
912       // they are zero).
913       if (cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact())
914         InDemandedMask |= APInt::getLowBitsSet(BitWidth, ShAmt);
915 
916       // If any of the demanded bits are produced by the sign extension, we also
917       // demand the input sign bit.
918       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
919       if (HighBits.intersects(NewMask))
920         InDemandedMask |= APInt::getSignBit(VT.getScalarSizeInBits());
921 
922       if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
923                                KnownZero, KnownOne, TLO, Depth+1))
924         return true;
925       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
926       KnownZero = KnownZero.lshr(ShAmt);
927       KnownOne  = KnownOne.lshr(ShAmt);
928 
929       // Handle the sign bit, adjusted to where it is now in the mask.
930       APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
931 
932       // If the input sign bit is known to be zero, or if none of the top bits
933       // are demanded, turn this into an unsigned shift right.
934       if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
935         SDNodeFlags Flags;
936         Flags.setExact(cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact());
937         return TLO.CombineTo(Op,
938                              TLO.DAG.getNode(ISD::SRL, dl, VT, Op.getOperand(0),
939                                              Op.getOperand(1), &Flags));
940       }
941 
942       int Log2 = NewMask.exactLogBase2();
943       if (Log2 >= 0) {
944         // The bit must come from the sign.
945         SDValue NewSA =
946           TLO.DAG.getConstant(BitWidth - 1 - Log2, dl,
947                               Op.getOperand(1).getValueType());
948         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
949                                                  Op.getOperand(0), NewSA));
950       }
951 
952       if (KnownOne.intersects(SignBit))
953         // New bits are known one.
954         KnownOne |= HighBits;
955     }
956     break;
957   case ISD::SIGN_EXTEND_INREG: {
958     EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
959 
960     APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
961     // If we only care about the highest bit, don't bother shifting right.
962     if (MsbMask == NewMask) {
963       unsigned ShAmt = ExVT.getScalarSizeInBits();
964       SDValue InOp = Op.getOperand(0);
965       unsigned VTBits = Op->getValueType(0).getScalarSizeInBits();
966       bool AlreadySignExtended =
967         TLO.DAG.ComputeNumSignBits(InOp) >= VTBits-ShAmt+1;
968       // However if the input is already sign extended we expect the sign
969       // extension to be dropped altogether later and do not simplify.
970       if (!AlreadySignExtended) {
971         // Compute the correct shift amount type, which must be getShiftAmountTy
972         // for scalar types after legalization.
973         EVT ShiftAmtTy = Op.getValueType();
974         if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
975           ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
976 
977         SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, dl,
978                                                ShiftAmtTy);
979         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
980                                                  Op.getValueType(), InOp,
981                                                  ShiftAmt));
982       }
983     }
984 
985     // Sign extension.  Compute the demanded bits in the result that are not
986     // present in the input.
987     APInt NewBits =
988       APInt::getHighBitsSet(BitWidth,
989                             BitWidth - ExVT.getScalarSizeInBits());
990 
991     // If none of the extended bits are demanded, eliminate the sextinreg.
992     if ((NewBits & NewMask) == 0)
993       return TLO.CombineTo(Op, Op.getOperand(0));
994 
995     APInt InSignBit =
996       APInt::getSignBit(ExVT.getScalarSizeInBits()).zext(BitWidth);
997     APInt InputDemandedBits =
998       APInt::getLowBitsSet(BitWidth,
999                            ExVT.getScalarSizeInBits()) &
1000       NewMask;
1001 
1002     // Since the sign extended bits are demanded, we know that the sign
1003     // bit is demanded.
1004     InputDemandedBits |= InSignBit;
1005 
1006     if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
1007                              KnownZero, KnownOne, TLO, Depth+1))
1008       return true;
1009     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1010 
1011     // If the sign bit of the input is known set or clear, then we know the
1012     // top bits of the result.
1013 
1014     // If the input sign bit is known zero, convert this into a zero extension.
1015     if (KnownZero.intersects(InSignBit))
1016       return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(
1017                                    Op.getOperand(0), dl, ExVT.getScalarType()));
1018 
1019     if (KnownOne.intersects(InSignBit)) {    // Input sign bit known set
1020       KnownOne |= NewBits;
1021       KnownZero &= ~NewBits;
1022     } else {                       // Input sign bit unknown
1023       KnownZero &= ~NewBits;
1024       KnownOne &= ~NewBits;
1025     }
1026     break;
1027   }
1028   case ISD::BUILD_PAIR: {
1029     EVT HalfVT = Op.getOperand(0).getValueType();
1030     unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
1031 
1032     APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
1033     APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
1034 
1035     APInt KnownZeroLo, KnownOneLo;
1036     APInt KnownZeroHi, KnownOneHi;
1037 
1038     if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
1039                              KnownOneLo, TLO, Depth + 1))
1040       return true;
1041 
1042     if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
1043                              KnownOneHi, TLO, Depth + 1))
1044       return true;
1045 
1046     KnownZero = KnownZeroLo.zext(BitWidth) |
1047                 KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);
1048 
1049     KnownOne = KnownOneLo.zext(BitWidth) |
1050                KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
1051     break;
1052   }
1053   case ISD::ZERO_EXTEND: {
1054     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
1055     APInt InMask = NewMask.trunc(OperandBitWidth);
1056 
1057     // If none of the top bits are demanded, convert this into an any_extend.
1058     APInt NewBits =
1059       APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
1060     if (!NewBits.intersects(NewMask))
1061       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
1062                                                Op.getValueType(),
1063                                                Op.getOperand(0)));
1064 
1065     if (SimplifyDemandedBits(Op.getOperand(0), InMask,
1066                              KnownZero, KnownOne, TLO, Depth+1))
1067       return true;
1068     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1069     KnownZero = KnownZero.zext(BitWidth);
1070     KnownOne = KnownOne.zext(BitWidth);
1071     KnownZero |= NewBits;
1072     break;
1073   }
1074   case ISD::SIGN_EXTEND: {
1075     EVT InVT = Op.getOperand(0).getValueType();
1076     unsigned InBits = InVT.getScalarSizeInBits();
1077     APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
1078     APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
1079     APInt NewBits   = ~InMask & NewMask;
1080 
1081     // If none of the top bits are demanded, convert this into an any_extend.
1082     if (NewBits == 0)
1083       return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
1084                                               Op.getValueType(),
1085                                               Op.getOperand(0)));
1086 
1087     // Since some of the sign extended bits are demanded, we know that the sign
1088     // bit is demanded.
1089     APInt InDemandedBits = InMask & NewMask;
1090     InDemandedBits |= InSignBit;
1091     InDemandedBits = InDemandedBits.trunc(InBits);
1092 
1093     if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
1094                              KnownOne, TLO, Depth+1))
1095       return true;
1096     KnownZero = KnownZero.zext(BitWidth);
1097     KnownOne = KnownOne.zext(BitWidth);
1098 
1099     // If the sign bit is known zero, convert this to a zero extend.
1100     if (KnownZero.intersects(InSignBit))
1101       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
1102                                                Op.getValueType(),
1103                                                Op.getOperand(0)));
1104 
1105     // If the sign bit is known one, the top bits match.
1106     if (KnownOne.intersects(InSignBit)) {
1107       KnownOne |= NewBits;
1108       assert((KnownZero & NewBits) == 0);
1109     } else {   // Otherwise, top bits aren't known.
1110       assert((KnownOne & NewBits) == 0);
1111       assert((KnownZero & NewBits) == 0);
1112     }
1113     break;
1114   }
1115   case ISD::ANY_EXTEND: {
1116     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
1117     APInt InMask = NewMask.trunc(OperandBitWidth);
1118     if (SimplifyDemandedBits(Op.getOperand(0), InMask,
1119                              KnownZero, KnownOne, TLO, Depth+1))
1120       return true;
1121     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1122     KnownZero = KnownZero.zext(BitWidth);
1123     KnownOne = KnownOne.zext(BitWidth);
1124     break;
1125   }
1126   case ISD::TRUNCATE: {
1127     // Simplify the input, using demanded bit information, and compute the known
1128     // zero/one bits live out.
1129     unsigned OperandBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
1130     APInt TruncMask = NewMask.zext(OperandBitWidth);
1131     if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
1132                              KnownZero, KnownOne, TLO, Depth+1))
1133       return true;
1134     KnownZero = KnownZero.trunc(BitWidth);
1135     KnownOne = KnownOne.trunc(BitWidth);
1136 
1137     // If the input is only used by this truncate, see if we can shrink it based
1138     // on the known demanded bits.
1139     if (Op.getOperand(0).getNode()->hasOneUse()) {
1140       SDValue In = Op.getOperand(0);
1141       switch (In.getOpcode()) {
1142       default: break;
1143       case ISD::SRL:
1144         // Shrink SRL by a constant if none of the high bits shifted in are
1145         // demanded.
1146         if (TLO.LegalTypes() &&
1147             !isTypeDesirableForOp(ISD::SRL, Op.getValueType()))
1148           // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
1149           // undesirable.
1150           break;
1151         ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
1152         if (!ShAmt)
1153           break;
1154         SDValue Shift = In.getOperand(1);
1155         if (TLO.LegalTypes()) {
1156           uint64_t ShVal = ShAmt->getZExtValue();
1157           Shift = TLO.DAG.getConstant(ShVal, dl,
1158                                       getShiftAmountTy(Op.getValueType(), DL));
1159         }
1160 
1161         APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
1162                                                OperandBitWidth - BitWidth);
1163         HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
1164 
1165         if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
1166           // None of the shifted in bits are needed.  Add a truncate of the
1167           // shift input, then shift it.
1168           SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
1169                                              Op.getValueType(),
1170                                              In.getOperand(0));
1171           return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
1172                                                    Op.getValueType(),
1173                                                    NewTrunc,
1174                                                    Shift));
1175         }
1176         break;
1177       }
1178     }
1179 
1180     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1181     break;
1182   }
1183   case ISD::AssertZext: {
1184     // AssertZext demands all of the high bits, plus any of the low bits
1185     // demanded by its users.
1186     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1187     APInt InMask = APInt::getLowBitsSet(BitWidth,
1188                                         VT.getSizeInBits());
1189     if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | NewMask,
1190                              KnownZero, KnownOne, TLO, Depth+1))
1191       return true;
1192     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1193 
1194     KnownZero |= ~InMask & NewMask;
1195     break;
1196   }
1197   case ISD::BITCAST:
1198     // If this is an FP->Int bitcast and if the sign bit is the only
1199     // thing demanded, turn this into a FGETSIGN.
1200     if (!TLO.LegalOperations() &&
1201         !Op.getValueType().isVector() &&
1202         !Op.getOperand(0).getValueType().isVector() &&
1203         NewMask == APInt::getSignBit(Op.getValueSizeInBits()) &&
1204         Op.getOperand(0).getValueType().isFloatingPoint()) {
1205       bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
1206       bool i32Legal  = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
1207       if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple() &&
1208            Op.getOperand(0).getValueType() != MVT::f128) {
1209         // Cannot eliminate/lower SHL for f128 yet.
1210         EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32;
1211         // Make a FGETSIGN + SHL to move the sign bit into the appropriate
1212         // place.  We expect the SHL to be eliminated by other optimizations.
1213         SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0));
1214         unsigned OpVTSizeInBits = Op.getValueSizeInBits();
1215         if (!OpVTLegal && OpVTSizeInBits > 32)
1216           Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign);
1217         unsigned ShVal = Op.getValueSizeInBits() - 1;
1218         SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, Op.getValueType());
1219         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
1220                                                  Op.getValueType(),
1221                                                  Sign, ShAmt));
1222       }
1223     }
1224     break;
1225   case ISD::ADD:
1226   case ISD::MUL:
1227   case ISD::SUB: {
1228     // Add, Sub, and Mul don't demand any bits in positions beyond that
1229     // of the highest bit demanded of them.
1230     APInt LoMask = APInt::getLowBitsSet(BitWidth,
1231                                         BitWidth - NewMask.countLeadingZeros());
1232     if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2,
1233                              KnownOne2, TLO, Depth+1) ||
1234         SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2,
1235                              KnownOne2, TLO, Depth+1) ||
1236         // See if the operation should be performed at a smaller bit width.
1237         TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) {
1238       const SDNodeFlags *Flags = Op.getNode()->getFlags();
1239       if (Flags->hasNoSignedWrap() || Flags->hasNoUnsignedWrap()) {
1240         // Disable the nsw and nuw flags. We can no longer guarantee that we
1241         // won't wrap after simplification.
1242         SDNodeFlags NewFlags = *Flags;
1243         NewFlags.setNoSignedWrap(false);
1244         NewFlags.setNoUnsignedWrap(false);
1245         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, Op.getValueType(),
1246                                         Op.getOperand(0), Op.getOperand(1),
1247                                         &NewFlags);
1248         return TLO.CombineTo(Op, NewOp);
1249       }
1250       return true;
1251     }
1252     LLVM_FALLTHROUGH;
1253   }
1254   default:
1255     // Just use computeKnownBits to compute output bits.
1256     TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
1257     break;
1258   }
1259 
1260   // If we know the value of all of the demanded bits, return this as a
1261   // constant.
1262   if ((NewMask & (KnownZero|KnownOne)) == NewMask) {
1263     // Avoid folding to a constant if any OpaqueConstant is involved.
1264     const SDNode *N = Op.getNode();
1265     for (SDNodeIterator I = SDNodeIterator::begin(N),
1266          E = SDNodeIterator::end(N); I != E; ++I) {
1267       SDNode *Op = *I;
1268       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
1269         if (C->isOpaque())
1270           return false;
1271     }
1272     return TLO.CombineTo(Op,
1273                          TLO.DAG.getConstant(KnownOne, dl, Op.getValueType()));
1274   }
1275 
1276   return false;
1277 }
1278 
1279 /// Determine which of the bits specified in Mask are known to be either zero or
1280 /// one and return them in the KnownZero/KnownOne bitsets.
1281 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1282                                                    APInt &KnownZero,
1283                                                    APInt &KnownOne,
1284                                                    const SelectionDAG &DAG,
1285                                                    unsigned Depth) const {
1286   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1287           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1288           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1289           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1290          "Should use MaskedValueIsZero if you don't know whether Op"
1291          " is a target node!");
1292   KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
1293 }
1294 
1295 /// This method can be implemented by targets that want to expose additional
1296 /// information about sign bits to the DAG Combiner.
1297 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
1298                                                          const SelectionDAG &,
1299                                                          unsigned Depth) const {
1300   assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
1301           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1302           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
1303           Op.getOpcode() == ISD::INTRINSIC_VOID) &&
1304          "Should use ComputeNumSignBits if you don't know whether Op"
1305          " is a target node!");
1306   return 1;
1307 }
1308 
1309 bool TargetLowering::isConstTrueVal(const SDNode *N) const {
1310   if (!N)
1311     return false;
1312 
1313   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1314   if (!CN) {
1315     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1316     if (!BV)
1317       return false;
1318 
1319     // Only interested in constant splats, we don't care about undef
1320     // elements in identifying boolean constants and getConstantSplatNode
1321     // returns NULL if all ops are undef;
1322     CN = BV->getConstantSplatNode();
1323     if (!CN)
1324       return false;
1325   }
1326 
1327   switch (getBooleanContents(N->getValueType(0))) {
1328   case UndefinedBooleanContent:
1329     return CN->getAPIntValue()[0];
1330   case ZeroOrOneBooleanContent:
1331     return CN->isOne();
1332   case ZeroOrNegativeOneBooleanContent:
1333     return CN->isAllOnesValue();
1334   }
1335 
1336   llvm_unreachable("Invalid boolean contents");
1337 }
1338 
1339 SDValue TargetLowering::getConstTrueVal(SelectionDAG &DAG, EVT VT,
1340                                         const SDLoc &DL) const {
1341   unsigned ElementWidth = VT.getScalarSizeInBits();
1342   APInt TrueInt =
1343       getBooleanContents(VT) == TargetLowering::ZeroOrOneBooleanContent
1344           ? APInt(ElementWidth, 1)
1345           : APInt::getAllOnesValue(ElementWidth);
1346   return DAG.getConstant(TrueInt, DL, VT);
1347 }
1348 
1349 bool TargetLowering::isConstFalseVal(const SDNode *N) const {
1350   if (!N)
1351     return false;
1352 
1353   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
1354   if (!CN) {
1355     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
1356     if (!BV)
1357       return false;
1358 
1359     // Only interested in constant splats, we don't care about undef
1360     // elements in identifying boolean constants and getConstantSplatNode
1361     // returns NULL if all ops are undef;
1362     CN = BV->getConstantSplatNode();
1363     if (!CN)
1364       return false;
1365   }
1366 
1367   if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
1368     return !CN->getAPIntValue()[0];
1369 
1370   return CN->isNullValue();
1371 }
1372 
1373 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
1374                                        bool SExt) const {
1375   if (VT == MVT::i1)
1376     return N->isOne();
1377 
1378   TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
1379   switch (Cnt) {
1380   case TargetLowering::ZeroOrOneBooleanContent:
1381     // An extended value of 1 is always true, unless its original type is i1,
1382     // in which case it will be sign extended to -1.
1383     return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
1384   case TargetLowering::UndefinedBooleanContent:
1385   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1386     return N->isAllOnesValue() && SExt;
1387   }
1388   llvm_unreachable("Unexpected enumeration.");
1389 }
1390 
1391 /// This helper function of SimplifySetCC tries to optimize the comparison when
1392 /// either operand of the SetCC node is a bitwise-and instruction.
1393 SDValue TargetLowering::simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
1394                                              ISD::CondCode Cond,
1395                                              DAGCombinerInfo &DCI,
1396                                              const SDLoc &DL) const {
1397   // Match these patterns in any of their permutations:
1398   // (X & Y) == Y
1399   // (X & Y) != Y
1400   if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
1401     std::swap(N0, N1);
1402 
1403   EVT OpVT = N0.getValueType();
1404   if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
1405       (Cond != ISD::SETEQ && Cond != ISD::SETNE))
1406     return SDValue();
1407 
1408   SDValue X, Y;
1409   if (N0.getOperand(0) == N1) {
1410     X = N0.getOperand(1);
1411     Y = N0.getOperand(0);
1412   } else if (N0.getOperand(1) == N1) {
1413     X = N0.getOperand(0);
1414     Y = N0.getOperand(1);
1415   } else {
1416     return SDValue();
1417   }
1418 
1419   SelectionDAG &DAG = DCI.DAG;
1420   SDValue Zero = DAG.getConstant(0, DL, OpVT);
1421   if (DAG.isKnownToBeAPowerOfTwo(Y)) {
1422     // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
1423     // Note that where Y is variable and is known to have at most one bit set
1424     // (for example, if it is Z & 1) we cannot do this; the expressions are not
1425     // equivalent when Y == 0.
1426     Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true);
1427     if (DCI.isBeforeLegalizeOps() ||
1428         isCondCodeLegal(Cond, N0.getSimpleValueType()))
1429       return DAG.getSetCC(DL, VT, N0, Zero, Cond);
1430   } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
1431     // If the target supports an 'and-not' or 'and-complement' logic operation,
1432     // try to use that to make a comparison operation more efficient.
1433     // But don't do this transform if the mask is a single bit because there are
1434     // more efficient ways to deal with that case (for example, 'bt' on x86 or
1435     // 'rlwinm' on PPC).
1436 
1437     // Bail out if the compare operand that we want to turn into a zero is
1438     // already a zero (otherwise, infinite loop).
1439     auto *YConst = dyn_cast<ConstantSDNode>(Y);
1440     if (YConst && YConst->isNullValue())
1441       return SDValue();
1442 
1443     // Transform this into: ~X & Y == 0.
1444     SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
1445     SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
1446     return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
1447   }
1448 
1449   return SDValue();
1450 }
1451 
1452 /// Try to simplify a setcc built with the specified operands and cc. If it is
1453 /// unable to simplify it, return a null SDValue.
1454 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
1455                                       ISD::CondCode Cond, bool foldBooleans,
1456                                       DAGCombinerInfo &DCI,
1457                                       const SDLoc &dl) const {
1458   SelectionDAG &DAG = DCI.DAG;
1459 
1460   // These setcc operations always fold.
1461   switch (Cond) {
1462   default: break;
1463   case ISD::SETFALSE:
1464   case ISD::SETFALSE2: return DAG.getConstant(0, dl, VT);
1465   case ISD::SETTRUE:
1466   case ISD::SETTRUE2: {
1467     TargetLowering::BooleanContent Cnt =
1468         getBooleanContents(N0->getValueType(0));
1469     return DAG.getConstant(
1470         Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1471         VT);
1472   }
1473   }
1474 
1475   // Ensure that the constant occurs on the RHS, and fold constant
1476   // comparisons.
1477   ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
1478   if (isa<ConstantSDNode>(N0.getNode()) &&
1479       (DCI.isBeforeLegalizeOps() ||
1480        isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
1481     return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
1482 
1483   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1484     const APInt &C1 = N1C->getAPIntValue();
1485 
1486     // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
1487     // equality comparison, then we're just comparing whether X itself is
1488     // zero.
1489     if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
1490         N0.getOperand(0).getOpcode() == ISD::CTLZ &&
1491         N0.getOperand(1).getOpcode() == ISD::Constant) {
1492       const APInt &ShAmt
1493         = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1494       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1495           ShAmt == Log2_32(N0.getValueSizeInBits())) {
1496         if ((C1 == 0) == (Cond == ISD::SETEQ)) {
1497           // (srl (ctlz x), 5) == 0  -> X != 0
1498           // (srl (ctlz x), 5) != 1  -> X != 0
1499           Cond = ISD::SETNE;
1500         } else {
1501           // (srl (ctlz x), 5) != 0  -> X == 0
1502           // (srl (ctlz x), 5) == 1  -> X == 0
1503           Cond = ISD::SETEQ;
1504         }
1505         SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
1506         return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
1507                             Zero, Cond);
1508       }
1509     }
1510 
1511     SDValue CTPOP = N0;
1512     // Look through truncs that don't change the value of a ctpop.
1513     if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
1514       CTPOP = N0.getOperand(0);
1515 
1516     if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
1517         (N0 == CTPOP ||
1518          N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) {
1519       EVT CTVT = CTPOP.getValueType();
1520       SDValue CTOp = CTPOP.getOperand(0);
1521 
1522       // (ctpop x) u< 2 -> (x & x-1) == 0
1523       // (ctpop x) u> 1 -> (x & x-1) != 0
1524       if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
1525         SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
1526                                   DAG.getConstant(1, dl, CTVT));
1527         SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
1528         ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
1529         return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC);
1530       }
1531 
1532       // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
1533     }
1534 
1535     // (zext x) == C --> x == (trunc C)
1536     // (sext x) == C --> x == (trunc C)
1537     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1538         DCI.isBeforeLegalize() && N0->hasOneUse()) {
1539       unsigned MinBits = N0.getValueSizeInBits();
1540       SDValue PreExt;
1541       bool Signed = false;
1542       if (N0->getOpcode() == ISD::ZERO_EXTEND) {
1543         // ZExt
1544         MinBits = N0->getOperand(0).getValueSizeInBits();
1545         PreExt = N0->getOperand(0);
1546       } else if (N0->getOpcode() == ISD::AND) {
1547         // DAGCombine turns costly ZExts into ANDs
1548         if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
1549           if ((C->getAPIntValue()+1).isPowerOf2()) {
1550             MinBits = C->getAPIntValue().countTrailingOnes();
1551             PreExt = N0->getOperand(0);
1552           }
1553       } else if (N0->getOpcode() == ISD::SIGN_EXTEND) {
1554         // SExt
1555         MinBits = N0->getOperand(0).getValueSizeInBits();
1556         PreExt = N0->getOperand(0);
1557         Signed = true;
1558       } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
1559         // ZEXTLOAD / SEXTLOAD
1560         if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
1561           MinBits = LN0->getMemoryVT().getSizeInBits();
1562           PreExt = N0;
1563         } else if (LN0->getExtensionType() == ISD::SEXTLOAD) {
1564           Signed = true;
1565           MinBits = LN0->getMemoryVT().getSizeInBits();
1566           PreExt = N0;
1567         }
1568       }
1569 
1570       // Figure out how many bits we need to preserve this constant.
1571       unsigned ReqdBits = Signed ?
1572         C1.getBitWidth() - C1.getNumSignBits() + 1 :
1573         C1.getActiveBits();
1574 
1575       // Make sure we're not losing bits from the constant.
1576       if (MinBits > 0 &&
1577           MinBits < C1.getBitWidth() &&
1578           MinBits >= ReqdBits) {
1579         EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
1580         if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
1581           // Will get folded away.
1582           SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt);
1583           if (MinBits == 1 && C1 == 1)
1584             // Invert the condition.
1585             return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1),
1586                                 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1587           SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
1588           return DAG.getSetCC(dl, VT, Trunc, C, Cond);
1589         }
1590 
1591         // If truncating the setcc operands is not desirable, we can still
1592         // simplify the expression in some cases:
1593         // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc)
1594         // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc))
1595         // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc))
1596         // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc)
1597         // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc))
1598         // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc)
1599         SDValue TopSetCC = N0->getOperand(0);
1600         unsigned N0Opc = N0->getOpcode();
1601         bool SExt = (N0Opc == ISD::SIGN_EXTEND);
1602         if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
1603             TopSetCC.getOpcode() == ISD::SETCC &&
1604             (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
1605             (isConstFalseVal(N1C) ||
1606              isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
1607 
1608           bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
1609                          (!N1C->isNullValue() && Cond == ISD::SETNE);
1610 
1611           if (!Inverse)
1612             return TopSetCC;
1613 
1614           ISD::CondCode InvCond = ISD::getSetCCInverse(
1615               cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
1616               TopSetCC.getOperand(0).getValueType().isInteger());
1617           return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
1618                                       TopSetCC.getOperand(1),
1619                                       InvCond);
1620 
1621         }
1622       }
1623     }
1624 
1625     // If the LHS is '(and load, const)', the RHS is 0,
1626     // the test is for equality or unsigned, and all 1 bits of the const are
1627     // in the same partial word, see if we can shorten the load.
1628     if (DCI.isBeforeLegalize() &&
1629         !ISD::isSignedIntSetCC(Cond) &&
1630         N0.getOpcode() == ISD::AND && C1 == 0 &&
1631         N0.getNode()->hasOneUse() &&
1632         isa<LoadSDNode>(N0.getOperand(0)) &&
1633         N0.getOperand(0).getNode()->hasOneUse() &&
1634         isa<ConstantSDNode>(N0.getOperand(1))) {
1635       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
1636       APInt bestMask;
1637       unsigned bestWidth = 0, bestOffset = 0;
1638       if (!Lod->isVolatile() && Lod->isUnindexed()) {
1639         unsigned origWidth = N0.getValueSizeInBits();
1640         unsigned maskWidth = origWidth;
1641         // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
1642         // 8 bits, but have to be careful...
1643         if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
1644           origWidth = Lod->getMemoryVT().getSizeInBits();
1645         const APInt &Mask =
1646           cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
1647         for (unsigned width = origWidth / 2; width>=8; width /= 2) {
1648           APInt newMask = APInt::getLowBitsSet(maskWidth, width);
1649           for (unsigned offset=0; offset<origWidth/width; offset++) {
1650             if ((newMask & Mask) == Mask) {
1651               if (!DAG.getDataLayout().isLittleEndian())
1652                 bestOffset = (origWidth/width - offset - 1) * (width/8);
1653               else
1654                 bestOffset = (uint64_t)offset * (width/8);
1655               bestMask = Mask.lshr(offset * (width/8) * 8);
1656               bestWidth = width;
1657               break;
1658             }
1659             newMask = newMask << width;
1660           }
1661         }
1662       }
1663       if (bestWidth) {
1664         EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth);
1665         if (newVT.isRound()) {
1666           EVT PtrType = Lod->getOperand(1).getValueType();
1667           SDValue Ptr = Lod->getBasePtr();
1668           if (bestOffset != 0)
1669             Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
1670                               DAG.getConstant(bestOffset, dl, PtrType));
1671           unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
1672           SDValue NewLoad = DAG.getLoad(
1673               newVT, dl, Lod->getChain(), Ptr,
1674               Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign);
1675           return DAG.getSetCC(dl, VT,
1676                               DAG.getNode(ISD::AND, dl, newVT, NewLoad,
1677                                       DAG.getConstant(bestMask.trunc(bestWidth),
1678                                                       dl, newVT)),
1679                               DAG.getConstant(0LL, dl, newVT), Cond);
1680         }
1681       }
1682     }
1683 
1684     // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
1685     if (N0.getOpcode() == ISD::ZERO_EXTEND) {
1686       unsigned InSize = N0.getOperand(0).getValueSizeInBits();
1687 
1688       // If the comparison constant has bits in the upper part, the
1689       // zero-extended value could never match.
1690       if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
1691                                               C1.getBitWidth() - InSize))) {
1692         switch (Cond) {
1693         case ISD::SETUGT:
1694         case ISD::SETUGE:
1695         case ISD::SETEQ: return DAG.getConstant(0, dl, VT);
1696         case ISD::SETULT:
1697         case ISD::SETULE:
1698         case ISD::SETNE: return DAG.getConstant(1, dl, VT);
1699         case ISD::SETGT:
1700         case ISD::SETGE:
1701           // True if the sign bit of C1 is set.
1702           return DAG.getConstant(C1.isNegative(), dl, VT);
1703         case ISD::SETLT:
1704         case ISD::SETLE:
1705           // True if the sign bit of C1 isn't set.
1706           return DAG.getConstant(C1.isNonNegative(), dl, VT);
1707         default:
1708           break;
1709         }
1710       }
1711 
1712       // Otherwise, we can perform the comparison with the low bits.
1713       switch (Cond) {
1714       case ISD::SETEQ:
1715       case ISD::SETNE:
1716       case ISD::SETUGT:
1717       case ISD::SETUGE:
1718       case ISD::SETULT:
1719       case ISD::SETULE: {
1720         EVT newVT = N0.getOperand(0).getValueType();
1721         if (DCI.isBeforeLegalizeOps() ||
1722             (isOperationLegal(ISD::SETCC, newVT) &&
1723              getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
1724           EVT NewSetCCVT =
1725               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), newVT);
1726           SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
1727 
1728           SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
1729                                           NewConst, Cond);
1730           return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
1731         }
1732         break;
1733       }
1734       default:
1735         break;   // todo, be more careful with signed comparisons
1736       }
1737     } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1738                (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1739       EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
1740       unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
1741       EVT ExtDstTy = N0.getValueType();
1742       unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
1743 
1744       // If the constant doesn't fit into the number of bits for the source of
1745       // the sign extension, it is impossible for both sides to be equal.
1746       if (C1.getMinSignedBits() > ExtSrcTyBits)
1747         return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
1748 
1749       SDValue ZextOp;
1750       EVT Op0Ty = N0.getOperand(0).getValueType();
1751       if (Op0Ty == ExtSrcTy) {
1752         ZextOp = N0.getOperand(0);
1753       } else {
1754         APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
1755         ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
1756                               DAG.getConstant(Imm, dl, Op0Ty));
1757       }
1758       if (!DCI.isCalledByLegalizer())
1759         DCI.AddToWorklist(ZextOp.getNode());
1760       // Otherwise, make this a use of a zext.
1761       return DAG.getSetCC(dl, VT, ZextOp,
1762                           DAG.getConstant(C1 & APInt::getLowBitsSet(
1763                                                               ExtDstTyBits,
1764                                                               ExtSrcTyBits),
1765                                           dl, ExtDstTy),
1766                           Cond);
1767     } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
1768                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1769       // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
1770       if (N0.getOpcode() == ISD::SETCC &&
1771           isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
1772         bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1);
1773         if (TrueWhenTrue)
1774           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
1775         // Invert the condition.
1776         ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
1777         CC = ISD::getSetCCInverse(CC,
1778                                   N0.getOperand(0).getValueType().isInteger());
1779         if (DCI.isBeforeLegalizeOps() ||
1780             isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
1781           return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
1782       }
1783 
1784       if ((N0.getOpcode() == ISD::XOR ||
1785            (N0.getOpcode() == ISD::AND &&
1786             N0.getOperand(0).getOpcode() == ISD::XOR &&
1787             N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
1788           isa<ConstantSDNode>(N0.getOperand(1)) &&
1789           cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
1790         // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
1791         // can only do this if the top bits are known zero.
1792         unsigned BitWidth = N0.getValueSizeInBits();
1793         if (DAG.MaskedValueIsZero(N0,
1794                                   APInt::getHighBitsSet(BitWidth,
1795                                                         BitWidth-1))) {
1796           // Okay, get the un-inverted input value.
1797           SDValue Val;
1798           if (N0.getOpcode() == ISD::XOR)
1799             Val = N0.getOperand(0);
1800           else {
1801             assert(N0.getOpcode() == ISD::AND &&
1802                     N0.getOperand(0).getOpcode() == ISD::XOR);
1803             // ((X^1)&1)^1 -> X & 1
1804             Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
1805                               N0.getOperand(0).getOperand(0),
1806                               N0.getOperand(1));
1807           }
1808 
1809           return DAG.getSetCC(dl, VT, Val, N1,
1810                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1811         }
1812       } else if (N1C->getAPIntValue() == 1 &&
1813                  (VT == MVT::i1 ||
1814                   getBooleanContents(N0->getValueType(0)) ==
1815                       ZeroOrOneBooleanContent)) {
1816         SDValue Op0 = N0;
1817         if (Op0.getOpcode() == ISD::TRUNCATE)
1818           Op0 = Op0.getOperand(0);
1819 
1820         if ((Op0.getOpcode() == ISD::XOR) &&
1821             Op0.getOperand(0).getOpcode() == ISD::SETCC &&
1822             Op0.getOperand(1).getOpcode() == ISD::SETCC) {
1823           // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc)
1824           Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ;
1825           return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1),
1826                               Cond);
1827         }
1828         if (Op0.getOpcode() == ISD::AND &&
1829             isa<ConstantSDNode>(Op0.getOperand(1)) &&
1830             cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) {
1831           // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
1832           if (Op0.getValueType().bitsGT(VT))
1833             Op0 = DAG.getNode(ISD::AND, dl, VT,
1834                           DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
1835                           DAG.getConstant(1, dl, VT));
1836           else if (Op0.getValueType().bitsLT(VT))
1837             Op0 = DAG.getNode(ISD::AND, dl, VT,
1838                         DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
1839                         DAG.getConstant(1, dl, VT));
1840 
1841           return DAG.getSetCC(dl, VT, Op0,
1842                               DAG.getConstant(0, dl, Op0.getValueType()),
1843                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1844         }
1845         if (Op0.getOpcode() == ISD::AssertZext &&
1846             cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1)
1847           return DAG.getSetCC(dl, VT, Op0,
1848                               DAG.getConstant(0, dl, Op0.getValueType()),
1849                               Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
1850       }
1851     }
1852 
1853     APInt MinVal, MaxVal;
1854     unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
1855     if (ISD::isSignedIntSetCC(Cond)) {
1856       MinVal = APInt::getSignedMinValue(OperandBitSize);
1857       MaxVal = APInt::getSignedMaxValue(OperandBitSize);
1858     } else {
1859       MinVal = APInt::getMinValue(OperandBitSize);
1860       MaxVal = APInt::getMaxValue(OperandBitSize);
1861     }
1862 
1863     // Canonicalize GE/LE comparisons to use GT/LT comparisons.
1864     if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
1865       if (C1 == MinVal) return DAG.getConstant(1, dl, VT);  // X >= MIN --> true
1866       // X >= C0 --> X > (C0 - 1)
1867       APInt C = C1 - 1;
1868       ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
1869       if ((DCI.isBeforeLegalizeOps() ||
1870            isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1871           (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1872                                 isLegalICmpImmediate(C.getSExtValue())))) {
1873         return DAG.getSetCC(dl, VT, N0,
1874                             DAG.getConstant(C, dl, N1.getValueType()),
1875                             NewCC);
1876       }
1877     }
1878 
1879     if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
1880       if (C1 == MaxVal) return DAG.getConstant(1, dl, VT);  // X <= MAX --> true
1881       // X <= C0 --> X < (C0 + 1)
1882       APInt C = C1 + 1;
1883       ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
1884       if ((DCI.isBeforeLegalizeOps() ||
1885            isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
1886           (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
1887                                 isLegalICmpImmediate(C.getSExtValue())))) {
1888         return DAG.getSetCC(dl, VT, N0,
1889                             DAG.getConstant(C, dl, N1.getValueType()),
1890                             NewCC);
1891       }
1892     }
1893 
1894     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
1895       return DAG.getConstant(0, dl, VT);      // X < MIN --> false
1896     if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
1897       return DAG.getConstant(1, dl, VT);      // X >= MIN --> true
1898     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
1899       return DAG.getConstant(0, dl, VT);      // X > MAX --> false
1900     if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
1901       return DAG.getConstant(1, dl, VT);      // X <= MAX --> true
1902 
1903     // Canonicalize setgt X, Min --> setne X, Min
1904     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
1905       return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1906     // Canonicalize setlt X, Max --> setne X, Max
1907     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
1908       return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
1909 
1910     // If we have setult X, 1, turn it into seteq X, 0
1911     if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
1912       return DAG.getSetCC(dl, VT, N0,
1913                           DAG.getConstant(MinVal, dl, N0.getValueType()),
1914                           ISD::SETEQ);
1915     // If we have setugt X, Max-1, turn it into seteq X, Max
1916     if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
1917       return DAG.getSetCC(dl, VT, N0,
1918                           DAG.getConstant(MaxVal, dl, N0.getValueType()),
1919                           ISD::SETEQ);
1920 
1921     // If we have "setcc X, C0", check to see if we can shrink the immediate
1922     // by changing cc.
1923 
1924     // SETUGT X, SINTMAX  -> SETLT X, 0
1925     if (Cond == ISD::SETUGT &&
1926         C1 == APInt::getSignedMaxValue(OperandBitSize))
1927       return DAG.getSetCC(dl, VT, N0,
1928                           DAG.getConstant(0, dl, N1.getValueType()),
1929                           ISD::SETLT);
1930 
1931     // SETULT X, SINTMIN  -> SETGT X, -1
1932     if (Cond == ISD::SETULT &&
1933         C1 == APInt::getSignedMinValue(OperandBitSize)) {
1934       SDValue ConstMinusOne =
1935           DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
1936                           N1.getValueType());
1937       return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
1938     }
1939 
1940     // Fold bit comparisons when we can.
1941     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1942         (VT == N0.getValueType() ||
1943          (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
1944         N0.getOpcode() == ISD::AND) {
1945       auto &DL = DAG.getDataLayout();
1946       if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1947         EVT ShiftTy = DCI.isBeforeLegalize()
1948                           ? getPointerTy(DL)
1949                           : getShiftAmountTy(N0.getValueType(), DL);
1950         if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
1951           // Perform the xform if the AND RHS is a single bit.
1952           if (AndRHS->getAPIntValue().isPowerOf2()) {
1953             return DAG.getNode(ISD::TRUNCATE, dl, VT,
1954                               DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1955                    DAG.getConstant(AndRHS->getAPIntValue().logBase2(), dl,
1956                                    ShiftTy)));
1957           }
1958         } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
1959           // (X & 8) == 8  -->  (X & 8) >> 3
1960           // Perform the xform if C1 is a single bit.
1961           if (C1.isPowerOf2()) {
1962             return DAG.getNode(ISD::TRUNCATE, dl, VT,
1963                                DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0,
1964                                       DAG.getConstant(C1.logBase2(), dl,
1965                                                       ShiftTy)));
1966           }
1967         }
1968       }
1969     }
1970 
1971     if (C1.getMinSignedBits() <= 64 &&
1972         !isLegalICmpImmediate(C1.getSExtValue())) {
1973       // (X & -256) == 256 -> (X >> 8) == 1
1974       if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
1975           N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
1976         if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
1977           const APInt &AndRHSC = AndRHS->getAPIntValue();
1978           if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
1979             unsigned ShiftBits = AndRHSC.countTrailingZeros();
1980             auto &DL = DAG.getDataLayout();
1981             EVT ShiftTy = DCI.isBeforeLegalize()
1982                               ? getPointerTy(DL)
1983                               : getShiftAmountTy(N0.getValueType(), DL);
1984             EVT CmpTy = N0.getValueType();
1985             SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
1986                                         DAG.getConstant(ShiftBits, dl,
1987                                                         ShiftTy));
1988             SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, CmpTy);
1989             return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
1990           }
1991         }
1992       } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
1993                  Cond == ISD::SETULE || Cond == ISD::SETUGT) {
1994         bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
1995         // X <  0x100000000 -> (X >> 32) <  1
1996         // X >= 0x100000000 -> (X >> 32) >= 1
1997         // X <= 0x0ffffffff -> (X >> 32) <  1
1998         // X >  0x0ffffffff -> (X >> 32) >= 1
1999         unsigned ShiftBits;
2000         APInt NewC = C1;
2001         ISD::CondCode NewCond = Cond;
2002         if (AdjOne) {
2003           ShiftBits = C1.countTrailingOnes();
2004           NewC = NewC + 1;
2005           NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2006         } else {
2007           ShiftBits = C1.countTrailingZeros();
2008         }
2009         NewC = NewC.lshr(ShiftBits);
2010         if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
2011           isLegalICmpImmediate(NewC.getSExtValue())) {
2012           auto &DL = DAG.getDataLayout();
2013           EVT ShiftTy = DCI.isBeforeLegalize()
2014                             ? getPointerTy(DL)
2015                             : getShiftAmountTy(N0.getValueType(), DL);
2016           EVT CmpTy = N0.getValueType();
2017           SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
2018                                       DAG.getConstant(ShiftBits, dl, ShiftTy));
2019           SDValue CmpRHS = DAG.getConstant(NewC, dl, CmpTy);
2020           return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond);
2021         }
2022       }
2023     }
2024   }
2025 
2026   if (isa<ConstantFPSDNode>(N0.getNode())) {
2027     // Constant fold or commute setcc.
2028     SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl);
2029     if (O.getNode()) return O;
2030   } else if (auto *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
2031     // If the RHS of an FP comparison is a constant, simplify it away in
2032     // some cases.
2033     if (CFP->getValueAPF().isNaN()) {
2034       // If an operand is known to be a nan, we can fold it.
2035       switch (ISD::getUnorderedFlavor(Cond)) {
2036       default: llvm_unreachable("Unknown flavor!");
2037       case 0:  // Known false.
2038         return DAG.getConstant(0, dl, VT);
2039       case 1:  // Known true.
2040         return DAG.getConstant(1, dl, VT);
2041       case 2:  // Undefined.
2042         return DAG.getUNDEF(VT);
2043       }
2044     }
2045 
2046     // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
2047     // constant if knowing that the operand is non-nan is enough.  We prefer to
2048     // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
2049     // materialize 0.0.
2050     if (Cond == ISD::SETO || Cond == ISD::SETUO)
2051       return DAG.getSetCC(dl, VT, N0, N0, Cond);
2052 
2053     // If the condition is not legal, see if we can find an equivalent one
2054     // which is legal.
2055     if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
2056       // If the comparison was an awkward floating-point == or != and one of
2057       // the comparison operands is infinity or negative infinity, convert the
2058       // condition to a less-awkward <= or >=.
2059       if (CFP->getValueAPF().isInfinity()) {
2060         if (CFP->getValueAPF().isNegative()) {
2061           if (Cond == ISD::SETOEQ &&
2062               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
2063             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
2064           if (Cond == ISD::SETUEQ &&
2065               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
2066             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
2067           if (Cond == ISD::SETUNE &&
2068               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
2069             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
2070           if (Cond == ISD::SETONE &&
2071               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
2072             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
2073         } else {
2074           if (Cond == ISD::SETOEQ &&
2075               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
2076             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
2077           if (Cond == ISD::SETUEQ &&
2078               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
2079             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
2080           if (Cond == ISD::SETUNE &&
2081               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
2082             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
2083           if (Cond == ISD::SETONE &&
2084               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
2085             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
2086         }
2087       }
2088     }
2089   }
2090 
2091   if (N0 == N1) {
2092     // The sext(setcc()) => setcc() optimization relies on the appropriate
2093     // constant being emitted.
2094     uint64_t EqVal = 0;
2095     switch (getBooleanContents(N0.getValueType())) {
2096     case UndefinedBooleanContent:
2097     case ZeroOrOneBooleanContent:
2098       EqVal = ISD::isTrueWhenEqual(Cond);
2099       break;
2100     case ZeroOrNegativeOneBooleanContent:
2101       EqVal = ISD::isTrueWhenEqual(Cond) ? -1 : 0;
2102       break;
2103     }
2104 
2105     // We can always fold X == X for integer setcc's.
2106     if (N0.getValueType().isInteger()) {
2107       return DAG.getConstant(EqVal, dl, VT);
2108     }
2109     unsigned UOF = ISD::getUnorderedFlavor(Cond);
2110     if (UOF == 2)   // FP operators that are undefined on NaNs.
2111       return DAG.getConstant(EqVal, dl, VT);
2112     if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
2113       return DAG.getConstant(EqVal, dl, VT);
2114     // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
2115     // if it is not already.
2116     ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
2117     if (NewCond != Cond && (DCI.isBeforeLegalizeOps() ||
2118           getCondCodeAction(NewCond, N0.getSimpleValueType()) == Legal))
2119       return DAG.getSetCC(dl, VT, N0, N1, NewCond);
2120   }
2121 
2122   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2123       N0.getValueType().isInteger()) {
2124     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
2125         N0.getOpcode() == ISD::XOR) {
2126       // Simplify (X+Y) == (X+Z) -->  Y == Z
2127       if (N0.getOpcode() == N1.getOpcode()) {
2128         if (N0.getOperand(0) == N1.getOperand(0))
2129           return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
2130         if (N0.getOperand(1) == N1.getOperand(1))
2131           return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
2132         if (DAG.isCommutativeBinOp(N0.getOpcode())) {
2133           // If X op Y == Y op X, try other combinations.
2134           if (N0.getOperand(0) == N1.getOperand(1))
2135             return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
2136                                 Cond);
2137           if (N0.getOperand(1) == N1.getOperand(0))
2138             return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
2139                                 Cond);
2140         }
2141       }
2142 
2143       // If RHS is a legal immediate value for a compare instruction, we need
2144       // to be careful about increasing register pressure needlessly.
2145       bool LegalRHSImm = false;
2146 
2147       if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
2148         if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2149           // Turn (X+C1) == C2 --> X == C2-C1
2150           if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
2151             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2152                                 DAG.getConstant(RHSC->getAPIntValue()-
2153                                                 LHSR->getAPIntValue(),
2154                                 dl, N0.getValueType()), Cond);
2155           }
2156 
2157           // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
2158           if (N0.getOpcode() == ISD::XOR)
2159             // If we know that all of the inverted bits are zero, don't bother
2160             // performing the inversion.
2161             if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
2162               return
2163                 DAG.getSetCC(dl, VT, N0.getOperand(0),
2164                              DAG.getConstant(LHSR->getAPIntValue() ^
2165                                                RHSC->getAPIntValue(),
2166                                              dl, N0.getValueType()),
2167                              Cond);
2168         }
2169 
2170         // Turn (C1-X) == C2 --> X == C1-C2
2171         if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
2172           if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
2173             return
2174               DAG.getSetCC(dl, VT, N0.getOperand(1),
2175                            DAG.getConstant(SUBC->getAPIntValue() -
2176                                              RHSC->getAPIntValue(),
2177                                            dl, N0.getValueType()),
2178                            Cond);
2179           }
2180         }
2181 
2182         // Could RHSC fold directly into a compare?
2183         if (RHSC->getValueType(0).getSizeInBits() <= 64)
2184           LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
2185       }
2186 
2187       // Simplify (X+Z) == X -->  Z == 0
2188       // Don't do this if X is an immediate that can fold into a cmp
2189       // instruction and X+Z has other uses. It could be an induction variable
2190       // chain, and the transform would increase register pressure.
2191       if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
2192         if (N0.getOperand(0) == N1)
2193           return DAG.getSetCC(dl, VT, N0.getOperand(1),
2194                               DAG.getConstant(0, dl, N0.getValueType()), Cond);
2195         if (N0.getOperand(1) == N1) {
2196           if (DAG.isCommutativeBinOp(N0.getOpcode()))
2197             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2198                                 DAG.getConstant(0, dl, N0.getValueType()),
2199                                 Cond);
2200           if (N0.getNode()->hasOneUse()) {
2201             assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
2202             auto &DL = DAG.getDataLayout();
2203             // (Z-X) == X  --> Z == X<<1
2204             SDValue SH = DAG.getNode(
2205                 ISD::SHL, dl, N1.getValueType(), N1,
2206                 DAG.getConstant(1, dl,
2207                                 getShiftAmountTy(N1.getValueType(), DL)));
2208             if (!DCI.isCalledByLegalizer())
2209               DCI.AddToWorklist(SH.getNode());
2210             return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
2211           }
2212         }
2213       }
2214     }
2215 
2216     if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
2217         N1.getOpcode() == ISD::XOR) {
2218       // Simplify  X == (X+Z) -->  Z == 0
2219       if (N1.getOperand(0) == N0)
2220         return DAG.getSetCC(dl, VT, N1.getOperand(1),
2221                         DAG.getConstant(0, dl, N1.getValueType()), Cond);
2222       if (N1.getOperand(1) == N0) {
2223         if (DAG.isCommutativeBinOp(N1.getOpcode()))
2224           return DAG.getSetCC(dl, VT, N1.getOperand(0),
2225                           DAG.getConstant(0, dl, N1.getValueType()), Cond);
2226         if (N1.getNode()->hasOneUse()) {
2227           assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
2228           auto &DL = DAG.getDataLayout();
2229           // X == (Z-X)  --> X<<1 == Z
2230           SDValue SH = DAG.getNode(
2231               ISD::SHL, dl, N1.getValueType(), N0,
2232               DAG.getConstant(1, dl, getShiftAmountTy(N0.getValueType(), DL)));
2233           if (!DCI.isCalledByLegalizer())
2234             DCI.AddToWorklist(SH.getNode());
2235           return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
2236         }
2237       }
2238     }
2239 
2240     if (SDValue V = simplifySetCCWithAnd(VT, N0, N1, Cond, DCI, dl))
2241       return V;
2242   }
2243 
2244   // Fold away ALL boolean setcc's.
2245   SDValue Temp;
2246   if (N0.getValueType() == MVT::i1 && foldBooleans) {
2247     switch (Cond) {
2248     default: llvm_unreachable("Unknown integer setcc!");
2249     case ISD::SETEQ:  // X == Y  -> ~(X^Y)
2250       Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
2251       N0 = DAG.getNOT(dl, Temp, MVT::i1);
2252       if (!DCI.isCalledByLegalizer())
2253         DCI.AddToWorklist(Temp.getNode());
2254       break;
2255     case ISD::SETNE:  // X != Y   -->  (X^Y)
2256       N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
2257       break;
2258     case ISD::SETGT:  // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
2259     case ISD::SETULT: // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
2260       Temp = DAG.getNOT(dl, N0, MVT::i1);
2261       N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
2262       if (!DCI.isCalledByLegalizer())
2263         DCI.AddToWorklist(Temp.getNode());
2264       break;
2265     case ISD::SETLT:  // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
2266     case ISD::SETUGT: // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
2267       Temp = DAG.getNOT(dl, N1, MVT::i1);
2268       N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
2269       if (!DCI.isCalledByLegalizer())
2270         DCI.AddToWorklist(Temp.getNode());
2271       break;
2272     case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
2273     case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
2274       Temp = DAG.getNOT(dl, N0, MVT::i1);
2275       N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
2276       if (!DCI.isCalledByLegalizer())
2277         DCI.AddToWorklist(Temp.getNode());
2278       break;
2279     case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
2280     case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
2281       Temp = DAG.getNOT(dl, N1, MVT::i1);
2282       N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
2283       break;
2284     }
2285     if (VT != MVT::i1) {
2286       if (!DCI.isCalledByLegalizer())
2287         DCI.AddToWorklist(N0.getNode());
2288       // FIXME: If running after legalize, we probably can't do this.
2289       N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0);
2290     }
2291     return N0;
2292   }
2293 
2294   // Could not fold it.
2295   return SDValue();
2296 }
2297 
2298 /// Returns true (and the GlobalValue and the offset) if the node is a
2299 /// GlobalAddress + offset.
2300 bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
2301                                     int64_t &Offset) const {
2302   if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) {
2303     GA = GASD->getGlobal();
2304     Offset += GASD->getOffset();
2305     return true;
2306   }
2307 
2308   if (N->getOpcode() == ISD::ADD) {
2309     SDValue N1 = N->getOperand(0);
2310     SDValue N2 = N->getOperand(1);
2311     if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
2312       if (auto *V = dyn_cast<ConstantSDNode>(N2)) {
2313         Offset += V->getSExtValue();
2314         return true;
2315       }
2316     } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
2317       if (auto *V = dyn_cast<ConstantSDNode>(N1)) {
2318         Offset += V->getSExtValue();
2319         return true;
2320       }
2321     }
2322   }
2323 
2324   return false;
2325 }
2326 
2327 SDValue TargetLowering::PerformDAGCombine(SDNode *N,
2328                                           DAGCombinerInfo &DCI) const {
2329   // Default implementation: no optimization.
2330   return SDValue();
2331 }
2332 
2333 //===----------------------------------------------------------------------===//
2334 //  Inline Assembler Implementation Methods
2335 //===----------------------------------------------------------------------===//
2336 
2337 TargetLowering::ConstraintType
2338 TargetLowering::getConstraintType(StringRef Constraint) const {
2339   unsigned S = Constraint.size();
2340 
2341   if (S == 1) {
2342     switch (Constraint[0]) {
2343     default: break;
2344     case 'r': return C_RegisterClass;
2345     case 'm':    // memory
2346     case 'o':    // offsetable
2347     case 'V':    // not offsetable
2348       return C_Memory;
2349     case 'i':    // Simple Integer or Relocatable Constant
2350     case 'n':    // Simple Integer
2351     case 'E':    // Floating Point Constant
2352     case 'F':    // Floating Point Constant
2353     case 's':    // Relocatable Constant
2354     case 'p':    // Address.
2355     case 'X':    // Allow ANY value.
2356     case 'I':    // Target registers.
2357     case 'J':
2358     case 'K':
2359     case 'L':
2360     case 'M':
2361     case 'N':
2362     case 'O':
2363     case 'P':
2364     case '<':
2365     case '>':
2366       return C_Other;
2367     }
2368   }
2369 
2370   if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
2371     if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
2372       return C_Memory;
2373     return C_Register;
2374   }
2375   return C_Unknown;
2376 }
2377 
2378 /// Try to replace an X constraint, which matches anything, with another that
2379 /// has more specific requirements based on the type of the corresponding
2380 /// operand.
2381 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{
2382   if (ConstraintVT.isInteger())
2383     return "r";
2384   if (ConstraintVT.isFloatingPoint())
2385     return "f";      // works for many targets
2386   return nullptr;
2387 }
2388 
2389 /// Lower the specified operand into the Ops vector.
2390 /// If it is invalid, don't add anything to Ops.
2391 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2392                                                   std::string &Constraint,
2393                                                   std::vector<SDValue> &Ops,
2394                                                   SelectionDAG &DAG) const {
2395 
2396   if (Constraint.length() > 1) return;
2397 
2398   char ConstraintLetter = Constraint[0];
2399   switch (ConstraintLetter) {
2400   default: break;
2401   case 'X':     // Allows any operand; labels (basic block) use this.
2402     if (Op.getOpcode() == ISD::BasicBlock) {
2403       Ops.push_back(Op);
2404       return;
2405     }
2406     LLVM_FALLTHROUGH;
2407   case 'i':    // Simple Integer or Relocatable Constant
2408   case 'n':    // Simple Integer
2409   case 's': {  // Relocatable Constant
2410     // These operands are interested in values of the form (GV+C), where C may
2411     // be folded in as an offset of GV, or it may be explicitly added.  Also, it
2412     // is possible and fine if either GV or C are missing.
2413     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2414     GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
2415 
2416     // If we have "(add GV, C)", pull out GV/C
2417     if (Op.getOpcode() == ISD::ADD) {
2418       C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2419       GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
2420       if (!C || !GA) {
2421         C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
2422         GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
2423       }
2424       if (!C || !GA) {
2425         C = nullptr;
2426         GA = nullptr;
2427       }
2428     }
2429 
2430     // If we find a valid operand, map to the TargetXXX version so that the
2431     // value itself doesn't get selected.
2432     if (GA) {   // Either &GV   or   &GV+C
2433       if (ConstraintLetter != 'n') {
2434         int64_t Offs = GA->getOffset();
2435         if (C) Offs += C->getZExtValue();
2436         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
2437                                                  C ? SDLoc(C) : SDLoc(),
2438                                                  Op.getValueType(), Offs));
2439       }
2440       return;
2441     }
2442     if (C) {   // just C, no GV.
2443       // Simple constants are not allowed for 's'.
2444       if (ConstraintLetter != 's') {
2445         // gcc prints these as sign extended.  Sign extend value to 64 bits
2446         // now; without this it would get ZExt'd later in
2447         // ScheduleDAGSDNodes::EmitNode, which is very generic.
2448         Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
2449                                             SDLoc(C), MVT::i64));
2450       }
2451       return;
2452     }
2453     break;
2454   }
2455   }
2456 }
2457 
2458 std::pair<unsigned, const TargetRegisterClass *>
2459 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
2460                                              StringRef Constraint,
2461                                              MVT VT) const {
2462   if (Constraint.empty() || Constraint[0] != '{')
2463     return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
2464   assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
2465 
2466   // Remove the braces from around the name.
2467   StringRef RegName(Constraint.data()+1, Constraint.size()-2);
2468 
2469   std::pair<unsigned, const TargetRegisterClass*> R =
2470     std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
2471 
2472   // Figure out which register class contains this reg.
2473   for (const TargetRegisterClass *RC : RI->regclasses()) {
2474     // If none of the value types for this register class are valid, we
2475     // can't use it.  For example, 64-bit reg classes on 32-bit targets.
2476     if (!isLegalRC(RC))
2477       continue;
2478 
2479     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
2480          I != E; ++I) {
2481       if (RegName.equals_lower(RI->getRegAsmName(*I))) {
2482         std::pair<unsigned, const TargetRegisterClass*> S =
2483           std::make_pair(*I, RC);
2484 
2485         // If this register class has the requested value type, return it,
2486         // otherwise keep searching and return the first class found
2487         // if no other is found which explicitly has the requested type.
2488         if (RC->hasType(VT))
2489           return S;
2490         else if (!R.second)
2491           R = S;
2492       }
2493     }
2494   }
2495 
2496   return R;
2497 }
2498 
2499 //===----------------------------------------------------------------------===//
2500 // Constraint Selection.
2501 
2502 /// Return true of this is an input operand that is a matching constraint like
2503 /// "4".
2504 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
2505   assert(!ConstraintCode.empty() && "No known constraint!");
2506   return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
2507 }
2508 
2509 /// If this is an input matching constraint, this method returns the output
2510 /// operand it matches.
2511 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
2512   assert(!ConstraintCode.empty() && "No known constraint!");
2513   return atoi(ConstraintCode.c_str());
2514 }
2515 
2516 /// Split up the constraint string from the inline assembly value into the
2517 /// specific constraints and their prefixes, and also tie in the associated
2518 /// operand values.
2519 /// If this returns an empty vector, and if the constraint string itself
2520 /// isn't empty, there was an error parsing.
2521 TargetLowering::AsmOperandInfoVector
2522 TargetLowering::ParseConstraints(const DataLayout &DL,
2523                                  const TargetRegisterInfo *TRI,
2524                                  ImmutableCallSite CS) const {
2525   /// Information about all of the constraints.
2526   AsmOperandInfoVector ConstraintOperands;
2527   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
2528   unsigned maCount = 0; // Largest number of multiple alternative constraints.
2529 
2530   // Do a prepass over the constraints, canonicalizing them, and building up the
2531   // ConstraintOperands list.
2532   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
2533   unsigned ResNo = 0;   // ResNo - The result number of the next output.
2534 
2535   for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2536     ConstraintOperands.emplace_back(std::move(CI));
2537     AsmOperandInfo &OpInfo = ConstraintOperands.back();
2538 
2539     // Update multiple alternative constraint count.
2540     if (OpInfo.multipleAlternatives.size() > maCount)
2541       maCount = OpInfo.multipleAlternatives.size();
2542 
2543     OpInfo.ConstraintVT = MVT::Other;
2544 
2545     // Compute the value type for each operand.
2546     switch (OpInfo.Type) {
2547     case InlineAsm::isOutput:
2548       // Indirect outputs just consume an argument.
2549       if (OpInfo.isIndirect) {
2550         OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
2551         break;
2552       }
2553 
2554       // The return value of the call is this value.  As such, there is no
2555       // corresponding argument.
2556       assert(!CS.getType()->isVoidTy() &&
2557              "Bad inline asm!");
2558       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
2559         OpInfo.ConstraintVT =
2560             getSimpleValueType(DL, STy->getElementType(ResNo));
2561       } else {
2562         assert(ResNo == 0 && "Asm only has one result!");
2563         OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
2564       }
2565       ++ResNo;
2566       break;
2567     case InlineAsm::isInput:
2568       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
2569       break;
2570     case InlineAsm::isClobber:
2571       // Nothing to do.
2572       break;
2573     }
2574 
2575     if (OpInfo.CallOperandVal) {
2576       llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
2577       if (OpInfo.isIndirect) {
2578         llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
2579         if (!PtrTy)
2580           report_fatal_error("Indirect operand for inline asm not a pointer!");
2581         OpTy = PtrTy->getElementType();
2582       }
2583 
2584       // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
2585       if (StructType *STy = dyn_cast<StructType>(OpTy))
2586         if (STy->getNumElements() == 1)
2587           OpTy = STy->getElementType(0);
2588 
2589       // If OpTy is not a single value, it may be a struct/union that we
2590       // can tile with integers.
2591       if (!OpTy->isSingleValueType() && OpTy->isSized()) {
2592         unsigned BitSize = DL.getTypeSizeInBits(OpTy);
2593         switch (BitSize) {
2594         default: break;
2595         case 1:
2596         case 8:
2597         case 16:
2598         case 32:
2599         case 64:
2600         case 128:
2601           OpInfo.ConstraintVT =
2602             MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
2603           break;
2604         }
2605       } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
2606         unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
2607         OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
2608       } else {
2609         OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
2610       }
2611     }
2612   }
2613 
2614   // If we have multiple alternative constraints, select the best alternative.
2615   if (!ConstraintOperands.empty()) {
2616     if (maCount) {
2617       unsigned bestMAIndex = 0;
2618       int bestWeight = -1;
2619       // weight:  -1 = invalid match, and 0 = so-so match to 5 = good match.
2620       int weight = -1;
2621       unsigned maIndex;
2622       // Compute the sums of the weights for each alternative, keeping track
2623       // of the best (highest weight) one so far.
2624       for (maIndex = 0; maIndex < maCount; ++maIndex) {
2625         int weightSum = 0;
2626         for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2627             cIndex != eIndex; ++cIndex) {
2628           AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
2629           if (OpInfo.Type == InlineAsm::isClobber)
2630             continue;
2631 
2632           // If this is an output operand with a matching input operand,
2633           // look up the matching input. If their types mismatch, e.g. one
2634           // is an integer, the other is floating point, or their sizes are
2635           // different, flag it as an maCantMatch.
2636           if (OpInfo.hasMatchingInput()) {
2637             AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
2638             if (OpInfo.ConstraintVT != Input.ConstraintVT) {
2639               if ((OpInfo.ConstraintVT.isInteger() !=
2640                    Input.ConstraintVT.isInteger()) ||
2641                   (OpInfo.ConstraintVT.getSizeInBits() !=
2642                    Input.ConstraintVT.getSizeInBits())) {
2643                 weightSum = -1;  // Can't match.
2644                 break;
2645               }
2646             }
2647           }
2648           weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
2649           if (weight == -1) {
2650             weightSum = -1;
2651             break;
2652           }
2653           weightSum += weight;
2654         }
2655         // Update best.
2656         if (weightSum > bestWeight) {
2657           bestWeight = weightSum;
2658           bestMAIndex = maIndex;
2659         }
2660       }
2661 
2662       // Now select chosen alternative in each constraint.
2663       for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2664           cIndex != eIndex; ++cIndex) {
2665         AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
2666         if (cInfo.Type == InlineAsm::isClobber)
2667           continue;
2668         cInfo.selectAlternative(bestMAIndex);
2669       }
2670     }
2671   }
2672 
2673   // Check and hook up tied operands, choose constraint code to use.
2674   for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
2675       cIndex != eIndex; ++cIndex) {
2676     AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
2677 
2678     // If this is an output operand with a matching input operand, look up the
2679     // matching input. If their types mismatch, e.g. one is an integer, the
2680     // other is floating point, or their sizes are different, flag it as an
2681     // error.
2682     if (OpInfo.hasMatchingInput()) {
2683       AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
2684 
2685       if (OpInfo.ConstraintVT != Input.ConstraintVT) {
2686         std::pair<unsigned, const TargetRegisterClass *> MatchRC =
2687             getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
2688                                          OpInfo.ConstraintVT);
2689         std::pair<unsigned, const TargetRegisterClass *> InputRC =
2690             getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
2691                                          Input.ConstraintVT);
2692         if ((OpInfo.ConstraintVT.isInteger() !=
2693              Input.ConstraintVT.isInteger()) ||
2694             (MatchRC.second != InputRC.second)) {
2695           report_fatal_error("Unsupported asm: input constraint"
2696                              " with a matching output constraint of"
2697                              " incompatible type!");
2698         }
2699       }
2700     }
2701   }
2702 
2703   return ConstraintOperands;
2704 }
2705 
2706 /// Return an integer indicating how general CT is.
2707 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
2708   switch (CT) {
2709   case TargetLowering::C_Other:
2710   case TargetLowering::C_Unknown:
2711     return 0;
2712   case TargetLowering::C_Register:
2713     return 1;
2714   case TargetLowering::C_RegisterClass:
2715     return 2;
2716   case TargetLowering::C_Memory:
2717     return 3;
2718   }
2719   llvm_unreachable("Invalid constraint type");
2720 }
2721 
2722 /// Examine constraint type and operand type and determine a weight value.
2723 /// This object must already have been set up with the operand type
2724 /// and the current alternative constraint selected.
2725 TargetLowering::ConstraintWeight
2726   TargetLowering::getMultipleConstraintMatchWeight(
2727     AsmOperandInfo &info, int maIndex) const {
2728   InlineAsm::ConstraintCodeVector *rCodes;
2729   if (maIndex >= (int)info.multipleAlternatives.size())
2730     rCodes = &info.Codes;
2731   else
2732     rCodes = &info.multipleAlternatives[maIndex].Codes;
2733   ConstraintWeight BestWeight = CW_Invalid;
2734 
2735   // Loop over the options, keeping track of the most general one.
2736   for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
2737     ConstraintWeight weight =
2738       getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
2739     if (weight > BestWeight)
2740       BestWeight = weight;
2741   }
2742 
2743   return BestWeight;
2744 }
2745 
2746 /// Examine constraint type and operand type and determine a weight value.
2747 /// This object must already have been set up with the operand type
2748 /// and the current alternative constraint selected.
2749 TargetLowering::ConstraintWeight
2750   TargetLowering::getSingleConstraintMatchWeight(
2751     AsmOperandInfo &info, const char *constraint) const {
2752   ConstraintWeight weight = CW_Invalid;
2753   Value *CallOperandVal = info.CallOperandVal;
2754     // If we don't have a value, we can't do a match,
2755     // but allow it at the lowest weight.
2756   if (!CallOperandVal)
2757     return CW_Default;
2758   // Look at the constraint type.
2759   switch (*constraint) {
2760     case 'i': // immediate integer.
2761     case 'n': // immediate integer with a known value.
2762       if (isa<ConstantInt>(CallOperandVal))
2763         weight = CW_Constant;
2764       break;
2765     case 's': // non-explicit intregal immediate.
2766       if (isa<GlobalValue>(CallOperandVal))
2767         weight = CW_Constant;
2768       break;
2769     case 'E': // immediate float if host format.
2770     case 'F': // immediate float.
2771       if (isa<ConstantFP>(CallOperandVal))
2772         weight = CW_Constant;
2773       break;
2774     case '<': // memory operand with autodecrement.
2775     case '>': // memory operand with autoincrement.
2776     case 'm': // memory operand.
2777     case 'o': // offsettable memory operand
2778     case 'V': // non-offsettable memory operand
2779       weight = CW_Memory;
2780       break;
2781     case 'r': // general register.
2782     case 'g': // general register, memory operand or immediate integer.
2783               // note: Clang converts "g" to "imr".
2784       if (CallOperandVal->getType()->isIntegerTy())
2785         weight = CW_Register;
2786       break;
2787     case 'X': // any operand.
2788     default:
2789       weight = CW_Default;
2790       break;
2791   }
2792   return weight;
2793 }
2794 
2795 /// If there are multiple different constraints that we could pick for this
2796 /// operand (e.g. "imr") try to pick the 'best' one.
2797 /// This is somewhat tricky: constraints fall into four classes:
2798 ///    Other         -> immediates and magic values
2799 ///    Register      -> one specific register
2800 ///    RegisterClass -> a group of regs
2801 ///    Memory        -> memory
2802 /// Ideally, we would pick the most specific constraint possible: if we have
2803 /// something that fits into a register, we would pick it.  The problem here
2804 /// is that if we have something that could either be in a register or in
2805 /// memory that use of the register could cause selection of *other*
2806 /// operands to fail: they might only succeed if we pick memory.  Because of
2807 /// this the heuristic we use is:
2808 ///
2809 ///  1) If there is an 'other' constraint, and if the operand is valid for
2810 ///     that constraint, use it.  This makes us take advantage of 'i'
2811 ///     constraints when available.
2812 ///  2) Otherwise, pick the most general constraint present.  This prefers
2813 ///     'm' over 'r', for example.
2814 ///
2815 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
2816                              const TargetLowering &TLI,
2817                              SDValue Op, SelectionDAG *DAG) {
2818   assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
2819   unsigned BestIdx = 0;
2820   TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
2821   int BestGenerality = -1;
2822 
2823   // Loop over the options, keeping track of the most general one.
2824   for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
2825     TargetLowering::ConstraintType CType =
2826       TLI.getConstraintType(OpInfo.Codes[i]);
2827 
2828     // If this is an 'other' constraint, see if the operand is valid for it.
2829     // For example, on X86 we might have an 'rI' constraint.  If the operand
2830     // is an integer in the range [0..31] we want to use I (saving a load
2831     // of a register), otherwise we must use 'r'.
2832     if (CType == TargetLowering::C_Other && Op.getNode()) {
2833       assert(OpInfo.Codes[i].size() == 1 &&
2834              "Unhandled multi-letter 'other' constraint");
2835       std::vector<SDValue> ResultOps;
2836       TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
2837                                        ResultOps, *DAG);
2838       if (!ResultOps.empty()) {
2839         BestType = CType;
2840         BestIdx = i;
2841         break;
2842       }
2843     }
2844 
2845     // Things with matching constraints can only be registers, per gcc
2846     // documentation.  This mainly affects "g" constraints.
2847     if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
2848       continue;
2849 
2850     // This constraint letter is more general than the previous one, use it.
2851     int Generality = getConstraintGenerality(CType);
2852     if (Generality > BestGenerality) {
2853       BestType = CType;
2854       BestIdx = i;
2855       BestGenerality = Generality;
2856     }
2857   }
2858 
2859   OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
2860   OpInfo.ConstraintType = BestType;
2861 }
2862 
2863 /// Determines the constraint code and constraint type to use for the specific
2864 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2865 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2866                                             SDValue Op,
2867                                             SelectionDAG *DAG) const {
2868   assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
2869 
2870   // Single-letter constraints ('r') are very common.
2871   if (OpInfo.Codes.size() == 1) {
2872     OpInfo.ConstraintCode = OpInfo.Codes[0];
2873     OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2874   } else {
2875     ChooseConstraint(OpInfo, *this, Op, DAG);
2876   }
2877 
2878   // 'X' matches anything.
2879   if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
2880     // Labels and constants are handled elsewhere ('X' is the only thing
2881     // that matches labels).  For Functions, the type here is the type of
2882     // the result, which is not what we want to look at; leave them alone.
2883     Value *v = OpInfo.CallOperandVal;
2884     if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
2885       OpInfo.CallOperandVal = v;
2886       return;
2887     }
2888 
2889     // Otherwise, try to resolve it to something we know about by looking at
2890     // the actual operand type.
2891     if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
2892       OpInfo.ConstraintCode = Repl;
2893       OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
2894     }
2895   }
2896 }
2897 
2898 /// \brief Given an exact SDIV by a constant, create a multiplication
2899 /// with the multiplicative inverse of the constant.
2900 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
2901                               const SDLoc &dl, SelectionDAG &DAG,
2902                               std::vector<SDNode *> &Created) {
2903   assert(d != 0 && "Division by zero!");
2904 
2905   // Shift the value upfront if it is even, so the LSB is one.
2906   unsigned ShAmt = d.countTrailingZeros();
2907   if (ShAmt) {
2908     // TODO: For UDIV use SRL instead of SRA.
2909     SDValue Amt =
2910         DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
2911                                                         DAG.getDataLayout()));
2912     SDNodeFlags Flags;
2913     Flags.setExact(true);
2914     Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
2915     Created.push_back(Op1.getNode());
2916     d = d.ashr(ShAmt);
2917   }
2918 
2919   // Calculate the multiplicative inverse, using Newton's method.
2920   APInt t, xn = d;
2921   while ((t = d*xn) != 1)
2922     xn *= APInt(d.getBitWidth(), 2) - t;
2923 
2924   SDValue Op2 = DAG.getConstant(xn, dl, Op1.getValueType());
2925   SDValue Mul = DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
2926   Created.push_back(Mul.getNode());
2927   return Mul;
2928 }
2929 
2930 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
2931                                       SelectionDAG &DAG,
2932                                       std::vector<SDNode *> *Created) const {
2933   AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2934   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2935   if (TLI.isIntDivCheap(N->getValueType(0), Attr))
2936     return SDValue(N,0); // Lower SDIV as SDIV
2937   return SDValue();
2938 }
2939 
2940 /// \brief Given an ISD::SDIV node expressing a divide by constant,
2941 /// return a DAG expression to select that will generate the same value by
2942 /// multiplying by a magic number.
2943 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
2944 SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
2945                                   SelectionDAG &DAG, bool IsAfterLegalization,
2946                                   std::vector<SDNode *> *Created) const {
2947   assert(Created && "No vector to hold sdiv ops.");
2948 
2949   EVT VT = N->getValueType(0);
2950   SDLoc dl(N);
2951 
2952   // Check to see if we can do this.
2953   // FIXME: We should be more aggressive here.
2954   if (!isTypeLegal(VT))
2955     return SDValue();
2956 
2957   // If the sdiv has an 'exact' bit we can use a simpler lowering.
2958   if (cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact())
2959     return BuildExactSDIV(*this, N->getOperand(0), Divisor, dl, DAG, *Created);
2960 
2961   APInt::ms magics = Divisor.magic();
2962 
2963   // Multiply the numerator (operand 0) by the magic value
2964   // FIXME: We should support doing a MUL in a wider type
2965   SDValue Q;
2966   if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
2967                             isOperationLegalOrCustom(ISD::MULHS, VT))
2968     Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
2969                     DAG.getConstant(magics.m, dl, VT));
2970   else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
2971                                  isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
2972     Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
2973                               N->getOperand(0),
2974                               DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
2975   else
2976     return SDValue();       // No mulhs or equvialent
2977   // If d > 0 and m < 0, add the numerator
2978   if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
2979     Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
2980     Created->push_back(Q.getNode());
2981   }
2982   // If d < 0 and m > 0, subtract the numerator.
2983   if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
2984     Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
2985     Created->push_back(Q.getNode());
2986   }
2987   auto &DL = DAG.getDataLayout();
2988   // Shift right algebraic if shift value is nonzero
2989   if (magics.s > 0) {
2990     Q = DAG.getNode(
2991         ISD::SRA, dl, VT, Q,
2992         DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
2993     Created->push_back(Q.getNode());
2994   }
2995   // Extract the sign bit and add it to the quotient
2996   SDValue T =
2997       DAG.getNode(ISD::SRL, dl, VT, Q,
2998                   DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
2999                                   getShiftAmountTy(Q.getValueType(), DL)));
3000   Created->push_back(T.getNode());
3001   return DAG.getNode(ISD::ADD, dl, VT, Q, T);
3002 }
3003 
3004 /// \brief Given an ISD::UDIV node expressing a divide by constant,
3005 /// return a DAG expression to select that will generate the same value by
3006 /// multiplying by a magic number.
3007 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
3008 SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
3009                                   SelectionDAG &DAG, bool IsAfterLegalization,
3010                                   std::vector<SDNode *> *Created) const {
3011   assert(Created && "No vector to hold udiv ops.");
3012 
3013   EVT VT = N->getValueType(0);
3014   SDLoc dl(N);
3015   auto &DL = DAG.getDataLayout();
3016 
3017   // Check to see if we can do this.
3018   // FIXME: We should be more aggressive here.
3019   if (!isTypeLegal(VT))
3020     return SDValue();
3021 
3022   // FIXME: We should use a narrower constant when the upper
3023   // bits are known to be zero.
3024   APInt::mu magics = Divisor.magicu();
3025 
3026   SDValue Q = N->getOperand(0);
3027 
3028   // If the divisor is even, we can avoid using the expensive fixup by shifting
3029   // the divided value upfront.
3030   if (magics.a != 0 && !Divisor[0]) {
3031     unsigned Shift = Divisor.countTrailingZeros();
3032     Q = DAG.getNode(
3033         ISD::SRL, dl, VT, Q,
3034         DAG.getConstant(Shift, dl, getShiftAmountTy(Q.getValueType(), DL)));
3035     Created->push_back(Q.getNode());
3036 
3037     // Get magic number for the shifted divisor.
3038     magics = Divisor.lshr(Shift).magicu(Shift);
3039     assert(magics.a == 0 && "Should use cheap fixup now");
3040   }
3041 
3042   // Multiply the numerator (operand 0) by the magic value
3043   // FIXME: We should support doing a MUL in a wider type
3044   if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
3045                             isOperationLegalOrCustom(ISD::MULHU, VT))
3046     Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, dl, VT));
3047   else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
3048                                  isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
3049     Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
3050                             DAG.getConstant(magics.m, dl, VT)).getNode(), 1);
3051   else
3052     return SDValue();       // No mulhu or equivalent
3053 
3054   Created->push_back(Q.getNode());
3055 
3056   if (magics.a == 0) {
3057     assert(magics.s < Divisor.getBitWidth() &&
3058            "We shouldn't generate an undefined shift!");
3059     return DAG.getNode(
3060         ISD::SRL, dl, VT, Q,
3061         DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
3062   } else {
3063     SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
3064     Created->push_back(NPQ.getNode());
3065     NPQ = DAG.getNode(
3066         ISD::SRL, dl, VT, NPQ,
3067         DAG.getConstant(1, dl, getShiftAmountTy(NPQ.getValueType(), DL)));
3068     Created->push_back(NPQ.getNode());
3069     NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
3070     Created->push_back(NPQ.getNode());
3071     return DAG.getNode(
3072         ISD::SRL, dl, VT, NPQ,
3073         DAG.getConstant(magics.s - 1, dl,
3074                         getShiftAmountTy(NPQ.getValueType(), DL)));
3075   }
3076 }
3077 
3078 bool TargetLowering::
3079 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
3080   if (!isa<ConstantSDNode>(Op.getOperand(0))) {
3081     DAG.getContext()->emitError("argument to '__builtin_return_address' must "
3082                                 "be a constant integer");
3083     return true;
3084   }
3085 
3086   return false;
3087 }
3088 
3089 //===----------------------------------------------------------------------===//
3090 // Legalization Utilities
3091 //===----------------------------------------------------------------------===//
3092 
3093 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl,
3094                                     SDValue LHS, SDValue RHS,
3095                                     SmallVectorImpl<SDValue> &Result,
3096                                     EVT HiLoVT, SelectionDAG &DAG,
3097                                     MulExpansionKind Kind, SDValue LL,
3098                                     SDValue LH, SDValue RL, SDValue RH) const {
3099   assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
3100          Opcode == ISD::SMUL_LOHI);
3101 
3102   bool HasMULHS = (Kind == MulExpansionKind::Always) ||
3103                   isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
3104   bool HasMULHU = (Kind == MulExpansionKind::Always) ||
3105                   isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
3106   bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
3107                       isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
3108   bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
3109                       isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
3110 
3111   if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
3112     return false;
3113 
3114   unsigned OuterBitSize = VT.getScalarSizeInBits();
3115   unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();
3116   unsigned LHSSB = DAG.ComputeNumSignBits(LHS);
3117   unsigned RHSSB = DAG.ComputeNumSignBits(RHS);
3118 
3119   // LL, LH, RL, and RH must be either all NULL or all set to a value.
3120   assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
3121          (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
3122 
3123   SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
3124   auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
3125                           bool Signed) -> bool {
3126     if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
3127       Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R);
3128       Hi = SDValue(Lo.getNode(), 1);
3129       return true;
3130     }
3131     if ((Signed && HasMULHS) || (!Signed && HasMULHU)) {
3132       Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R);
3133       Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R);
3134       return true;
3135     }
3136     return false;
3137   };
3138 
3139   SDValue Lo, Hi;
3140 
3141   if (!LL.getNode() && !RL.getNode() &&
3142       isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
3143     LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS);
3144     RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS);
3145   }
3146 
3147   if (!LL.getNode())
3148     return false;
3149 
3150   APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
3151   if (DAG.MaskedValueIsZero(LHS, HighMask) &&
3152       DAG.MaskedValueIsZero(RHS, HighMask)) {
3153     // The inputs are both zero-extended.
3154     if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) {
3155       Result.push_back(Lo);
3156       Result.push_back(Hi);
3157       if (Opcode != ISD::MUL) {
3158         SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
3159         Result.push_back(Zero);
3160         Result.push_back(Zero);
3161       }
3162       return true;
3163     }
3164   }
3165 
3166   if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize &&
3167       RHSSB > InnerBitSize) {
3168     // The input values are both sign-extended.
3169     // TODO non-MUL case?
3170     if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) {
3171       Result.push_back(Lo);
3172       Result.push_back(Hi);
3173       return true;
3174     }
3175   }
3176 
3177   unsigned ShiftAmount = OuterBitSize - InnerBitSize;
3178   EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout());
3179   if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) {
3180     // FIXME getShiftAmountTy does not always return a sensible result when VT
3181     // is an illegal type, and so the type may be too small to fit the shift
3182     // amount. Override it with i32. The shift will have to be legalized.
3183     ShiftAmountTy = MVT::i32;
3184   }
3185   SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy);
3186 
3187   if (!LH.getNode() && !RH.getNode() &&
3188       isOperationLegalOrCustom(ISD::SRL, VT) &&
3189       isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
3190     LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift);
3191     LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
3192     RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift);
3193     RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
3194   }
3195 
3196   if (!LH.getNode())
3197     return false;
3198 
3199   if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false))
3200     return false;
3201 
3202   Result.push_back(Lo);
3203 
3204   if (Opcode == ISD::MUL) {
3205     RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
3206     LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
3207     Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
3208     Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
3209     Result.push_back(Hi);
3210     return true;
3211   }
3212 
3213   // Compute the full width result.
3214   auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue {
3215     Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
3216     Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
3217     Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
3218     return DAG.getNode(ISD::OR, dl, VT, Lo, Hi);
3219   };
3220 
3221   SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
3222   if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false))
3223     return false;
3224 
3225   // This is effectively the add part of a multiply-add of half-sized operands,
3226   // so it cannot overflow.
3227   Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));
3228 
3229   if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false))
3230     return false;
3231 
3232   Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next,
3233                      Merge(Lo, Hi));
3234 
3235   SDValue Carry = Next.getValue(1);
3236   Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
3237   Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
3238 
3239   if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI))
3240     return false;
3241 
3242   SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
3243   Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero,
3244                    Carry);
3245   Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));
3246 
3247   if (Opcode == ISD::SMUL_LOHI) {
3248     SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
3249                                   DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL));
3250     Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT);
3251 
3252     NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
3253                           DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL));
3254     Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT);
3255   }
3256 
3257   Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
3258   Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
3259   Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
3260   return true;
3261 }
3262 
3263 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
3264                                SelectionDAG &DAG, MulExpansionKind Kind,
3265                                SDValue LL, SDValue LH, SDValue RL,
3266                                SDValue RH) const {
3267   SmallVector<SDValue, 2> Result;
3268   bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), N,
3269                            N->getOperand(0), N->getOperand(1), Result, HiLoVT,
3270                            DAG, Kind, LL, LH, RL, RH);
3271   if (Ok) {
3272     assert(Result.size() == 2);
3273     Lo = Result[0];
3274     Hi = Result[1];
3275   }
3276   return Ok;
3277 }
3278 
3279 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
3280                                SelectionDAG &DAG) const {
3281   EVT VT = Node->getOperand(0).getValueType();
3282   EVT NVT = Node->getValueType(0);
3283   SDLoc dl(SDValue(Node, 0));
3284 
3285   // FIXME: Only f32 to i64 conversions are supported.
3286   if (VT != MVT::f32 || NVT != MVT::i64)
3287     return false;
3288 
3289   // Expand f32 -> i64 conversion
3290   // This algorithm comes from compiler-rt's implementation of fixsfdi:
3291   // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c
3292   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(),
3293                                 VT.getSizeInBits());
3294   SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
3295   SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
3296   SDValue Bias = DAG.getConstant(127, dl, IntVT);
3297   SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()), dl,
3298                                      IntVT);
3299   SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT);
3300   SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
3301 
3302   SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));
3303 
3304   auto &DL = DAG.getDataLayout();
3305   SDValue ExponentBits = DAG.getNode(
3306       ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
3307       DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT, DL)));
3308   SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
3309 
3310   SDValue Sign = DAG.getNode(
3311       ISD::SRA, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
3312       DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT, DL)));
3313   Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);
3314 
3315   SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
3316       DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
3317       DAG.getConstant(0x00800000, dl, IntVT));
3318 
3319   R = DAG.getZExtOrTrunc(R, dl, NVT);
3320 
3321   R = DAG.getSelectCC(
3322       dl, Exponent, ExponentLoBit,
3323       DAG.getNode(ISD::SHL, dl, NVT, R,
3324                   DAG.getZExtOrTrunc(
3325                       DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
3326                       dl, getShiftAmountTy(IntVT, DL))),
3327       DAG.getNode(ISD::SRL, dl, NVT, R,
3328                   DAG.getZExtOrTrunc(
3329                       DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
3330                       dl, getShiftAmountTy(IntVT, DL))),
3331       ISD::SETGT);
3332 
3333   SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
3334       DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
3335       Sign);
3336 
3337   Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
3338       DAG.getConstant(0, dl, NVT), Ret, ISD::SETLT);
3339   return true;
3340 }
3341 
3342 SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
3343                                             SelectionDAG &DAG) const {
3344   SDLoc SL(LD);
3345   SDValue Chain = LD->getChain();
3346   SDValue BasePTR = LD->getBasePtr();
3347   EVT SrcVT = LD->getMemoryVT();
3348   ISD::LoadExtType ExtType = LD->getExtensionType();
3349 
3350   unsigned NumElem = SrcVT.getVectorNumElements();
3351 
3352   EVT SrcEltVT = SrcVT.getScalarType();
3353   EVT DstEltVT = LD->getValueType(0).getScalarType();
3354 
3355   unsigned Stride = SrcEltVT.getSizeInBits() / 8;
3356   assert(SrcEltVT.isByteSized());
3357 
3358   EVT PtrVT = BasePTR.getValueType();
3359 
3360   SmallVector<SDValue, 8> Vals;
3361   SmallVector<SDValue, 8> LoadChains;
3362 
3363   for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3364     SDValue ScalarLoad =
3365         DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
3366                        LD->getPointerInfo().getWithOffset(Idx * Stride),
3367                        SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride),
3368                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
3369 
3370     BasePTR = DAG.getNode(ISD::ADD, SL, PtrVT, BasePTR,
3371                           DAG.getConstant(Stride, SL, PtrVT));
3372 
3373     Vals.push_back(ScalarLoad.getValue(0));
3374     LoadChains.push_back(ScalarLoad.getValue(1));
3375   }
3376 
3377   SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
3378   SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals);
3379 
3380   return DAG.getMergeValues({ Value, NewChain }, SL);
3381 }
3382 
3383 // FIXME: This relies on each element having a byte size, otherwise the stride
3384 // is 0 and just overwrites the same location. ExpandStore currently expects
3385 // this broken behavior.
3386 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
3387                                              SelectionDAG &DAG) const {
3388   SDLoc SL(ST);
3389 
3390   SDValue Chain = ST->getChain();
3391   SDValue BasePtr = ST->getBasePtr();
3392   SDValue Value = ST->getValue();
3393   EVT StVT = ST->getMemoryVT();
3394 
3395   // The type of the data we want to save
3396   EVT RegVT = Value.getValueType();
3397   EVT RegSclVT = RegVT.getScalarType();
3398 
3399   // The type of data as saved in memory.
3400   EVT MemSclVT = StVT.getScalarType();
3401 
3402   EVT PtrVT = BasePtr.getValueType();
3403 
3404   // Store Stride in bytes
3405   unsigned Stride = MemSclVT.getSizeInBits() / 8;
3406   EVT IdxVT = getVectorIdxTy(DAG.getDataLayout());
3407   unsigned NumElem = StVT.getVectorNumElements();
3408 
3409   // Extract each of the elements from the original vector and save them into
3410   // memory individually.
3411   SmallVector<SDValue, 8> Stores;
3412   for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
3413     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
3414                               DAG.getConstant(Idx, SL, IdxVT));
3415 
3416     SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
3417                               DAG.getConstant(Idx * Stride, SL, PtrVT));
3418 
3419     // This scalar TruncStore may be illegal, but we legalize it later.
3420     SDValue Store = DAG.getTruncStore(
3421         Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
3422         MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride),
3423         ST->getMemOperand()->getFlags(), ST->getAAInfo());
3424 
3425     Stores.push_back(Store);
3426   }
3427 
3428   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
3429 }
3430 
3431 std::pair<SDValue, SDValue>
3432 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
3433   assert(LD->getAddressingMode() == ISD::UNINDEXED &&
3434          "unaligned indexed loads not implemented!");
3435   SDValue Chain = LD->getChain();
3436   SDValue Ptr = LD->getBasePtr();
3437   EVT VT = LD->getValueType(0);
3438   EVT LoadedVT = LD->getMemoryVT();
3439   SDLoc dl(LD);
3440   if (VT.isFloatingPoint() || VT.isVector()) {
3441     EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
3442     if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
3443       if (!isOperationLegalOrCustom(ISD::LOAD, intVT)) {
3444         // Scalarize the load and let the individual components be handled.
3445         SDValue Scalarized = scalarizeVectorLoad(LD, DAG);
3446         return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
3447       }
3448 
3449       // Expand to a (misaligned) integer load of the same size,
3450       // then bitconvert to floating point or vector.
3451       SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
3452                                     LD->getMemOperand());
3453       SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
3454       if (LoadedVT != VT)
3455         Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
3456                              ISD::ANY_EXTEND, dl, VT, Result);
3457 
3458       return std::make_pair(Result, newLoad.getValue(1));
3459     }
3460 
3461     // Copy the value to a (aligned) stack slot using (unaligned) integer
3462     // loads and stores, then do a (aligned) load from the stack slot.
3463     MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
3464     unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
3465     unsigned RegBytes = RegVT.getSizeInBits() / 8;
3466     unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
3467 
3468     // Make sure the stack slot is also aligned for the register type.
3469     SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
3470 
3471     SmallVector<SDValue, 8> Stores;
3472     SDValue StackPtr = StackBase;
3473     unsigned Offset = 0;
3474 
3475     EVT PtrVT = Ptr.getValueType();
3476     EVT StackPtrVT = StackPtr.getValueType();
3477 
3478     SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
3479     SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
3480 
3481     // Do all but one copies using the full register width.
3482     for (unsigned i = 1; i < NumRegs; i++) {
3483       // Load one integer register's worth from the original location.
3484       SDValue Load = DAG.getLoad(
3485           RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
3486           MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(),
3487           LD->getAAInfo());
3488       // Follow the load with a store to the stack slot.  Remember the store.
3489       Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
3490                                     MachinePointerInfo()));
3491       // Increment the pointers.
3492       Offset += RegBytes;
3493       Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
3494       StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT, StackPtr,
3495                              StackPtrIncrement);
3496     }
3497 
3498     // The last copy may be partial.  Do an extending load.
3499     EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
3500                                   8 * (LoadedBytes - Offset));
3501     SDValue Load =
3502         DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
3503                        LD->getPointerInfo().getWithOffset(Offset), MemVT,
3504                        MinAlign(LD->getAlignment(), Offset),
3505                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
3506     // Follow the load with a store to the stack slot.  Remember the store.
3507     // On big-endian machines this requires a truncating store to ensure
3508     // that the bits end up in the right place.
3509     Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
3510                                        MachinePointerInfo(), MemVT));
3511 
3512     // The order of the stores doesn't matter - say it with a TokenFactor.
3513     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
3514 
3515     // Finally, perform the original load only redirected to the stack slot.
3516     Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
3517                           MachinePointerInfo(), LoadedVT);
3518 
3519     // Callers expect a MERGE_VALUES node.
3520     return std::make_pair(Load, TF);
3521   }
3522 
3523   assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
3524          "Unaligned load of unsupported type.");
3525 
3526   // Compute the new VT that is half the size of the old one.  This is an
3527   // integer MVT.
3528   unsigned NumBits = LoadedVT.getSizeInBits();
3529   EVT NewLoadedVT;
3530   NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
3531   NumBits >>= 1;
3532 
3533   unsigned Alignment = LD->getAlignment();
3534   unsigned IncrementSize = NumBits / 8;
3535   ISD::LoadExtType HiExtType = LD->getExtensionType();
3536 
3537   // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
3538   if (HiExtType == ISD::NON_EXTLOAD)
3539     HiExtType = ISD::ZEXTLOAD;
3540 
3541   // Load the value in two parts
3542   SDValue Lo, Hi;
3543   if (DAG.getDataLayout().isLittleEndian()) {
3544     Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
3545                         NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
3546                         LD->getAAInfo());
3547     Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
3548                       DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
3549     Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
3550                         LD->getPointerInfo().getWithOffset(IncrementSize),
3551                         NewLoadedVT, MinAlign(Alignment, IncrementSize),
3552                         LD->getMemOperand()->getFlags(), LD->getAAInfo());
3553   } else {
3554     Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
3555                         NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
3556                         LD->getAAInfo());
3557     Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
3558                       DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));
3559     Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
3560                         LD->getPointerInfo().getWithOffset(IncrementSize),
3561                         NewLoadedVT, MinAlign(Alignment, IncrementSize),
3562                         LD->getMemOperand()->getFlags(), LD->getAAInfo());
3563   }
3564 
3565   // aggregate the two parts
3566   SDValue ShiftAmount =
3567       DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
3568                                                     DAG.getDataLayout()));
3569   SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
3570   Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
3571 
3572   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
3573                              Hi.getValue(1));
3574 
3575   return std::make_pair(Result, TF);
3576 }
3577 
3578 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
3579                                              SelectionDAG &DAG) const {
3580   assert(ST->getAddressingMode() == ISD::UNINDEXED &&
3581          "unaligned indexed stores not implemented!");
3582   SDValue Chain = ST->getChain();
3583   SDValue Ptr = ST->getBasePtr();
3584   SDValue Val = ST->getValue();
3585   EVT VT = Val.getValueType();
3586   int Alignment = ST->getAlignment();
3587 
3588   SDLoc dl(ST);
3589   if (ST->getMemoryVT().isFloatingPoint() ||
3590       ST->getMemoryVT().isVector()) {
3591     EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
3592     if (isTypeLegal(intVT)) {
3593       if (!isOperationLegalOrCustom(ISD::STORE, intVT)) {
3594         // Scalarize the store and let the individual components be handled.
3595         SDValue Result = scalarizeVectorStore(ST, DAG);
3596 
3597         return Result;
3598       }
3599       // Expand to a bitconvert of the value to the integer type of the
3600       // same size, then a (misaligned) int store.
3601       // FIXME: Does not handle truncating floating point stores!
3602       SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
3603       Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
3604                             Alignment, ST->getMemOperand()->getFlags());
3605       return Result;
3606     }
3607     // Do a (aligned) store to a stack slot, then copy from the stack slot
3608     // to the final destination using (unaligned) integer loads and stores.
3609     EVT StoredVT = ST->getMemoryVT();
3610     MVT RegVT =
3611       getRegisterType(*DAG.getContext(),
3612                       EVT::getIntegerVT(*DAG.getContext(),
3613                                         StoredVT.getSizeInBits()));
3614     EVT PtrVT = Ptr.getValueType();
3615     unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
3616     unsigned RegBytes = RegVT.getSizeInBits() / 8;
3617     unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
3618 
3619     // Make sure the stack slot is also aligned for the register type.
3620     SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);
3621 
3622     // Perform the original store, only redirected to the stack slot.
3623     SDValue Store = DAG.getTruncStore(Chain, dl, Val, StackPtr,
3624                                       MachinePointerInfo(), StoredVT);
3625 
3626     EVT StackPtrVT = StackPtr.getValueType();
3627 
3628     SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
3629     SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
3630     SmallVector<SDValue, 8> Stores;
3631     unsigned Offset = 0;
3632 
3633     // Do all but one copies using the full register width.
3634     for (unsigned i = 1; i < NumRegs; i++) {
3635       // Load one integer register's worth from the stack slot.
3636       SDValue Load =
3637           DAG.getLoad(RegVT, dl, Store, StackPtr, MachinePointerInfo());
3638       // Store it to the final location.  Remember the store.
3639       Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
3640                                     ST->getPointerInfo().getWithOffset(Offset),
3641                                     MinAlign(ST->getAlignment(), Offset),
3642                                     ST->getMemOperand()->getFlags()));
3643       // Increment the pointers.
3644       Offset += RegBytes;
3645       StackPtr = DAG.getNode(ISD::ADD, dl, StackPtrVT,
3646                              StackPtr, StackPtrIncrement);
3647       Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, PtrIncrement);
3648     }
3649 
3650     // The last store may be partial.  Do a truncating store.  On big-endian
3651     // machines this requires an extending load from the stack slot to ensure
3652     // that the bits are in the right place.
3653     EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
3654                                   8 * (StoredBytes - Offset));
3655 
3656     // Load from the stack slot.
3657     SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
3658                                   MachinePointerInfo(), MemVT);
3659 
3660     Stores.push_back(
3661         DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
3662                           ST->getPointerInfo().getWithOffset(Offset), MemVT,
3663                           MinAlign(ST->getAlignment(), Offset),
3664                           ST->getMemOperand()->getFlags(), ST->getAAInfo()));
3665     // The order of the stores doesn't matter - say it with a TokenFactor.
3666     SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
3667     return Result;
3668   }
3669 
3670   assert(ST->getMemoryVT().isInteger() &&
3671          !ST->getMemoryVT().isVector() &&
3672          "Unaligned store of unknown type.");
3673   // Get the half-size VT
3674   EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
3675   int NumBits = NewStoredVT.getSizeInBits();
3676   int IncrementSize = NumBits / 8;
3677 
3678   // Divide the stored value in two parts.
3679   SDValue ShiftAmount =
3680       DAG.getConstant(NumBits, dl, getShiftAmountTy(Val.getValueType(),
3681                                                     DAG.getDataLayout()));
3682   SDValue Lo = Val;
3683   SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
3684 
3685   // Store the two parts
3686   SDValue Store1, Store2;
3687   Store1 = DAG.getTruncStore(Chain, dl,
3688                              DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
3689                              Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
3690                              ST->getMemOperand()->getFlags());
3691 
3692   EVT PtrVT = Ptr.getValueType();
3693   Ptr = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3694                     DAG.getConstant(IncrementSize, dl, PtrVT));
3695   Alignment = MinAlign(Alignment, IncrementSize);
3696   Store2 = DAG.getTruncStore(
3697       Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
3698       ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
3699       ST->getMemOperand()->getFlags(), ST->getAAInfo());
3700 
3701   SDValue Result =
3702     DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
3703   return Result;
3704 }
3705 
3706 SDValue
3707 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
3708                                        const SDLoc &DL, EVT DataVT,
3709                                        SelectionDAG &DAG,
3710                                        bool IsCompressedMemory) const {
3711   SDValue Increment;
3712   EVT AddrVT = Addr.getValueType();
3713   EVT MaskVT = Mask.getValueType();
3714   assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() &&
3715          "Incompatible types of Data and Mask");
3716   if (IsCompressedMemory) {
3717     // Incrementing the pointer according to number of '1's in the mask.
3718     EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
3719     SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
3720     if (MaskIntVT.getSizeInBits() < 32) {
3721       MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
3722       MaskIntVT = MVT::i32;
3723     }
3724 
3725     // Count '1's with POPCNT.
3726     Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
3727     Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
3728     // Scale is an element size in bytes.
3729     SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
3730                                     AddrVT);
3731     Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
3732   } else
3733     Increment = DAG.getConstant(DataVT.getSizeInBits() / 8, DL, AddrVT);
3734 
3735   return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
3736 }
3737 
3738 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG,
3739                                        SDValue Idx,
3740                                        EVT VecVT,
3741                                        const SDLoc &dl) {
3742   if (isa<ConstantSDNode>(Idx))
3743     return Idx;
3744 
3745   EVT IdxVT = Idx.getValueType();
3746   unsigned NElts = VecVT.getVectorNumElements();
3747   if (isPowerOf2_32(NElts)) {
3748     APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
3749                                      Log2_32(NElts));
3750     return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
3751                        DAG.getConstant(Imm, dl, IdxVT));
3752   }
3753 
3754   return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
3755                      DAG.getConstant(NElts - 1, dl, IdxVT));
3756 }
3757 
3758 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
3759                                                 SDValue VecPtr, EVT VecVT,
3760                                                 SDValue Index) const {
3761   SDLoc dl(Index);
3762   // Make sure the index type is big enough to compute in.
3763   Index = DAG.getZExtOrTrunc(Index, dl, getPointerTy(DAG.getDataLayout()));
3764 
3765   EVT EltVT = VecVT.getVectorElementType();
3766 
3767   // Calculate the element offset and add it to the pointer.
3768   unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
3769   assert(EltSize * 8 == EltVT.getSizeInBits() &&
3770          "Converting bits to bytes lost precision");
3771 
3772   Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);
3773 
3774   EVT IdxVT = Index.getValueType();
3775 
3776   Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
3777                       DAG.getConstant(EltSize, dl, IdxVT));
3778   return DAG.getNode(ISD::ADD, dl, IdxVT, Index, VecPtr);
3779 }
3780 
3781 //===----------------------------------------------------------------------===//
3782 // Implementation of Emulated TLS Model
3783 //===----------------------------------------------------------------------===//
3784 
3785 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
3786                                                 SelectionDAG &DAG) const {
3787   // Access to address of TLS varialbe xyz is lowered to a function call:
3788   //   __emutls_get_address( address of global variable named "__emutls_v.xyz" )
3789   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3790   PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
3791   SDLoc dl(GA);
3792 
3793   ArgListTy Args;
3794   ArgListEntry Entry;
3795   std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
3796   Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
3797   StringRef EmuTlsVarName(NameString);
3798   GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
3799   assert(EmuTlsVar && "Cannot find EmuTlsVar ");
3800   Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
3801   Entry.Ty = VoidPtrType;
3802   Args.push_back(Entry);
3803 
3804   SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
3805 
3806   TargetLowering::CallLoweringInfo CLI(DAG);
3807   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
3808   CLI.setCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
3809   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3810 
3811   // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
3812   // At last for X86 targets, maybe good for other targets too?
3813   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3814   MFI.setAdjustsStack(true);  // Is this only for X86 target?
3815   MFI.setHasCalls(true);
3816 
3817   assert((GA->getOffset() == 0) &&
3818          "Emulated TLS must have zero offset in GlobalAddressSDNode");
3819   return CallResult.first;
3820 }
3821 
3822 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
3823                                                 SelectionDAG &DAG) const {
3824   assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node.");
3825   if (!isCtlzFast())
3826     return SDValue();
3827   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3828   SDLoc dl(Op);
3829   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3830     if (C->isNullValue() && CC == ISD::SETEQ) {
3831       EVT VT = Op.getOperand(0).getValueType();
3832       SDValue Zext = Op.getOperand(0);
3833       if (VT.bitsLT(MVT::i32)) {
3834         VT = MVT::i32;
3835         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
3836       }
3837       unsigned Log2b = Log2_32(VT.getSizeInBits());
3838       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
3839       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
3840                                 DAG.getConstant(Log2b, dl, MVT::i32));
3841       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
3842     }
3843   }
3844   return SDValue();
3845 }
3846