//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "inline-asm-lowering"

using namespace llvm;

void InlineAsmLowering::anchor() {}

namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;

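/// Helper that accumulates the "extra info" bits of an INLINEASM instruction
/// (side effects, stack alignment, convergence, asm dialect, may-load and
/// may-store), which are emitted as its second operand.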
class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
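    // getDialect() is 0 for AT&T and 1 for Intel syntax, so this sets the
    // Extra_AsmDialect bit only for Intel-dialect asm.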
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints.  However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it.  Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // namespace

/// Assign virtual/physical registers for the specified register operand.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set)
  if (AssignedReg) {
    while (I != RC->end() && *I != AssignedReg)
      ++I;
    assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If the AssignedReg isn't set, create virtual
  // registers with the provided register class
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}

/// Return an integer indicating how general CT is.
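/// Higher values are more general; chooseConstraint() uses this to pick the
/// most general alternative when a constraint string offers several options
/// (e.g. "rm").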
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  case TargetLowering::C_Immediate:
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
    return 3;
  }
  llvm_unreachable("Invalid constraint type");
}

static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering *TLI) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
        TLI->getConstraintType(OpInfo.Codes[i]);

    // Indirect 'other' or 'immediate' constraints are not allowed.
    if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
                               CType == TargetLowering::C_Register ||
                               CType == TargetLowering::C_RegisterClass))
      continue;

    // If this is an 'other' or 'immediate' constraint, see if the operand is
    // valid for it. For example, on X86 we might have an 'rI' constraint. If
    // the operand is an integer in the range [0..31] we want to use I (saving a
    // load of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other ||
        CType == TargetLowering::C_Immediate) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      // FIXME: prefer immediate constraints if the target allows it
    }

    // Things with matching constraints can only be registers, per gcc
    // documentation.  This mainly affects "g" constraints.
    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
      continue;

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}

static void computeConstraintToUse(const TargetLowering *TLI,
                                   TargetLowering::AsmOperandInfo &OpInfo) {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  } else {
    chooseConstraint(OpInfo, TLI);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels).  For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *Val = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.Type == InlineAsm::isInput ||
        (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {

      OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo++));

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
        if (!PtrTy)
          report_fatal_error("Indirect operand for inline asm not a pointer!");
        OpTy = PtrTy->getElementType();
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT = TLI->getValueType(DL, OpTy, true).getSimpleVT();

    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT = TLI->getSimpleValueType(DL, Call.getType());
      }
      ++ResNo;
    } else {
      OpInfo.ConstraintVT = MVT::Other;
    }

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new side effects.
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet, since lowering the input
  // operands may still emit instructions (e.g. copies) that must come before it.
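  // The resulting INLINEASM is laid out as follows: operand 0 is the asm
  // string (an external symbol), operand 1 is the extra-info immediate built
  // from ExtraFlags, and each asm operand is then described by a flag-word
  // immediate followed by its register/immediate operands.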
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().c_str())
                  .addImm(ExtraInfo.get());

  // Collect the output operands for later processing.
  GISelAsmOperandInfoVector OutputOperands;

  for (auto &OpInfo : ConstraintOperands) {
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
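        // The flag word packs the operand kind (Kind_Mem here) with the number
        // of MI operands that follow it; getFlagWordForMem additionally
        // records the target's memory constraint ID.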
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass). Find a register that we can use.
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass);

        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        unsigned Flag = InlineAsm::getFlagWord(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
          Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg,
                      RegState::Define | getImplRegState(Reg.isPhysical()) |
                          (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput: {
      if (OpInfo.isMatchingInputConstraint()) {
        LLVM_DEBUG(dbgs() << "Tied input operands not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        unsigned OpFlags =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {

        if (!OpInfo.isIndirect) {
          LLVM_DEBUG(dbgs()
                     << "Cannot indirectify memory input operands yet\n");
          return false;
        }

        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");

        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory input to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

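      // Emit the copy into the constraint register now; the INLINEASM itself
      // is inserted only after all operands are processed (see
      // buildInstrNoInsert above), so the copy ends up ahead of it.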
      unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs);
      Inst.addImm(Flag);
      MIRBuilder.buildCopy(OpInfo.Regs[0], SourceRegs[0]);
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag =
            InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

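  // Attach the !srcloc metadata (if present) so that backend diagnostics for
  // this asm can point back at the original source location.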
  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
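      // The constraint register may be wider than the IR result type; in that
      // case copy out the full register width first and truncate it down to
      // the result type.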
      if (MRI->getType(ResRegs[i]).getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Need to truncate the result of the register
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      }
      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}

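// Only the simple integer 'i' constraint is handled here; for any other
// constraint code this returns false and lowerInlineAsm() above bails out
// with a "Don't support constraint" debug message.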
bool InlineAsmLowering::lowerAsmOperandForConstraint(
    Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
    MachineIRBuilder &MIRBuilder) const {
  if (Constraint.size() > 1)
    return false;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    return false;
  case 'i': // Simple Integer or Relocatable Constant
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      assert(CI->getBitWidth() <= 64 &&
             "expected immediate to fit into 64-bits");
      // Boolean constants should be zero-extended, others are sign-extended
      bool IsBool = CI->getBitWidth() == 1;
      int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
      Ops.push_back(MachineOperand::CreateImm(ExtVal));
      return true;
    }
    return false;
  }
}