1 //===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements the IRTranslator class.
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
14 
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
17 #include "llvm/CodeGen/MachineFunction.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/IR/Constant.h"
21 #include "llvm/IR/Function.h"
22 #include "llvm/IR/IntrinsicInst.h"
23 #include "llvm/IR/Type.h"
24 #include "llvm/IR/Value.h"
25 #include "llvm/Target/TargetIntrinsicInfo.h"
26 #include "llvm/Target/TargetLowering.h"
27 
28 #define DEBUG_TYPE "irtranslator"
29 
30 using namespace llvm;
31 
// Pass identification: the address of ID uniquely identifies this pass.
char IRTranslator::ID = 0;
// Register the pass so it can be referenced by name and scheduled by the
// pass manager; it is neither CFG-only nor an analysis pass.
INITIALIZE_PASS(IRTranslator, "irtranslator", "IRTranslator LLVM IR -> MI",
                false, false)
35 
// Default constructor: registers the pass with the global registry.
// MRI stays null until runOnMachineFunction initializes it.
IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}
39 
40 unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
41   unsigned &ValReg = ValToVReg[&Val];
42   // Check if this is the first time we see Val.
43   if (!ValReg) {
44     // Fill ValRegsSequence with the sequence of registers
45     // we need to concat together to produce the value.
46     assert(Val.getType()->isSized() &&
47            "Don't know how to create an empty vreg");
48     assert(!Val.getType()->isAggregateType() && "Not yet implemented");
49     unsigned Size = DL->getTypeSizeInBits(Val.getType());
50     unsigned VReg = MRI->createGenericVirtualRegister(Size);
51     ValReg = VReg;
52     assert(!isa<Constant>(Val) && "Not yet implemented");
53   }
54   return ValReg;
55 }
56 
57 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
58   unsigned Alignment = 0;
59   Type *ValTy = nullptr;
60   if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
61     Alignment = SI->getAlignment();
62     ValTy = SI->getValueOperand()->getType();
63   } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
64     Alignment = LI->getAlignment();
65     ValTy = LI->getType();
66   } else
67     llvm_unreachable("unhandled memory instruction");
68 
69   return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
70 }
71 
72 MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
73   MachineBasicBlock *&MBB = BBToMBB[&BB];
74   if (!MBB) {
75     MachineFunction &MF = MIRBuilder.getMF();
76     MBB = MF.CreateMachineBasicBlock();
77     MF.push_back(MBB);
78   }
79   return *MBB;
80 }
81 
82 bool IRTranslator::translateBinaryOp(unsigned Opcode,
83                                      const BinaryOperator &Inst) {
84   // FIXME: handle signed/unsigned wrapping flags.
85 
86   // Get or create a virtual register for each value.
87   // Unless the value is a Constant => loadimm cst?
88   // or inline constant each time?
89   // Creation of a virtual register needs to have a size.
90   unsigned Op0 = getOrCreateVReg(*Inst.getOperand(0));
91   unsigned Op1 = getOrCreateVReg(*Inst.getOperand(1));
92   unsigned Res = getOrCreateVReg(Inst);
93   MIRBuilder.buildInstr(Opcode, LLT{*Inst.getType()})
94       .addDef(Res)
95       .addUse(Op0)
96       .addUse(Op1);
97   return true;
98 }
99 
100 bool IRTranslator::translateReturn(const ReturnInst &RI) {
101   const Value *Ret = RI.getReturnValue();
102   // The target may mess up with the insertion point, but
103   // this is not important as a return is the last instruction
104   // of the block anyway.
105   return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
106 }
107 
108 bool IRTranslator::translateBr(const BranchInst &BrInst) {
109   unsigned Succ = 0;
110   if (!BrInst.isUnconditional()) {
111     // We want a G_BRCOND to the true BB followed by an unconditional branch.
112     unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
113     const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
114     MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
115     MIRBuilder.buildBrCond(LLT{*BrInst.getCondition()->getType()}, Tst, TrueBB);
116   }
117 
118   const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
119   MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
120   MIRBuilder.buildBr(TgtBB);
121 
122   // Link successors.
123   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
124   for (const BasicBlock *Succ : BrInst.successors())
125     CurBB.addSuccessor(&getOrCreateBB(*Succ));
126   return true;
127 }
128 
129 bool IRTranslator::translateLoad(const LoadInst &LI) {
130   assert(LI.isSimple() && "only simple loads are supported at the moment");
131 
132   MachineFunction &MF = MIRBuilder.getMF();
133   unsigned Res = getOrCreateVReg(LI);
134   unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
135   LLT VTy{*LI.getType()}, PTy{*LI.getPointerOperand()->getType()};
136 
137   MIRBuilder.buildLoad(
138       VTy, PTy, Res, Addr,
139       *MF.getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
140                                MachineMemOperand::MOLoad,
141                                VTy.getSizeInBits() / 8, getMemOpAlignment(LI)));
142   return true;
143 }
144 
145 bool IRTranslator::translateStore(const StoreInst &SI) {
146   assert(SI.isSimple() && "only simple loads are supported at the moment");
147 
148   MachineFunction &MF = MIRBuilder.getMF();
149   unsigned Val = getOrCreateVReg(*SI.getValueOperand());
150   unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
151   LLT VTy{*SI.getValueOperand()->getType()},
152       PTy{*SI.getPointerOperand()->getType()};
153 
154   MIRBuilder.buildStore(
155       VTy, PTy, Val, Addr,
156       *MF.getMachineMemOperand(MachinePointerInfo(SI.getPointerOperand()),
157                                MachineMemOperand::MOStore,
158                                VTy.getSizeInBits() / 8, getMemOpAlignment(SI)));
159   return true;
160 }
161 
162 bool IRTranslator::translateBitCast(const CastInst &CI) {
163   if (LLT{*CI.getDestTy()} == LLT{*CI.getSrcTy()}) {
164     MIRBuilder.buildCopy(getOrCreateVReg(CI),
165                          getOrCreateVReg(*CI.getOperand(0)));
166     return true;
167   }
168   return translateCast(TargetOpcode::G_BITCAST, CI);
169 }
170 
171 bool IRTranslator::translateCast(unsigned Opcode, const CastInst &CI) {
172   unsigned Op = getOrCreateVReg(*CI.getOperand(0));
173   unsigned Res = getOrCreateVReg(CI);
174   MIRBuilder.buildInstr(Opcode, {LLT{*CI.getDestTy()}, LLT{*CI.getSrcTy()}})
175       .addDef(Res)
176       .addUse(Op);
177   return true;
178 }
179 
180 bool IRTranslator::translateCall(const CallInst &CI) {
181   auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
182   const Function &F = *CI.getCalledFunction();
183   Intrinsic::ID ID = F.getIntrinsicID();
184   if (TII && ID == Intrinsic::not_intrinsic)
185     ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(&F));
186 
187   assert(ID != Intrinsic::not_intrinsic && "FIXME: support real calls");
188 
189   // Need types (starting with return) & args.
190   SmallVector<LLT, 4> Tys;
191   Tys.emplace_back(*CI.getType());
192   for (auto &Arg : CI.arg_operands())
193     Tys.emplace_back(*Arg->getType());
194 
195   unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
196   MachineInstrBuilder MIB =
197       MIRBuilder.buildIntrinsic(Tys, ID, Res, !CI.doesNotAccessMemory());
198 
199   for (auto &Arg : CI.arg_operands()) {
200     if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
201       MIB.addImm(CI->getSExtValue());
202     else
203       MIB.addUse(getOrCreateVReg(*Arg));
204   }
205   return true;
206 }
207 
208 bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
209   assert(AI.isStaticAlloca() && "only handle static allocas now");
210   MachineFunction &MF = MIRBuilder.getMF();
211   unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
212   unsigned Size =
213       ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
214 
215   // Always allocate at least one byte.
216   Size = std::max(Size, 1u);
217 
218   unsigned Alignment = AI.getAlignment();
219   if (!Alignment)
220     Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
221 
222   unsigned Res = getOrCreateVReg(AI);
223   int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
224   MIRBuilder.buildFrameIndex(LLT::pointer(0), Res, FI);
225   return true;
226 }
227 
228 bool IRTranslator::translate(const Instruction &Inst) {
229   MIRBuilder.setDebugLoc(Inst.getDebugLoc());
230   switch(Inst.getOpcode()) {
231   // Arithmetic operations.
232   case Instruction::Add:
233     return translateBinaryOp(TargetOpcode::G_ADD, cast<BinaryOperator>(Inst));
234   case Instruction::Sub:
235     return translateBinaryOp(TargetOpcode::G_SUB, cast<BinaryOperator>(Inst));
236 
237   // Bitwise operations.
238   case Instruction::And:
239     return translateBinaryOp(TargetOpcode::G_AND, cast<BinaryOperator>(Inst));
240   case Instruction::Or:
241     return translateBinaryOp(TargetOpcode::G_OR, cast<BinaryOperator>(Inst));
242   case Instruction::Xor:
243     return translateBinaryOp(TargetOpcode::G_XOR, cast<BinaryOperator>(Inst));
244 
245   // Branch operations.
246   case Instruction::Br:
247     return translateBr(cast<BranchInst>(Inst));
248   case Instruction::Ret:
249     return translateReturn(cast<ReturnInst>(Inst));
250 
251   // Calls
252   case Instruction::Call:
253     return translateCall(cast<CallInst>(Inst));
254 
255   // Casts
256   case Instruction::BitCast:
257     return translateBitCast(cast<CastInst>(Inst));
258   case Instruction::IntToPtr:
259     return translateCast(TargetOpcode::G_INTTOPTR, cast<CastInst>(Inst));
260   case Instruction::PtrToInt:
261     return translateCast(TargetOpcode::G_PTRTOINT, cast<CastInst>(Inst));
262 
263   // Memory ops.
264   case Instruction::Load:
265     return translateLoad(cast<LoadInst>(Inst));
266   case Instruction::Store:
267     return translateStore(cast<StoreInst>(Inst));
268 
269   case Instruction::Alloca:
270     return translateStaticAlloca(cast<AllocaInst>(Inst));
271 
272   case Instruction::Unreachable:
273     return true;
274 
275   default:
276     llvm_unreachable("Opcode not supported");
277   }
278 }
279 
280 
281 void IRTranslator::finalize() {
282   // Release the memory used by the different maps we
283   // needed during the translation.
284   ValToVReg.clear();
285   Constants.clear();
286 }
287 
288 bool IRTranslator::runOnMachineFunction(MachineFunction &MF) {
289   const Function &F = *MF.getFunction();
290   if (F.empty())
291     return false;
292   CLI = MF.getSubtarget().getCallLowering();
293   MIRBuilder.setMF(MF);
294   MRI = &MF.getRegInfo();
295   DL = &F.getParent()->getDataLayout();
296 
297   // Setup the arguments.
298   MachineBasicBlock &MBB = getOrCreateBB(F.front());
299   MIRBuilder.setMBB(MBB);
300   SmallVector<unsigned, 8> VRegArgs;
301   for (const Argument &Arg: F.args())
302     VRegArgs.push_back(getOrCreateVReg(Arg));
303   bool Succeeded =
304       CLI->lowerFormalArguments(MIRBuilder, F.getArgumentList(), VRegArgs);
305   if (!Succeeded)
306     report_fatal_error("Unable to lower arguments");
307 
308   for (const BasicBlock &BB: F) {
309     MachineBasicBlock &MBB = getOrCreateBB(BB);
310     // Set the insertion point of all the following translations to
311     // the end of this basic block.
312     MIRBuilder.setMBB(MBB);
313     for (const Instruction &Inst: BB) {
314       bool Succeeded = translate(Inst);
315       if (!Succeeded) {
316         DEBUG(dbgs() << "Cannot translate: " << Inst << '\n');
317         report_fatal_error("Unable to translate instruction");
318       }
319     }
320   }
321 
322   // Now that the MachineFrameInfo has been configured, no further changes to
323   // the reserved registers are possible.
324   MRI->freezeReservedRegs(MF);
325 
326   return false;
327 }
328