//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove once supported by TableGen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF);
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF);
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
                                   MachineFunction &MF) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

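// Return the sub-register index under which a register of class RC lives
// inside a wider GPR (e.g. a GR32 occupies the sub_32bit slice of a GR64),
// or X86::NoSubRegister when there is no such mapping.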
static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

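// Map a physical GPR to the register class matching its width.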
static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an any-extension.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Substitute the physical register to perform the truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when we hit
  // another of its uses or defs. Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

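// Entry point of the selector: handle copies and other non-generic
// instructions first, try the TableGen-erated patterns next, then fall back
// to the manual C++ selection below.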
bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectDivRem(I, MRI, MF);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWSideEffects(I, MRI, MF);
  }

  return false;
}

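// Return the x86 load/store opcode for the given type, register bank, and
// alignment. If no suitable opcode exists, the generic Opc is returned
// unchanged and the caller fails the selection.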
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
                       HasAVX    ? X86::VMOVSSrm_alt :
                                   X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr :
                       HasAVX    ? X86::VMOVSSmr :
                                   X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
                       HasAVX    ? X86::VMOVSDrm_alt :
                                   X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr :
                       HasAVX    ? X86::VMOVSDmr :
                                   X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}

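// Select G_LOAD/G_STORE into a concrete MOV* instruction with a full x86
// address operand.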
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact that the appropriate
    // MMO is already on the instruction we're mutating, and thus we don't need
    // to make any changes. So long as we select an opcode which is capable of
    // loading or storing the appropriate size atomically, the rest of the
    // backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE is (VAL, Addr); the X86 store instruction is (Addr, VAL).
    I.removeOperand(1);
    I.removeOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

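// Return the LEA opcode matching the pointer width of Ty.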
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to compute the frame index or GEP address.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

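// Materialize a global's address with LEA. Bail out on TLS, non-small code
// models, and references that require a stub load or a PIC base.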
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.removeOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

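// Select G_CONSTANT on the GPR bank into a MOV*ri of the appropriate width.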
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: when isUInt<32>(Val), X86::MOV32ri can be used.
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC is a floating-point register class and SrcRC is a
// 128-bit vector register class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this truncates a value that lives in a vector class into a
  // floating-point class, just replace it with a copy, as we can select it
  // as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}

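// Select G_ZEXT. Only the s1 source case is handled here, by masking the
// value with an AND against 1; wider zero-extensions are matched by the
// TableGen-erated patterns.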
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point class
  // and goes into a vector class, just replace it with a copy, as we can
  // select it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

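// Select G_ICMP as a CMP*rr followed by a SETCCr of the condition code.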
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               I.getOperand(0).getReg())
           .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

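// Select G_FCMP as UCOMISS/UCOMISD plus SETCCr. FCMP_OEQ and FCMP_UNE need
// two SETCCs whose results are combined with AND8rr/OR8rr.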
bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               ResultReg)
           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  Register CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the instruction that defines the carry-in, looking through truncs.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // The carry was set by a previous ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
    // The carry-in is a constant; only 0 is supported.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

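// Select G_EXTRACT of a subvector: index 0 becomes a plain subregister copy,
// other indices become a VEXTRACT* instruction.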
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not a subvector extract.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not a subvector insert.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

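// Lower G_UNMERGE_VALUES into one G_EXTRACT per result and select each of
// them.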
bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}

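// Lower G_MERGE_VALUES/G_CONCAT_VECTORS into a chain of G_INSERTs, starting
// from a subregister insert of the first source, and select each new
// instruction.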
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split to inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source, use emitInsertSubreg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}

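// Select G_BRCOND as a TEST8ri of the condition register against 1, followed
// by JCC_1 (jump if not equal).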
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB).addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

materializeFP(MachineInstr & I,MachineRegisterInfo & MRI,MachineFunction & MF) const1381 bool X86InstructionSelector::materializeFP(MachineInstr &I,
1382                                            MachineRegisterInfo &MRI,
1383                                            MachineFunction &MF) const {
1384   assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
1385          "unexpected instruction");
1386 
1387   // Can't handle alternate code models yet.
1388   CodeModel::Model CM = TM.getCodeModel();
1389   if (CM != CodeModel::Small && CM != CodeModel::Large)
1390     return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  Align Alignment = Align(DstTy.getSizeInBytes());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the x86-64 non-small code models, addresses of GVs (and friends)
    // are 64 bits wide, so they cannot be folded into the immediate fields
    // of load instructions.

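    // A sketch of the sequence emitted below (virtual register name
    // hypothetical): materialize the constant-pool address first, then load
    // through it.
    //   %addr:gr64 = MOV64ri %const.N
    //   %dst = <opcode chosen by getLoadStoreOp> (%addr)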
    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        LLT::pointer(0, MF.getDataLayout().getPointerSizeInBits()), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field: this is
    // always true for x86-32, and true for x86-64 in -mcmodel=small mode.
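    //
    // E.g. for a 64-bit float on the x86-64 small code model, the emitted
    // load is RIP-relative (the opcode varies with type and register bank):
    //   %dst:fr64 = MOVSDrm $rip, 1, $noreg, %const.N, $noreg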

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF); in DAG ISel
      // the code that initializes it is generated by the CGBR pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

bool X86InstructionSelector::selectDivRem(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  // The implementation of this function is taken from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 4;   // SDiv, SRem, UDiv, URem
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;
  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
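  //
  // For example, a 32-bit signed division %q = G_SDIV %a, %b is selected
  // below as (a sketch; virtual register names hypothetical):
  //   $eax = COPY %a
  //   CDQ              ; sign-extend EAX into EDX
  //   IDIV32r %b       ; quotient in EAX, remainder in EDX
  //   %q = COPY $eax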
  const static struct DivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct DivRemResult {
      unsigned OpDivRem;        // The specific DIV/IDIV opcode to use.
      unsigned OpSignExtend;    // Opcode for sign-extending lowreg into
                                // highreg, or copying a zero into highreg.
      unsigned OpCopy;          // Opcode for copying dividend into lowreg, or
                                // zero/sign-extending into lowreg for i8.
      unsigned DivRemResultReg; // Register containing the desired result.
      bool IsOpSigned;          // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},    // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},    // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
       }},                                                // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},    // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},    // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
       }},                                                 // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},    // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},    // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
       }},                                                 // i64
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const DivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  }

  const DivRemEntry &TypeEntry = *OpEntryIt;
  const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);
  // Zero-extend or sign-extend into high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
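      //
      // E.g. for i16 the 32-bit zero is narrowed with a subregister copy
      // ($dx = COPY %zero32.sub_16bit), for i64 it is widened with
      // SUBREG_TO_REG, and for i32 a plain COPY suffices.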
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }
  // Generate the DIV/IDIV instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpDivRem))
      .addReg(Op2Reg);
  // For i8 remainder, we can't reference ah directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference ax
  // instead to prevent ah references in a rex instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
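  //
  // Instead of %dst = COPY $ah, the sequence below copies $ax into a GR16
  // virtual register, shifts it right by 8, and references the 8-bit subreg
  // of the result; a sketch (virtual register names hypothetical):
  //   %src16:gr16 = COPY $ax
  //   %res16:gr16 = SHR16ri %src16, 8
  //   %dst = SUBREG_TO_REG 0, %res16, %subreg.sub_8bit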
  if ((I.getOpcode() == TargetOpcode::G_SREM ||
       I.getOpcode() == TargetOpcode::G_UREM) &&
      OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG))
        .addDef(DstReg)
        .addImm(0)
        .addReg(ResultSuperReg)
        .addImm(X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.DivRemResultReg);
  }
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectIntrinsicWSideEffects(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {
  assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "unexpected instruction");

  if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
    return false;

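  // llvm.trap lowers to the TRAP pseudo, which emits a ud2 instruction.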
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));

  I.eraseFromParent();
  return true;
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}