//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"

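// Compute the feature-string pieces implied by the triple: exactly one of the
// 16-/32-/64-bit mode features, plus the SSE2 baseline in 64-bit mode.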
std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  // SSE2 should default to enabled in 64-bit mode, but can be turned off
  // explicitly.
  if (TT.isArch64Bit())
    FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";

  return FS;
}

unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported for now; fall back to the generic flavour.
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}

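// Return true if the base or index register of the memory operand starting at
// operand index Op belongs to the register class RegClassID.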
static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];

  return (Base.isReg() && Base.getReg() != 0 && RC.contains(Base.getReg())) ||
         (Index.isReg() && Index.getReg() != 0 && RC.contains(Index.getReg()));
}

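// A memory operand uses 16-bit addressing if its base or index is a 16-bit
// register. In 16-bit mode, a register-less (displacement-only) operand also
// defaults to 16-bit addressing.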
bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
                               const MCSubtargetInfo &STI) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);

  if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && Base.getReg() == 0 &&
      Index.isReg() && Index.getReg() == 0)
    return true;
  return isMemOperand(MI, Op, X86::GR16RegClassID);
}

bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  if (Base.isReg() && Base.getReg() == X86::EIP) {
    assert(Index.isReg() && Index.getReg() == 0 && "Invalid eip-based address");
    return true;
  }
  if (Index.isReg() && Index.getReg() == X86::EIZ)
    return true;
  return isMemOperand(MI, Op, X86::GR32RegClassID);
}

#ifndef NDEBUG
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif

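// Return true if the instruction needs a 0x67 address-size override prefix,
// i.e. its effective address size differs from the default for the current
// mode: 16-bit addressing in 32-bit mode, or 32-bit addressing in 16- or
// 64-bit mode. String instructions are checked via their implicit SI/DI
// operands instead of an explicit memory operand.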
bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      int MemoryOperand, uint64_t TSFlags) {
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
  bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
  bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
  if ((Is16BitMode && AdSize == X86II::AdSize32) ||
      (Is32BitMode && AdSize == X86II::AdSize16) ||
      (Is64BitMode && AdSize == X86II::AdSize32))
    return true;
  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    unsigned siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmSrc: {
    unsigned siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmDst: {
    unsigned siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::EDI) ||
           (Is32BitMode && siReg == X86::DI);
  }
  }

  // Determine where the memory operand starts, if present.
  if (MemoryOperand < 0)
    return false;

  if (STI.hasFeature(X86::Is64Bit)) {
    assert(!is16BitMemOperand(MI, MemoryOperand, STI));
    return is32BitMemOperand(MI, MemoryOperand);
  }
  if (STI.hasFeature(X86::Is32Bit)) {
    assert(!is64BitMemOperand(MI, MemoryOperand));
    return is16BitMemOperand(MI, MemoryOperand, STI);
  }
  assert(STI.hasFeature(X86::Is16Bit));
  assert(!is64BitMemOperand(MI, MemoryOperand));
  return !is16BitMemOperand(MI, MemoryOperand, STI);
}

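// Populate the LLVM-to-SEH and LLVM-to-CodeView register tables used when
// emitting Windows unwind and debug information. SEH register numbers are
// simply the hardware encoding values; the CodeView mapping needs an explicit
// table.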
void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::ST0},
      {codeview::RegisterId::ST1, X86::ST1},
      {codeview::RegisterId::ST2, X86::ST2},
      {codeview::RegisterId::ST3, X86::ST3},
      {codeview::RegisterId::ST4, X86::ST4},
      {codeview::RegisterId::ST5, X86::ST5},
      {codeview::RegisterId::ST6, X86::ST6},
      {codeview::RegisterId::ST7, X86::ST7},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
  };
  for (const auto &I : RegMap)
    MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}

MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  assert(!ArchFS.empty() && "Failed to parse X86 triple");
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  if (CPU.empty())
    CPU = "generic";

  return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
}

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple,
                                     const MCTargetOptions &Options) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    if (Options.getAssemblyLanguage().equals_insensitive("masm"))
      MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
    else
      MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

  // Initialize initial frame state.
  // stackGrowth is the number of bytes the stack grows by when the return
  // address is pushed (negative because the stack grows down).
  int stackGrowth = is64Bit ? -8 : -4;

  // The initial CFA is esp+4 (rsp+8 on x86-64): the value of the stack
  // pointer before the return address was pushed.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Record the return address as a saved copy of the instruction pointer,
  // stored at CFA-4 (CFA-8 on x86-64).
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}

static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 uint64_t GotPltSectionVA,
                 const Triple &TargetTriple) const override;

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override;
  Optional<uint64_t> evaluateMemoryOperandAddress(const MCInst &Inst,
                                                  const MCSubtargetInfo *STI,
                                                  uint64_t Addr,
                                                  uint64_t Size) const override;
  Optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst,
                                   uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.getNumImplicitDefs();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64-bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector
    // register width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.getImplicitDefs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}

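// Scan a 32-bit .plt section for jump stubs and return, for each entry found,
// the pair (PLT entry address, address of the GOT slot it jumps through).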
static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                  uint64_t GotPltSectionVA) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
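    // "ff a3 <disp32>" encodes "jmp *disp32(%ebx)" (PIC; %ebx holds the
    // .got.plt base), and "ff 25 <addr32>" encodes "jmp *addr32" (non-PIC).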
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, GotPltSectionVA + Imm));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

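// Scan a 64-bit .plt section. Entries use "ff 25 <disp32>", i.e.
// "jmpq *disp32(%rip)", so the GOT entry address is RIP-relative.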
static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

std::vector<std::pair<uint64_t, uint64_t>> X86MCInstrAnalysis::findPltEntries(
    uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
    uint64_t GotPltSectionVA, const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents, GotPltSectionVA);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

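// A branch target can only be computed statically for PC-relative branches:
// the target is the address of the next instruction plus the immediate.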
bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
                                        uint64_t Size, uint64_t &Target) const {
  if (Inst.getNumOperands() == 0 ||
      Info->get(Inst.getOpcode()).OpInfo[0].OperandType != MCOI::OPERAND_PCREL)
    return false;
  Target = Addr + Size + Inst.getOperand(0).getImm();
  return true;
}

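// Statically evaluate the address of a memory operand. Only RIP-relative
// operands with an immediate displacement (and no segment, index or scale)
// can be resolved without register state.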
Optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
    uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return None;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() != 0 || IndexReg.getReg() != 0 ||
      ScaleAmt.getImm() != 1 || !Disp.isImm())
    return None;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();

  return None;
}

Optional<uint64_t>
X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst,
                                                     uint64_t Size) const {
  if (Inst.getOpcode() != X86::LEA64r)
    return None;
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return None;
  MemOpStart += X86II::getOperandBias(MCID);
  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  // Must be a simple rip-relative address.
  if (BaseReg.getReg() != X86::RIP || SegReg.getReg() != 0 ||
      IndexReg.getReg() != 0 || ScaleAmt.getImm() != 1 || !Disp.isImm())
    return None;
  // The rip-relative ModR/M displacement is 32 bits and is the last 4 bytes
  // of the instruction.
  assert(Size > 4 && "invalid instruction size for rip-relative lea");
  return Size - 4;
}


} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}

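// Map Reg to the general purpose sub- or super-register of the given bit
// Size, or return X86::NoRegister if there is none. For Size == 8, High
// selects the legacy high-byte register (AH/CH/DH/BH) where one exists.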
MCRegister llvm::getX86SubSuperRegisterOrZero(MCRegister Reg, unsigned Size,
                                              bool High) {
  switch (Size) {
  default: return X86::NoRegister;
  case 8:
    if (High) {
      switch (Reg.id()) {
      default: return getX86SubSuperRegisterOrZero(Reg, 64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg.id()) {
      default: return X86::NoRegister;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case 16:
    switch (Reg.id()) {
    default: return X86::NoRegister;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case 32:
    switch (Reg.id()) {
    default: return X86::NoRegister;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case 64:
    switch (Reg.id()) {
    default: return X86::NoRegister;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size,
                                        bool High) {
  MCRegister Res = getX86SubSuperRegisterOrZero(Reg, Size, High);
  assert(Res != X86::NoRegister && "Unexpected register or VT");
  return Res;
}