1 //===- bolt/Target/X86/X86MCPlusBuilder.cpp -------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file provides X86-specific MCPlus builder.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "MCTargetDesc/X86BaseInfo.h"
14 #include "MCTargetDesc/X86MCTargetDesc.h"
15 #include "bolt/Core/MCPlusBuilder.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCFixupKindInfo.h"
18 #include "llvm/MC/MCInstBuilder.h"
19 #include "llvm/MC/MCInstrInfo.h"
20 #include "llvm/MC/MCRegisterInfo.h"
21 #include "llvm/Support/DataExtractor.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/Errc.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/ErrorOr.h"
26 #include <set>
27 
28 #define DEBUG_TYPE "mcplus"
29 
30 using namespace llvm;
31 using namespace bolt;
32 
33 namespace {
34 
35 unsigned getShortBranchOpcode(unsigned Opcode) {
36   switch (Opcode) {
37   default:
38     return Opcode;
39   case X86::JMP_2: return X86::JMP_1;
40   case X86::JMP_4: return X86::JMP_1;
41   case X86::JCC_2: return X86::JCC_1;
42   case X86::JCC_4: return X86::JCC_1;
43   }
44 }
45 
46 unsigned getShortArithOpcode(unsigned Opcode) {
47   switch (Opcode) {
48   default:
49     return Opcode;
50 
51   // IMUL
52   case X86::IMUL16rri:   return X86::IMUL16rri8;
53   case X86::IMUL16rmi:   return X86::IMUL16rmi8;
54   case X86::IMUL32rri:   return X86::IMUL32rri8;
55   case X86::IMUL32rmi:   return X86::IMUL32rmi8;
56   case X86::IMUL64rri32: return X86::IMUL64rri8;
57   case X86::IMUL64rmi32: return X86::IMUL64rmi8;
58 
59   // OR
60   case X86::OR16ri:    return X86::OR16ri8;
61   case X86::OR16mi:    return X86::OR16mi8;
62   case X86::OR32ri:    return X86::OR32ri8;
63   case X86::OR32mi:    return X86::OR32mi8;
64   case X86::OR64ri32:  return X86::OR64ri8;
65   case X86::OR64mi32:  return X86::OR64mi8;
66 
67   // AND
68   case X86::AND16ri:   return X86::AND16ri8;
69   case X86::AND16mi:   return X86::AND16mi8;
70   case X86::AND32ri:   return X86::AND32ri8;
71   case X86::AND32mi:   return X86::AND32mi8;
72   case X86::AND64ri32: return X86::AND64ri8;
73   case X86::AND64mi32: return X86::AND64mi8;
74 
75   // XOR
76   case X86::XOR16ri:   return X86::XOR16ri8;
77   case X86::XOR16mi:   return X86::XOR16mi8;
78   case X86::XOR32ri:   return X86::XOR32ri8;
79   case X86::XOR32mi:   return X86::XOR32mi8;
80   case X86::XOR64ri32: return X86::XOR64ri8;
81   case X86::XOR64mi32: return X86::XOR64mi8;
82 
83   // ADD
84   case X86::ADD16ri:   return X86::ADD16ri8;
85   case X86::ADD16mi:   return X86::ADD16mi8;
86   case X86::ADD32ri:   return X86::ADD32ri8;
87   case X86::ADD32mi:   return X86::ADD32mi8;
88   case X86::ADD64ri32: return X86::ADD64ri8;
89   case X86::ADD64mi32: return X86::ADD64mi8;
90 
91   // SUB
92   case X86::SUB16ri:   return X86::SUB16ri8;
93   case X86::SUB16mi:   return X86::SUB16mi8;
94   case X86::SUB32ri:   return X86::SUB32ri8;
95   case X86::SUB32mi:   return X86::SUB32mi8;
96   case X86::SUB64ri32: return X86::SUB64ri8;
97   case X86::SUB64mi32: return X86::SUB64mi8;
98 
99   // CMP
100   case X86::CMP16ri:   return X86::CMP16ri8;
101   case X86::CMP16mi:   return X86::CMP16mi8;
102   case X86::CMP32ri:   return X86::CMP32ri8;
103   case X86::CMP32mi:   return X86::CMP32mi8;
104   case X86::CMP64ri32: return X86::CMP64ri8;
105   case X86::CMP64mi32: return X86::CMP64mi8;
106 
107   // PUSH
108   case X86::PUSHi32:    return X86::PUSH32i8;
109   case X86::PUSHi16:    return X86::PUSH16i8;
110   case X86::PUSH64i32:  return X86::PUSH64i8;
111   }
112 }
113 
114 bool isADD(unsigned Opcode) {
115   switch (Opcode) {
116   default:
117     return false;
118   case X86::ADD16i16:
119   case X86::ADD16mi:
120   case X86::ADD16mi8:
121   case X86::ADD16mr:
122   case X86::ADD16ri:
123   case X86::ADD16ri8:
124   case X86::ADD16ri8_DB:
125   case X86::ADD16ri_DB:
126   case X86::ADD16rm:
127   case X86::ADD16rr:
128   case X86::ADD16rr_DB:
129   case X86::ADD16rr_REV:
130   case X86::ADD32i32:
131   case X86::ADD32mi:
132   case X86::ADD32mi8:
133   case X86::ADD32mr:
134   case X86::ADD32ri:
135   case X86::ADD32ri8:
136   case X86::ADD32ri8_DB:
137   case X86::ADD32ri_DB:
138   case X86::ADD32rm:
139   case X86::ADD32rr:
140   case X86::ADD32rr_DB:
141   case X86::ADD32rr_REV:
142   case X86::ADD64i32:
143   case X86::ADD64mi32:
144   case X86::ADD64mi8:
145   case X86::ADD64mr:
146   case X86::ADD64ri32:
147   case X86::ADD64ri32_DB:
148   case X86::ADD64ri8:
149   case X86::ADD64ri8_DB:
150   case X86::ADD64rm:
151   case X86::ADD64rr:
152   case X86::ADD64rr_DB:
153   case X86::ADD64rr_REV:
154   case X86::ADD8i8:
155   case X86::ADD8mi:
156   case X86::ADD8mi8:
157   case X86::ADD8mr:
158   case X86::ADD8ri:
159   case X86::ADD8ri8:
160   case X86::ADD8rm:
161   case X86::ADD8rr:
162   case X86::ADD8rr_REV:
163     return true;
164   }
165 }
166 
167 bool isAND(unsigned Opcode) {
168   switch (Opcode) {
169   default:
170     return false;
171   case X86::AND16i16:
172   case X86::AND16mi:
173   case X86::AND16mi8:
174   case X86::AND16mr:
175   case X86::AND16ri:
176   case X86::AND16ri8:
177   case X86::AND16rm:
178   case X86::AND16rr:
179   case X86::AND16rr_REV:
180   case X86::AND32i32:
181   case X86::AND32mi:
182   case X86::AND32mi8:
183   case X86::AND32mr:
184   case X86::AND32ri:
185   case X86::AND32ri8:
186   case X86::AND32rm:
187   case X86::AND32rr:
188   case X86::AND32rr_REV:
189   case X86::AND64i32:
190   case X86::AND64mi32:
191   case X86::AND64mi8:
192   case X86::AND64mr:
193   case X86::AND64ri32:
194   case X86::AND64ri8:
195   case X86::AND64rm:
196   case X86::AND64rr:
197   case X86::AND64rr_REV:
198   case X86::AND8i8:
199   case X86::AND8mi:
200   case X86::AND8mi8:
201   case X86::AND8mr:
202   case X86::AND8ri:
203   case X86::AND8ri8:
204   case X86::AND8rm:
205   case X86::AND8rr:
206   case X86::AND8rr_REV:
207     return true;
208   }
209 }
210 
211 bool isCMP(unsigned Opcode) {
212   switch (Opcode) {
213   default:
214     return false;
215   case X86::CMP16i16:
216   case X86::CMP16mi:
217   case X86::CMP16mi8:
218   case X86::CMP16mr:
219   case X86::CMP16ri:
220   case X86::CMP16ri8:
221   case X86::CMP16rm:
222   case X86::CMP16rr:
223   case X86::CMP16rr_REV:
224   case X86::CMP32i32:
225   case X86::CMP32mi:
226   case X86::CMP32mi8:
227   case X86::CMP32mr:
228   case X86::CMP32ri:
229   case X86::CMP32ri8:
230   case X86::CMP32rm:
231   case X86::CMP32rr:
232   case X86::CMP32rr_REV:
233   case X86::CMP64i32:
234   case X86::CMP64mi32:
235   case X86::CMP64mi8:
236   case X86::CMP64mr:
237   case X86::CMP64ri32:
238   case X86::CMP64ri8:
239   case X86::CMP64rm:
240   case X86::CMP64rr:
241   case X86::CMP64rr_REV:
242   case X86::CMP8i8:
243   case X86::CMP8mi:
244   case X86::CMP8mi8:
245   case X86::CMP8mr:
246   case X86::CMP8ri:
247   case X86::CMP8ri8:
248   case X86::CMP8rm:
249   case X86::CMP8rr:
250   case X86::CMP8rr_REV:
251     return true;
252   }
253 }
254 
255 bool isSUB(unsigned Opcode) {
256   switch (Opcode) {
257   default:
258     return false;
259   case X86::SUB16i16:
260   case X86::SUB16mi:
261   case X86::SUB16mi8:
262   case X86::SUB16mr:
263   case X86::SUB16ri:
264   case X86::SUB16ri8:
265   case X86::SUB16rm:
266   case X86::SUB16rr:
267   case X86::SUB16rr_REV:
268   case X86::SUB32i32:
269   case X86::SUB32mi:
270   case X86::SUB32mi8:
271   case X86::SUB32mr:
272   case X86::SUB32ri:
273   case X86::SUB32ri8:
274   case X86::SUB32rm:
275   case X86::SUB32rr:
276   case X86::SUB32rr_REV:
277   case X86::SUB64i32:
278   case X86::SUB64mi32:
279   case X86::SUB64mi8:
280   case X86::SUB64mr:
281   case X86::SUB64ri32:
282   case X86::SUB64ri8:
283   case X86::SUB64rm:
284   case X86::SUB64rr:
285   case X86::SUB64rr_REV:
286   case X86::SUB8i8:
287   case X86::SUB8mi:
288   case X86::SUB8mi8:
289   case X86::SUB8mr:
290   case X86::SUB8ri:
291   case X86::SUB8ri8:
292   case X86::SUB8rm:
293   case X86::SUB8rr:
294   case X86::SUB8rr_REV:
295     return true;
296   }
297 }
298 
299 bool isTEST(unsigned Opcode) {
300   switch (Opcode) {
301   default:
302     return false;
303   case X86::TEST16i16:
304   case X86::TEST16mi:
305   case X86::TEST16mr:
306   case X86::TEST16ri:
307   case X86::TEST16rr:
308   case X86::TEST32i32:
309   case X86::TEST32mi:
310   case X86::TEST32mr:
311   case X86::TEST32ri:
312   case X86::TEST32rr:
313   case X86::TEST64i32:
314   case X86::TEST64mi32:
315   case X86::TEST64mr:
316   case X86::TEST64ri32:
317   case X86::TEST64rr:
318   case X86::TEST8i8:
319   case X86::TEST8mi:
320   case X86::TEST8mr:
321   case X86::TEST8ri:
322   case X86::TEST8rr:
323     return true;
324   }
325 }
326 
327 class X86MCPlusBuilder : public MCPlusBuilder {
328 public:
329   X86MCPlusBuilder(const MCInstrAnalysis *Analysis, const MCInstrInfo *Info,
330                    const MCRegisterInfo *RegInfo)
331       : MCPlusBuilder(Analysis, Info, RegInfo) {}
332 
333   bool isBranch(const MCInst &Inst) const override {
334     return Analysis->isBranch(Inst) && !isTailCall(Inst);
335   }
336 
337   bool isUnconditionalBranch(const MCInst &Inst) const override {
338     return Analysis->isUnconditionalBranch(Inst) && !isTailCall(Inst);
339   }
340 
341   bool isNoop(const MCInst &Inst) const override {
342     switch (Inst.getOpcode()) {
343     case X86::NOOP:
344     case X86::NOOPL:
345     case X86::NOOPLr:
346     case X86::NOOPQ:
347     case X86::NOOPQr:
348     case X86::NOOPW:
349     case X86::NOOPWr:
350       return true;
351     }
352     return false;
353   }
354 
355   unsigned getCondCode(const MCInst &Inst) const override {
356     switch (Inst.getOpcode()) {
357     default:
358       return X86::COND_INVALID;
359     case X86::JCC_1:
360     case X86::JCC_2:
361     case X86::JCC_4:
362       return Inst.getOperand(Info->get(Inst.getOpcode()).NumOperands - 1)
363           .getImm();
364     }
365   }
366 
367   unsigned getInvertedCondCode(unsigned CC) const override {
368     switch (CC) {
369     default: return X86::COND_INVALID;
370     case X86::COND_E:  return X86::COND_NE;
371     case X86::COND_NE: return X86::COND_E;
372     case X86::COND_L:  return X86::COND_GE;
373     case X86::COND_LE: return X86::COND_G;
374     case X86::COND_G:  return X86::COND_LE;
375     case X86::COND_GE: return X86::COND_L;
376     case X86::COND_B:  return X86::COND_AE;
377     case X86::COND_BE: return X86::COND_A;
378     case X86::COND_A:  return X86::COND_BE;
379     case X86::COND_AE: return X86::COND_B;
380     case X86::COND_S:  return X86::COND_NS;
381     case X86::COND_NS: return X86::COND_S;
382     case X86::COND_P:  return X86::COND_NP;
383     case X86::COND_NP: return X86::COND_P;
384     case X86::COND_O:  return X86::COND_NO;
385     case X86::COND_NO: return X86::COND_O;
386     }
387   }
388 
389   unsigned getCondCodesLogicalOr(unsigned CC1, unsigned CC2) const override {
390     enum DecodedCondCode : uint8_t {
391       DCC_EQUAL = 0x1,
392       DCC_GREATER = 0x2,
393       DCC_LESSER = 0x4,
394       DCC_GREATER_OR_LESSER = 0x6,
395       DCC_UNSIGNED = 0x8,
396       DCC_SIGNED = 0x10,
397       DCC_INVALID = 0x20,
398     };
399 
400     auto decodeCondCode = [&](unsigned CC) -> uint8_t {
401       switch (CC) {
402       default: return DCC_INVALID;
403       case X86::COND_E: return DCC_EQUAL;
404       case X86::COND_NE: return DCC_GREATER | DCC_LESSER;
405       case X86::COND_L: return DCC_LESSER | DCC_SIGNED;
406       case X86::COND_LE: return DCC_EQUAL | DCC_LESSER | DCC_SIGNED;
407       case X86::COND_G: return DCC_GREATER | DCC_SIGNED;
408       case X86::COND_GE: return DCC_GREATER | DCC_EQUAL | DCC_SIGNED;
409       case X86::COND_B: return DCC_LESSER | DCC_UNSIGNED;
410       case X86::COND_BE: return DCC_EQUAL | DCC_LESSER | DCC_UNSIGNED;
411       case X86::COND_A: return DCC_GREATER | DCC_UNSIGNED;
412       case X86::COND_AE: return DCC_GREATER | DCC_EQUAL | DCC_UNSIGNED;
413       }
414     };
415 
416     uint8_t DCC = decodeCondCode(CC1) | decodeCondCode(CC2);
417 
418     if (DCC & DCC_INVALID)
419       return X86::COND_INVALID;
420 
421     if (DCC & DCC_SIGNED && DCC & DCC_UNSIGNED)
422       return X86::COND_INVALID;
423 
424     switch (DCC) {
425     default: return X86::COND_INVALID;
426     case DCC_EQUAL | DCC_LESSER | DCC_SIGNED: return X86::COND_LE;
427     case DCC_EQUAL | DCC_LESSER | DCC_UNSIGNED: return X86::COND_BE;
428     case DCC_EQUAL | DCC_GREATER | DCC_SIGNED: return X86::COND_GE;
429     case DCC_EQUAL | DCC_GREATER | DCC_UNSIGNED: return X86::COND_AE;
430     case DCC_GREATER | DCC_LESSER | DCC_SIGNED: return X86::COND_NE;
431     case DCC_GREATER | DCC_LESSER | DCC_UNSIGNED: return X86::COND_NE;
432     case DCC_GREATER | DCC_LESSER: return X86::COND_NE;
433     case DCC_EQUAL | DCC_SIGNED: return X86::COND_E;
434     case DCC_EQUAL | DCC_UNSIGNED: return X86::COND_E;
435     case DCC_EQUAL: return X86::COND_E;
436     case DCC_LESSER | DCC_SIGNED: return X86::COND_L;
437     case DCC_LESSER | DCC_UNSIGNED: return X86::COND_B;
438     case DCC_GREATER | DCC_SIGNED: return X86::COND_G;
439     case DCC_GREATER | DCC_UNSIGNED: return X86::COND_A;
440     }
441   }
442 
443   bool isValidCondCode(unsigned CC) const override {
444     return (CC != X86::COND_INVALID);
445   }
446 
447   bool isBreakpoint(const MCInst &Inst) const override {
448     return Inst.getOpcode() == X86::INT3;
449   }
450 
451   bool isPrefix(const MCInst &Inst) const override {
452     switch (Inst.getOpcode()) {
453     case X86::LOCK_PREFIX:
454     case X86::REPNE_PREFIX:
455     case X86::REP_PREFIX:
456       return true;
457     }
458     return false;
459   }
460 
461   bool isRep(const MCInst &Inst) const override {
462     return Inst.getFlags() == X86::IP_HAS_REPEAT;
463   }
464 
465   bool deleteREPPrefix(MCInst &Inst) const override {
466     if (Inst.getFlags() == X86::IP_HAS_REPEAT) {
467       Inst.setFlags(0);
468       return true;
469     }
470     return false;
471   }
472 
473   // FIXME: For compatibility with old LLVM only!
474   bool isTerminator(const MCInst &Inst) const override {
475     if (Info->get(Inst.getOpcode()).isTerminator())
476       return true;
477     switch (Inst.getOpcode()) {
478     default:
479       return false;
480     case X86::TRAP:
481     // Opcodes previously known as X86::UD2B
482     case X86::UD1Wm:
483     case X86::UD1Lm:
484     case X86::UD1Qm:
485     case X86::UD1Wr:
486     case X86::UD1Lr:
487     case X86::UD1Qr:
488       return true;
489     }
490   }
491 
492   bool isIndirectCall(const MCInst &Inst) const override {
493     return isCall(Inst) &&
494            ((getMemoryOperandNo(Inst) != -1) || Inst.getOperand(0).isReg());
495   }
496 
497   bool isPop(const MCInst &Inst) const override {
498     return getPopSize(Inst) == 0 ? false : true;
499   }
500 
501   bool isTerminateBranch(const MCInst &Inst) const override {
502     return Inst.getOpcode() == X86::ENDBR32 || Inst.getOpcode() == X86::ENDBR64;
503   }
504 
505   int getPopSize(const MCInst &Inst) const override {
506     switch (Inst.getOpcode()) {
507     case X86::POP16r:
508     case X86::POP16rmm:
509     case X86::POP16rmr:
510     case X86::POPF16:
511     case X86::POPA16:
512     case X86::POPDS16:
513     case X86::POPES16:
514     case X86::POPFS16:
515     case X86::POPGS16:
516     case X86::POPSS16:
517       return 2;
518     case X86::POP32r:
519     case X86::POP32rmm:
520     case X86::POP32rmr:
521     case X86::POPA32:
522     case X86::POPDS32:
523     case X86::POPES32:
524     case X86::POPF32:
525     case X86::POPFS32:
526     case X86::POPGS32:
527     case X86::POPSS32:
528       return 4;
529     case X86::POP64r:
530     case X86::POP64rmm:
531     case X86::POP64rmr:
532     case X86::POPF64:
533     case X86::POPFS64:
534     case X86::POPGS64:
535       return 8;
536     }
537     return 0;
538   }
539 
540   bool isPush(const MCInst &Inst) const override {
541     return getPushSize(Inst) == 0 ? false : true;
542   }
543 
544   int getPushSize(const MCInst &Inst) const override {
545     switch (Inst.getOpcode()) {
546     case X86::PUSH16i8:
547     case X86::PUSH16r:
548     case X86::PUSH16rmm:
549     case X86::PUSH16rmr:
550     case X86::PUSHA16:
551     case X86::PUSHCS16:
552     case X86::PUSHDS16:
553     case X86::PUSHES16:
554     case X86::PUSHF16:
555     case X86::PUSHFS16:
556     case X86::PUSHGS16:
557     case X86::PUSHSS16:
558     case X86::PUSHi16:
559       return 2;
560     case X86::PUSH32i8:
561     case X86::PUSH32r:
562     case X86::PUSH32rmm:
563     case X86::PUSH32rmr:
564     case X86::PUSHA32:
565     case X86::PUSHCS32:
566     case X86::PUSHDS32:
567     case X86::PUSHES32:
568     case X86::PUSHF32:
569     case X86::PUSHFS32:
570     case X86::PUSHGS32:
571     case X86::PUSHSS32:
572     case X86::PUSHi32:
573       return 4;
574     case X86::PUSH64i32:
575     case X86::PUSH64i8:
576     case X86::PUSH64r:
577     case X86::PUSH64rmm:
578     case X86::PUSH64rmr:
579     case X86::PUSHF64:
580     case X86::PUSHFS64:
581     case X86::PUSHGS64:
582       return 8;
583     }
584     return 0;
585   }
586 
587   bool isADD64rr(const MCInst &Inst) const override {
588     return Inst.getOpcode() == X86::ADD64rr;
589   }
590 
591   bool isSUB(const MCInst &Inst) const override {
592     return ::isSUB(Inst.getOpcode());
593   }
594 
595   bool isADDri(const MCInst &Inst) const {
596     return Inst.getOpcode() == X86::ADD64ri32 ||
597            Inst.getOpcode() == X86::ADD64ri8;
598   }
599 
600   bool isLEA64r(const MCInst &Inst) const override {
601     return Inst.getOpcode() == X86::LEA64r;
602   }
603 
604   bool isMOVSX64rm32(const MCInst &Inst) const override {
605     return Inst.getOpcode() == X86::MOVSX64rm32;
606   }
607 
608   bool isLeave(const MCInst &Inst) const override {
609     return Inst.getOpcode() == X86::LEAVE || Inst.getOpcode() == X86::LEAVE64;
610   }
611 
612   bool isMoveMem2Reg(const MCInst &Inst) const override {
613     switch (Inst.getOpcode()) {
614     case X86::MOV16rm:
615     case X86::MOV32rm:
616     case X86::MOV64rm:
617       return true;
618     }
619     return false;
620   }
621 
622   bool isUnsupportedBranch(unsigned Opcode) const override {
623     switch (Opcode) {
624     default:
625       return false;
626     case X86::LOOP:
627     case X86::LOOPE:
628     case X86::LOOPNE:
629     case X86::JECXZ:
630     case X86::JRCXZ:
631       return true;
632     }
633   }
634 
635   bool isLoad(const MCInst &Inst) const override {
636     if (isPop(Inst))
637       return true;
638 
639     int MemOpNo = getMemoryOperandNo(Inst);
640     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
641 
642     if (MemOpNo == -1)
643       return false;
644 
645     return MCII.mayLoad();
646   }
647 
648   bool isStore(const MCInst &Inst) const override {
649     if (isPush(Inst))
650       return true;
651 
652     int MemOpNo = getMemoryOperandNo(Inst);
653     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
654 
655     if (MemOpNo == -1)
656       return false;
657 
658     return MCII.mayStore();
659   }
660 
661   bool isCleanRegXOR(const MCInst &Inst) const override {
662     switch (Inst.getOpcode()) {
663     case X86::XOR16rr:
664     case X86::XOR32rr:
665     case X86::XOR64rr:
666       break;
667     default:
668       return false;
669     }
670     return (Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg());
671   }
672 
673   bool isPacked(const MCInst &Inst) const override {
674     const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
675     return (Desc.TSFlags & X86II::OpPrefixMask) == X86II::PD;
676   }
677 
678   unsigned getTrapFillValue() const override { return 0xCC; }
679 
680   struct IndJmpMatcherFrag1 : MCInstMatcher {
681     std::unique_ptr<MCInstMatcher> Base;
682     std::unique_ptr<MCInstMatcher> Scale;
683     std::unique_ptr<MCInstMatcher> Index;
684     std::unique_ptr<MCInstMatcher> Offset;
685 
686     IndJmpMatcherFrag1(std::unique_ptr<MCInstMatcher> Base,
687                        std::unique_ptr<MCInstMatcher> Scale,
688                        std::unique_ptr<MCInstMatcher> Index,
689                        std::unique_ptr<MCInstMatcher> Offset)
690         : Base(std::move(Base)), Scale(std::move(Scale)),
691           Index(std::move(Index)), Offset(std::move(Offset)) {}
692 
693     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
694                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
695       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
696         return false;
697 
698       if (CurInst->getOpcode() != X86::JMP64m)
699         return false;
700 
701       int MemOpNo = MIB.getMemoryOperandNo(*CurInst);
702       if (MemOpNo == -1)
703         return false;
704 
705       if (!Base->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrBaseReg))
706         return false;
707       if (!Scale->match(MRI, MIB, this->InstrWindow,
708                         MemOpNo + X86::AddrScaleAmt))
709         return false;
710       if (!Index->match(MRI, MIB, this->InstrWindow,
711                         MemOpNo + X86::AddrIndexReg))
712         return false;
713       if (!Offset->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrDisp))
714         return false;
715       return true;
716     }
717 
718     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
719       MIB.addAnnotation(*CurInst, Annotation, true);
720       Base->annotate(MIB, Annotation);
721       Scale->annotate(MIB, Annotation);
722       Index->annotate(MIB, Annotation);
723       Offset->annotate(MIB, Annotation);
724     }
725   };
726 
727   std::unique_ptr<MCInstMatcher>
728   matchIndJmp(std::unique_ptr<MCInstMatcher> Base,
729               std::unique_ptr<MCInstMatcher> Scale,
730               std::unique_ptr<MCInstMatcher> Index,
731               std::unique_ptr<MCInstMatcher> Offset) const override {
732     return std::unique_ptr<MCInstMatcher>(
733         new IndJmpMatcherFrag1(std::move(Base), std::move(Scale),
734                                std::move(Index), std::move(Offset)));
735   }
736 
737   struct IndJmpMatcherFrag2 : MCInstMatcher {
738     std::unique_ptr<MCInstMatcher> Reg;
739 
740     IndJmpMatcherFrag2(std::unique_ptr<MCInstMatcher> Reg)
741         : Reg(std::move(Reg)) {}
742 
743     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
744                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
745       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
746         return false;
747 
748       if (CurInst->getOpcode() != X86::JMP64r)
749         return false;
750 
751       return Reg->match(MRI, MIB, this->InstrWindow, 0);
752     }
753 
754     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
755       MIB.addAnnotation(*CurInst, Annotation, true);
756       Reg->annotate(MIB, Annotation);
757     }
758   };
759 
760   std::unique_ptr<MCInstMatcher>
761   matchIndJmp(std::unique_ptr<MCInstMatcher> Target) const override {
762     return std::unique_ptr<MCInstMatcher>(
763         new IndJmpMatcherFrag2(std::move(Target)));
764   }
765 
766   struct LoadMatcherFrag1 : MCInstMatcher {
767     std::unique_ptr<MCInstMatcher> Base;
768     std::unique_ptr<MCInstMatcher> Scale;
769     std::unique_ptr<MCInstMatcher> Index;
770     std::unique_ptr<MCInstMatcher> Offset;
771 
772     LoadMatcherFrag1(std::unique_ptr<MCInstMatcher> Base,
773                      std::unique_ptr<MCInstMatcher> Scale,
774                      std::unique_ptr<MCInstMatcher> Index,
775                      std::unique_ptr<MCInstMatcher> Offset)
776         : Base(std::move(Base)), Scale(std::move(Scale)),
777           Index(std::move(Index)), Offset(std::move(Offset)) {}
778 
779     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
780                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
781       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
782         return false;
783 
784       if (CurInst->getOpcode() != X86::MOV64rm &&
785           CurInst->getOpcode() != X86::MOVSX64rm32)
786         return false;
787 
788       int MemOpNo = MIB.getMemoryOperandNo(*CurInst);
789       if (MemOpNo == -1)
790         return false;
791 
792       if (!Base->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrBaseReg))
793         return false;
794       if (!Scale->match(MRI, MIB, this->InstrWindow,
795                         MemOpNo + X86::AddrScaleAmt))
796         return false;
797       if (!Index->match(MRI, MIB, this->InstrWindow,
798                         MemOpNo + X86::AddrIndexReg))
799         return false;
800       if (!Offset->match(MRI, MIB, this->InstrWindow, MemOpNo + X86::AddrDisp))
801         return false;
802       return true;
803     }
804 
805     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
806       MIB.addAnnotation(*CurInst, Annotation, true);
807       Base->annotate(MIB, Annotation);
808       Scale->annotate(MIB, Annotation);
809       Index->annotate(MIB, Annotation);
810       Offset->annotate(MIB, Annotation);
811     }
812   };
813 
814   std::unique_ptr<MCInstMatcher>
815   matchLoad(std::unique_ptr<MCInstMatcher> Base,
816             std::unique_ptr<MCInstMatcher> Scale,
817             std::unique_ptr<MCInstMatcher> Index,
818             std::unique_ptr<MCInstMatcher> Offset) const override {
819     return std::unique_ptr<MCInstMatcher>(
820         new LoadMatcherFrag1(std::move(Base), std::move(Scale),
821                              std::move(Index), std::move(Offset)));
822   }
823 
824   struct AddMatcher : MCInstMatcher {
825     std::unique_ptr<MCInstMatcher> A;
826     std::unique_ptr<MCInstMatcher> B;
827 
828     AddMatcher(std::unique_ptr<MCInstMatcher> A,
829                std::unique_ptr<MCInstMatcher> B)
830         : A(std::move(A)), B(std::move(B)) {}
831 
832     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
833                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
834       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
835         return false;
836 
837       if (CurInst->getOpcode() == X86::ADD64rr ||
838           CurInst->getOpcode() == X86::ADD64rr_DB ||
839           CurInst->getOpcode() == X86::ADD64rr_REV) {
840         if (!A->match(MRI, MIB, this->InstrWindow, 1)) {
841           if (!B->match(MRI, MIB, this->InstrWindow, 1))
842             return false;
843           return A->match(MRI, MIB, this->InstrWindow, 2);
844         }
845 
846         if (B->match(MRI, MIB, this->InstrWindow, 2))
847           return true;
848 
849         if (!B->match(MRI, MIB, this->InstrWindow, 1))
850           return false;
851         return A->match(MRI, MIB, this->InstrWindow, 2);
852       }
853 
854       return false;
855     }
856 
857     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
858       MIB.addAnnotation(*CurInst, Annotation, true);
859       A->annotate(MIB, Annotation);
860       B->annotate(MIB, Annotation);
861     }
862   };
863 
864   virtual std::unique_ptr<MCInstMatcher>
865   matchAdd(std::unique_ptr<MCInstMatcher> A,
866            std::unique_ptr<MCInstMatcher> B) const override {
867     return std::unique_ptr<MCInstMatcher>(
868         new AddMatcher(std::move(A), std::move(B)));
869   }
870 
871   struct LEAMatcher : MCInstMatcher {
872     std::unique_ptr<MCInstMatcher> Target;
873 
874     LEAMatcher(std::unique_ptr<MCInstMatcher> Target)
875         : Target(std::move(Target)) {}
876 
877     bool match(const MCRegisterInfo &MRI, MCPlusBuilder &MIB,
878                MutableArrayRef<MCInst> InInstrWindow, int OpNum) override {
879       if (!MCInstMatcher::match(MRI, MIB, InInstrWindow, OpNum))
880         return false;
881 
882       if (CurInst->getOpcode() != X86::LEA64r)
883         return false;
884 
885       if (CurInst->getOperand(1 + X86::AddrScaleAmt).getImm() != 1 ||
886           CurInst->getOperand(1 + X86::AddrIndexReg).getReg() !=
887               X86::NoRegister ||
888           (CurInst->getOperand(1 + X86::AddrBaseReg).getReg() !=
889                X86::NoRegister &&
890            CurInst->getOperand(1 + X86::AddrBaseReg).getReg() != X86::RIP))
891         return false;
892 
893       return Target->match(MRI, MIB, this->InstrWindow, 1 + X86::AddrDisp);
894     }
895 
896     void annotate(MCPlusBuilder &MIB, StringRef Annotation) override {
897       MIB.addAnnotation(*CurInst, Annotation, true);
898       Target->annotate(MIB, Annotation);
899     }
900   };
901 
902   virtual std::unique_ptr<MCInstMatcher>
903   matchLoadAddr(std::unique_ptr<MCInstMatcher> Target) const override {
904     return std::unique_ptr<MCInstMatcher>(new LEAMatcher(std::move(Target)));
905   }
906 
907   bool hasPCRelOperand(const MCInst &Inst) const override {
908     for (const MCOperand &Operand : Inst)
909       if (Operand.isReg() && Operand.getReg() == X86::RIP)
910         return true;
911     return false;
912   }
913 
914   int getMemoryOperandNo(const MCInst &Inst) const override {
915     unsigned Opcode = Inst.getOpcode();
916     const MCInstrDesc &Desc = Info->get(Opcode);
917     int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
918     if (MemOpNo >= 0)
919       MemOpNo += X86II::getOperandBias(Desc);
920     return MemOpNo;
921   }
922 
923   bool hasEVEXEncoding(const MCInst &Inst) const override {
924     const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
925     return (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
926   }
927 
928   bool isMacroOpFusionPair(ArrayRef<MCInst> Insts) const override {
929     const auto *I = Insts.begin();
930     while (I != Insts.end() && isPrefix(*I))
931       ++I;
932     if (I == Insts.end())
933       return false;
934 
935     const MCInst &FirstInst = *I;
936     ++I;
937     while (I != Insts.end() && isPrefix(*I))
938       ++I;
939     if (I == Insts.end())
940       return false;
941     const MCInst &SecondInst = *I;
942 
943     if (!isConditionalBranch(SecondInst))
944       return false;
945     // Cannot fuse if the first instruction uses RIP-relative memory.
946     if (hasPCRelOperand(FirstInst))
947       return false;
948 
949     const X86::FirstMacroFusionInstKind CmpKind =
950         X86::classifyFirstOpcodeInMacroFusion(FirstInst.getOpcode());
951     if (CmpKind == X86::FirstMacroFusionInstKind::Invalid)
952       return false;
953 
954     X86::CondCode CC = static_cast<X86::CondCode>(getCondCode(SecondInst));
955     X86::SecondMacroFusionInstKind BranchKind =
956         X86::classifySecondCondCodeInMacroFusion(CC);
957     if (BranchKind == X86::SecondMacroFusionInstKind::Invalid)
958       return false;
959     return X86::isMacroFused(CmpKind, BranchKind);
960   }
961 
962   bool
963   evaluateX86MemoryOperand(const MCInst &Inst, unsigned *BaseRegNum,
964                            int64_t *ScaleImm, unsigned *IndexRegNum,
965                            int64_t *DispImm, unsigned *SegmentRegNum,
966                            const MCExpr **DispExpr = nullptr) const override {
967     assert(BaseRegNum && ScaleImm && IndexRegNum && SegmentRegNum &&
968            "one of the input pointers is null");
969     int MemOpNo = getMemoryOperandNo(Inst);
970     if (MemOpNo < 0)
971       return false;
972     unsigned MemOpOffset = static_cast<unsigned>(MemOpNo);
973 
974     if (MemOpOffset + X86::AddrSegmentReg >= MCPlus::getNumPrimeOperands(Inst))
975       return false;
976 
977     const MCOperand &Base = Inst.getOperand(MemOpOffset + X86::AddrBaseReg);
978     const MCOperand &Scale = Inst.getOperand(MemOpOffset + X86::AddrScaleAmt);
979     const MCOperand &Index = Inst.getOperand(MemOpOffset + X86::AddrIndexReg);
980     const MCOperand &Disp = Inst.getOperand(MemOpOffset + X86::AddrDisp);
981     const MCOperand &Segment =
982         Inst.getOperand(MemOpOffset + X86::AddrSegmentReg);
983 
984     // Make sure it is a well-formed memory operand.
985     if (!Base.isReg() || !Scale.isImm() || !Index.isReg() ||
986         (!Disp.isImm() && !Disp.isExpr()) || !Segment.isReg())
987       return false;
988 
989     *BaseRegNum = Base.getReg();
990     *ScaleImm = Scale.getImm();
991     *IndexRegNum = Index.getReg();
992     if (Disp.isImm()) {
993       assert(DispImm && "DispImm needs to be set");
994       *DispImm = Disp.getImm();
995       if (DispExpr)
996         *DispExpr = nullptr;
997     } else {
998       assert(DispExpr && "DispExpr needs to be set");
999       *DispExpr = Disp.getExpr();
1000       if (DispImm)
1001         *DispImm = 0;
1002     }
1003     *SegmentRegNum = Segment.getReg();
1004     return true;
1005   }
1006 
1007   bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target,
1008                                 uint64_t Address,
1009                                 uint64_t Size) const override {
1010     unsigned      BaseRegNum;
1011     int64_t       ScaleValue;
1012     unsigned      IndexRegNum;
1013     int64_t       DispValue;
1014     unsigned      SegRegNum;
1015     const MCExpr *DispExpr = nullptr;
1016     if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
1017                                   &DispValue, &SegRegNum, &DispExpr))
1018       return false;
1019 
1020     // Make sure it's a well-formed addressing we can statically evaluate.
1021     if ((BaseRegNum != X86::RIP && BaseRegNum != X86::NoRegister) ||
1022         IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister ||
1023         DispExpr)
1024       return false;
1025 
1026     Target = DispValue;
1027     if (BaseRegNum == X86::RIP) {
1028       assert(Size != 0 && "instruction size required in order to statically "
1029                           "evaluate RIP-relative address");
1030       Target += Address + Size;
1031     }
1032     return true;
1033   }
1034 
1035   MCInst::iterator getMemOperandDisp(MCInst &Inst) const override {
1036     int MemOpNo = getMemoryOperandNo(Inst);
1037     if (MemOpNo < 0)
1038       return Inst.end();
1039     return Inst.begin() + (MemOpNo + X86::AddrDisp);
1040   }
1041 
1042   bool replaceMemOperandDisp(MCInst &Inst, MCOperand Operand) const override {
1043     MCOperand *OI = getMemOperandDisp(Inst);
1044     if (OI == Inst.end())
1045       return false;
1046     *OI = Operand;
1047     return true;
1048   }
1049 
1050   /// Get the registers used as function parameters.
1051   /// This function is specific to the x86_64 abi on Linux.
1052   BitVector getRegsUsedAsParams() const override {
1053     BitVector Regs = BitVector(RegInfo->getNumRegs(), false);
1054     Regs |= getAliases(X86::RSI);
1055     Regs |= getAliases(X86::RDI);
1056     Regs |= getAliases(X86::RDX);
1057     Regs |= getAliases(X86::RCX);
1058     Regs |= getAliases(X86::R8);
1059     Regs |= getAliases(X86::R9);
1060     return Regs;
1061   }
1062 
1063   void getCalleeSavedRegs(BitVector &Regs) const override {
1064     Regs |= getAliases(X86::RBX);
1065     Regs |= getAliases(X86::RBP);
1066     Regs |= getAliases(X86::R12);
1067     Regs |= getAliases(X86::R13);
1068     Regs |= getAliases(X86::R14);
1069     Regs |= getAliases(X86::R15);
1070   }
1071 
1072   void getDefaultDefIn(BitVector &Regs) const override {
1073     assert(Regs.size() >= RegInfo->getNumRegs() &&
1074            "The size of BitVector is less than RegInfo->getNumRegs().");
1075     Regs.set(X86::RAX);
1076     Regs.set(X86::RCX);
1077     Regs.set(X86::RDX);
1078     Regs.set(X86::RSI);
1079     Regs.set(X86::RDI);
1080     Regs.set(X86::R8);
1081     Regs.set(X86::R9);
1082     Regs.set(X86::XMM0);
1083     Regs.set(X86::XMM1);
1084     Regs.set(X86::XMM2);
1085     Regs.set(X86::XMM3);
1086     Regs.set(X86::XMM4);
1087     Regs.set(X86::XMM5);
1088     Regs.set(X86::XMM6);
1089     Regs.set(X86::XMM7);
1090   }
1091 
1092   void getDefaultLiveOut(BitVector &Regs) const override {
1093     assert(Regs.size() >= RegInfo->getNumRegs() &&
1094            "The size of BitVector is less than RegInfo->getNumRegs().");
1095     Regs |= getAliases(X86::RAX);
1096     Regs |= getAliases(X86::RDX);
1097     Regs |= getAliases(X86::RCX);
1098     Regs |= getAliases(X86::XMM0);
1099     Regs |= getAliases(X86::XMM1);
1100   }
1101 
1102   void getGPRegs(BitVector &Regs, bool IncludeAlias) const override {
1103     if (IncludeAlias) {
1104       Regs |= getAliases(X86::RAX);
1105       Regs |= getAliases(X86::RBX);
1106       Regs |= getAliases(X86::RBP);
1107       Regs |= getAliases(X86::RSI);
1108       Regs |= getAliases(X86::RDI);
1109       Regs |= getAliases(X86::RDX);
1110       Regs |= getAliases(X86::RCX);
1111       Regs |= getAliases(X86::R8);
1112       Regs |= getAliases(X86::R9);
1113       Regs |= getAliases(X86::R10);
1114       Regs |= getAliases(X86::R11);
1115       Regs |= getAliases(X86::R12);
1116       Regs |= getAliases(X86::R13);
1117       Regs |= getAliases(X86::R14);
1118       Regs |= getAliases(X86::R15);
1119       return;
1120     }
1121     Regs.set(X86::RAX);
1122     Regs.set(X86::RBX);
1123     Regs.set(X86::RBP);
1124     Regs.set(X86::RSI);
1125     Regs.set(X86::RDI);
1126     Regs.set(X86::RDX);
1127     Regs.set(X86::RCX);
1128     Regs.set(X86::R8);
1129     Regs.set(X86::R9);
1130     Regs.set(X86::R10);
1131     Regs.set(X86::R11);
1132     Regs.set(X86::R12);
1133     Regs.set(X86::R13);
1134     Regs.set(X86::R14);
1135     Regs.set(X86::R15);
1136   }
1137 
1138   void getClassicGPRegs(BitVector &Regs) const override {
1139     Regs |= getAliases(X86::RAX);
1140     Regs |= getAliases(X86::RBX);
1141     Regs |= getAliases(X86::RBP);
1142     Regs |= getAliases(X86::RSI);
1143     Regs |= getAliases(X86::RDI);
1144     Regs |= getAliases(X86::RDX);
1145     Regs |= getAliases(X86::RCX);
1146   }
1147 
1148   void getRepRegs(BitVector &Regs) const override {
1149     Regs |= getAliases(X86::RCX);
1150   }
1151 
1152   MCPhysReg getAliasSized(MCPhysReg Reg, uint8_t Size) const override {
1153     switch (Reg) {
1154     case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: case X86::AH:
1155       switch (Size) {
1156       case 8: return X86::RAX;       case 4: return X86::EAX;
1157       case 2: return X86::AX;        case 1: return X86::AL;
1158       default: llvm_unreachable("Unexpected size");
1159       }
1160     case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: case X86::BH:
1161       switch (Size) {
1162       case 8: return X86::RBX;       case 4: return X86::EBX;
1163       case 2: return X86::BX;        case 1: return X86::BL;
1164       default: llvm_unreachable("Unexpected size");
1165       }
1166     case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: case X86::DH:
1167       switch (Size) {
1168       case 8: return X86::RDX;       case 4: return X86::EDX;
1169       case 2: return X86::DX;        case 1: return X86::DL;
1170       default: llvm_unreachable("Unexpected size");
1171       }
1172     case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL:
1173       switch (Size) {
1174       case 8: return X86::RDI;       case 4: return X86::EDI;
1175       case 2: return X86::DI;        case 1: return X86::DIL;
1176       default: llvm_unreachable("Unexpected size");
1177       }
1178     case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL:
1179       switch (Size) {
1180       case 8: return X86::RSI;       case 4: return X86::ESI;
1181       case 2: return X86::SI;        case 1: return X86::SIL;
1182       default: llvm_unreachable("Unexpected size");
1183       }
1184     case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: case X86::CH:
1185       switch (Size) {
1186       case 8: return X86::RCX;       case 4: return X86::ECX;
1187       case 2: return X86::CX;        case 1: return X86::CL;
1188       default: llvm_unreachable("Unexpected size");
1189       }
1190     case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL:
1191       switch (Size) {
1192       case 8: return X86::RSP;       case 4: return X86::ESP;
1193       case 2: return X86::SP;        case 1: return X86::SPL;
1194       default: llvm_unreachable("Unexpected size");
1195       }
1196     case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL:
1197       switch (Size) {
1198       case 8: return X86::RBP;       case 4: return X86::EBP;
1199       case 2: return X86::BP;        case 1: return X86::BPL;
1200       default: llvm_unreachable("Unexpected size");
1201       }
1202   case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B:
1203       switch (Size) {
1204       case 8: return X86::R8;        case 4: return X86::R8D;
1205       case 2: return X86::R8W;       case 1: return X86::R8B;
1206       default: llvm_unreachable("Unexpected size");
1207       }
1208     case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B:
1209       switch (Size) {
1210       case 8: return X86::R9;        case 4: return X86::R9D;
1211       case 2: return X86::R9W;       case 1: return X86::R9B;
1212       default: llvm_unreachable("Unexpected size");
1213       }
1214     case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
1215       switch (Size) {
1216       case 8: return X86::R10;        case 4: return X86::R10D;
1217       case 2: return X86::R10W;       case 1: return X86::R10B;
1218       default: llvm_unreachable("Unexpected size");
1219       }
1220     case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
1221       switch (Size) {
1222       case 8: return X86::R11;        case 4: return X86::R11D;
1223       case 2: return X86::R11W;       case 1: return X86::R11B;
1224       default: llvm_unreachable("Unexpected size");
1225       }
1226     case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
1227       switch (Size) {
1228       case 8: return X86::R12;        case 4: return X86::R12D;
1229       case 2: return X86::R12W;       case 1: return X86::R12B;
1230       default: llvm_unreachable("Unexpected size");
1231       }
1232     case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
1233       switch (Size) {
1234       case 8: return X86::R13;        case 4: return X86::R13D;
1235       case 2: return X86::R13W;       case 1: return X86::R13B;
1236       default: llvm_unreachable("Unexpected size");
1237       }
1238     case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
1239       switch (Size) {
1240       case 8: return X86::R14;        case 4: return X86::R14D;
1241       case 2: return X86::R14W;       case 1: return X86::R14B;
1242       default: llvm_unreachable("Unexpected size");
1243       }
1244     case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
1245       switch (Size) {
1246       case 8: return X86::R15;        case 4: return X86::R15D;
1247       case 2: return X86::R15W;       case 1: return X86::R15B;
1248       default: llvm_unreachable("Unexpected size");
1249       }
1250     default:
1251       dbgs() << Reg << " (get alias sized)\n";
1252       llvm_unreachable("Unexpected reg number");
1253       break;
1254     }
1255   }
1256 
1257   bool isUpper8BitReg(MCPhysReg Reg) const override {
1258     switch (Reg) {
1259     case X86::AH:
1260     case X86::BH:
1261     case X86::CH:
1262     case X86::DH:
1263       return true;
1264     default:
1265       return false;
1266     }
1267   }
1268 
1269   bool cannotUseREX(const MCInst &Inst) const override {
1270     switch (Inst.getOpcode()) {
1271     case X86::MOV8mr_NOREX:
1272     case X86::MOV8rm_NOREX:
1273     case X86::MOV8rr_NOREX:
1274     case X86::MOVSX32rm8_NOREX:
1275     case X86::MOVSX32rr8_NOREX:
1276     case X86::MOVZX32rm8_NOREX:
1277     case X86::MOVZX32rr8_NOREX:
1278     case X86::MOV8mr:
1279     case X86::MOV8rm:
1280     case X86::MOV8rr:
1281     case X86::MOVSX32rm8:
1282     case X86::MOVSX32rr8:
1283     case X86::MOVZX32rm8:
1284     case X86::MOVZX32rr8:
1285     case X86::TEST8ri:
1286       for (int I = 0, E = MCPlus::getNumPrimeOperands(Inst); I != E; ++I) {
1287         const MCOperand &Operand = Inst.getOperand(I);
1288         if (!Operand.isReg())
1289           continue;
1290         if (isUpper8BitReg(Operand.getReg()))
1291           return true;
1292       }
1293       LLVM_FALLTHROUGH;
1294     default:
1295       return false;
1296     }
1297   }
1298 
1299   bool isStackAccess(const MCInst &Inst, bool &IsLoad, bool &IsStore,
1300                      bool &IsStoreFromReg, MCPhysReg &Reg, int32_t &SrcImm,
1301                      uint16_t &StackPtrReg, int64_t &StackOffset, uint8_t &Size,
1302                      bool &IsSimple, bool &IsIndexed) const override {
1303     // Detect simple push/pop cases first
1304     if (int Sz = getPushSize(Inst)) {
1305       IsLoad = false;
1306       IsStore = true;
1307       IsStoreFromReg = true;
1308       StackPtrReg = X86::RSP;
1309       StackOffset = -Sz;
1310       Size = Sz;
1311       IsSimple = true;
1312       if (Inst.getOperand(0).isImm())
1313         SrcImm = Inst.getOperand(0).getImm();
1314       else if (Inst.getOperand(0).isReg())
1315         Reg = Inst.getOperand(0).getReg();
1316       else
1317         IsSimple = false;
1318 
1319       return true;
1320     }
1321     if (int Sz = getPopSize(Inst)) {
1322       IsLoad = true;
1323       IsStore = false;
1324       if (Inst.getNumOperands() == 0 || !Inst.getOperand(0).isReg()) {
1325         IsSimple = false;
1326       } else {
1327         Reg = Inst.getOperand(0).getReg();
1328         IsSimple = true;
1329       }
1330       StackPtrReg = X86::RSP;
1331       StackOffset = 0;
1332       Size = Sz;
1333       return true;
1334     }
1335 
1336     struct InstInfo {
1337       // Size in bytes that Inst loads from memory.
1338       uint8_t DataSize;
1339       bool IsLoad;
1340       bool IsStore;
1341       bool StoreFromReg;
1342       bool Simple;
1343     };
1344 
1345     InstInfo I;
1346     int MemOpNo = getMemoryOperandNo(Inst);
1347     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
1348     // If it is not dealing with a memory operand, we discard it
1349     if (MemOpNo == -1 || MCII.isCall())
1350       return false;
1351 
1352     switch (Inst.getOpcode()) {
1353     default: {
1354       uint8_t Sz = 0;
1355       bool IsLoad = MCII.mayLoad();
1356       bool IsStore = MCII.mayStore();
1357       // Is it LEA? (deals with memory but is not loading nor storing)
1358       if (!IsLoad && !IsStore)
1359         return false;
1360 
1361       // Try to guess data size involved in the load/store by looking at the
1362       // register size. If there's no reg involved, return 0 as size, meaning
1363       // we don't know.
1364       for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
1365         if (MCII.OpInfo[I].OperandType != MCOI::OPERAND_REGISTER)
1366           continue;
1367         if (static_cast<int>(I) >= MemOpNo && I < X86::AddrNumOperands)
1368           continue;
1369         Sz = RegInfo->getRegClass(MCII.OpInfo[I].RegClass).getSizeInBits() / 8;
1370         break;
1371       }
1372       I = {Sz, IsLoad, IsStore, false, false};
1373       break;
1374     }
1375     case X86::MOV16rm: I = {2, true, false, false, true}; break;
1376     case X86::MOV32rm: I = {4, true, false, false, true}; break;
1377     case X86::MOV64rm: I = {8, true, false, false, true}; break;
1378     case X86::MOV16mr: I = {2, false, true, true, true};  break;
1379     case X86::MOV32mr: I = {4, false, true, true, true};  break;
1380     case X86::MOV64mr: I = {8, false, true, true, true};  break;
1381     case X86::MOV16mi: I = {2, false, true, false, true}; break;
1382     case X86::MOV32mi: I = {4, false, true, false, true}; break;
1383     } // end switch (Inst.getOpcode())
1384 
1385     unsigned BaseRegNum;
1386     int64_t ScaleValue;
1387     unsigned IndexRegNum;
1388     int64_t DispValue;
1389     unsigned SegRegNum;
1390     const MCExpr *DispExpr;
1391     if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
1392                                   &DispValue, &SegRegNum, &DispExpr)) {
1393       LLVM_DEBUG(dbgs() << "Evaluate failed on ");
1394       LLVM_DEBUG(Inst.dump());
1395       return false;
1396     }
1397 
1398     // Make sure it's a stack access
1399     if (BaseRegNum != X86::RBP && BaseRegNum != X86::RSP)
1400       return false;
1401 
1402     IsLoad = I.IsLoad;
1403     IsStore = I.IsStore;
1404     IsStoreFromReg = I.StoreFromReg;
1405     Size = I.DataSize;
1406     IsSimple = I.Simple;
1407     StackPtrReg = BaseRegNum;
1408     StackOffset = DispValue;
1409     IsIndexed = IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister;
1410 
1411     if (!I.Simple)
1412       return true;
1413 
1414     // Retrieve related register in simple MOV from/to stack operations.
1415     unsigned MemOpOffset = static_cast<unsigned>(MemOpNo);
1416     if (I.IsLoad) {
1417       MCOperand RegOpnd = Inst.getOperand(0);
1418       assert(RegOpnd.isReg() && "unexpected destination operand");
1419       Reg = RegOpnd.getReg();
1420     } else if (I.IsStore) {
1421       MCOperand SrcOpnd =
1422           Inst.getOperand(MemOpOffset + X86::AddrSegmentReg + 1);
1423       if (I.StoreFromReg) {
1424         assert(SrcOpnd.isReg() && "unexpected source operand");
1425         Reg = SrcOpnd.getReg();
1426       } else {
1427         assert(SrcOpnd.isImm() && "unexpected source operand");
1428         SrcImm = SrcOpnd.getImm();
1429       }
1430     }
1431 
1432     return true;
1433   }
1434 
1435   void changeToPushOrPop(MCInst &Inst) const override {
1436     assert(!isPush(Inst) && !isPop(Inst));
1437 
1438     struct InstInfo {
1439       // Size in bytes that Inst loads from memory.
1440       uint8_t DataSize;
1441       bool IsLoad;
1442       bool StoreFromReg;
1443     };
1444 
1445     InstInfo I;
1446     switch (Inst.getOpcode()) {
1447     default: {
1448       llvm_unreachable("Unhandled opcode");
1449       return;
1450     }
1451     case X86::MOV16rm: I = {2, true, false}; break;
1452     case X86::MOV32rm: I = {4, true, false}; break;
1453     case X86::MOV64rm: I = {8, true, false}; break;
1454     case X86::MOV16mr: I = {2, false, true};  break;
1455     case X86::MOV32mr: I = {4, false, true};  break;
1456     case X86::MOV64mr: I = {8, false, true};  break;
1457     case X86::MOV16mi: I = {2, false, false}; break;
1458     case X86::MOV32mi: I = {4, false, false}; break;
1459     } // end switch (Inst.getOpcode())
1460 
1461     unsigned BaseRegNum;
1462     int64_t ScaleValue;
1463     unsigned IndexRegNum;
1464     int64_t DispValue;
1465     unsigned SegRegNum;
1466     const MCExpr *DispExpr;
1467     if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
1468                                   &DispValue, &SegRegNum, &DispExpr)) {
1469       llvm_unreachable("Evaluate failed");
1470       return;
1471     }
1472     // Make sure it's a stack access
1473     if (BaseRegNum != X86::RBP && BaseRegNum != X86::RSP) {
1474       llvm_unreachable("Not a stack access");
1475       return;
1476     }
1477 
1478     unsigned MemOpOffset = getMemoryOperandNo(Inst);
1479     unsigned NewOpcode = 0;
1480     if (I.IsLoad) {
1481       switch (I.DataSize) {
1482       case 2: NewOpcode = X86::POP16r; break;
1483       case 4: NewOpcode = X86::POP32r; break;
1484       case 8: NewOpcode = X86::POP64r; break;
1485       default:
1486         llvm_unreachable("Unexpected size");
1487       }
1488       unsigned RegOpndNum = Inst.getOperand(0).getReg();
1489       Inst.clear();
1490       Inst.setOpcode(NewOpcode);
1491       Inst.addOperand(MCOperand::createReg(RegOpndNum));
1492     } else {
1493       MCOperand SrcOpnd =
1494           Inst.getOperand(MemOpOffset + X86::AddrSegmentReg + 1);
1495       if (I.StoreFromReg) {
1496         switch (I.DataSize) {
1497         case 2: NewOpcode = X86::PUSH16r; break;
1498         case 4: NewOpcode = X86::PUSH32r; break;
1499         case 8: NewOpcode = X86::PUSH64r; break;
1500         default:
1501           llvm_unreachable("Unexpected size");
1502         }
1503         assert(SrcOpnd.isReg() && "Unexpected source operand");
1504         unsigned RegOpndNum = SrcOpnd.getReg();
1505         Inst.clear();
1506         Inst.setOpcode(NewOpcode);
1507         Inst.addOperand(MCOperand::createReg(RegOpndNum));
1508       } else {
1509         switch (I.DataSize) {
1510         case 2: NewOpcode = X86::PUSH16i8; break;
1511         case 4: NewOpcode = X86::PUSH32i8; break;
1512         case 8: NewOpcode = X86::PUSH64i32; break;
1513         default:
1514           llvm_unreachable("Unexpected size");
1515         }
1516         assert(SrcOpnd.isImm() && "Unexpected source operand");
1517         int64_t SrcImm = SrcOpnd.getImm();
1518         Inst.clear();
1519         Inst.setOpcode(NewOpcode);
1520         Inst.addOperand(MCOperand::createImm(SrcImm));
1521       }
1522     }
1523   }
1524 
1525   bool isStackAdjustment(const MCInst &Inst) const override {
1526     switch (Inst.getOpcode()) {
1527     default:
1528       return false;
1529     case X86::SUB64ri32:
1530     case X86::SUB64ri8:
1531     case X86::ADD64ri32:
1532     case X86::ADD64ri8:
1533     case X86::LEA64r:
1534       break;
1535     }
1536 
1537     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
1538     for (int I = 0, E = MCII.getNumDefs(); I != E; ++I) {
1539       const MCOperand &Operand = Inst.getOperand(I);
1540       if (Operand.isReg() && Operand.getReg() == X86::RSP)
1541         return true;
1542     }
1543     return false;
1544   }
1545 
1546   bool evaluateSimple(const MCInst &Inst, int64_t &Output,
1547                       std::pair<MCPhysReg, int64_t> Input1,
1548                       std::pair<MCPhysReg, int64_t> Input2) const override {
1549 
1550     auto getOperandVal = [&](MCPhysReg Reg) -> ErrorOr<int64_t> {
1551       if (Reg == Input1.first)
1552         return Input1.second;
1553       if (Reg == Input2.first)
1554         return Input2.second;
1555       return make_error_code(errc::result_out_of_range);
1556     };
1557 
1558     switch (Inst.getOpcode()) {
1559     default:
1560       return false;
1561 
1562     case X86::AND64ri32:
1563     case X86::AND64ri8:
1564       if (!Inst.getOperand(2).isImm())
1565         return false;
1566       if (ErrorOr<int64_t> InputVal =
1567               getOperandVal(Inst.getOperand(1).getReg()))
1568         Output = *InputVal & Inst.getOperand(2).getImm();
1569       else
1570         return false;
1571       break;
1572     case X86::SUB64ri32:
1573     case X86::SUB64ri8:
1574       if (!Inst.getOperand(2).isImm())
1575         return false;
1576       if (ErrorOr<int64_t> InputVal =
1577               getOperandVal(Inst.getOperand(1).getReg()))
1578         Output = *InputVal - Inst.getOperand(2).getImm();
1579       else
1580         return false;
1581       break;
1582     case X86::ADD64ri32:
1583     case X86::ADD64ri8:
1584       if (!Inst.getOperand(2).isImm())
1585         return false;
1586       if (ErrorOr<int64_t> InputVal =
1587               getOperandVal(Inst.getOperand(1).getReg()))
1588         Output = *InputVal + Inst.getOperand(2).getImm();
1589       else
1590         return false;
1591       break;
1592     case X86::ADD64i32:
1593       if (!Inst.getOperand(0).isImm())
1594         return false;
1595       if (ErrorOr<int64_t> InputVal = getOperandVal(X86::RAX))
1596         Output = *InputVal + Inst.getOperand(0).getImm();
1597       else
1598         return false;
1599       break;
1600 
1601     case X86::LEA64r: {
1602       unsigned BaseRegNum;
1603       int64_t ScaleValue;
1604       unsigned IndexRegNum;
1605       int64_t DispValue;
1606       unsigned SegRegNum;
1607       const MCExpr *DispExpr = nullptr;
1608       if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue,
1609                                     &IndexRegNum, &DispValue, &SegRegNum,
1610                                     &DispExpr))
1611         return false;
1612 
1613       if (BaseRegNum == X86::NoRegister || IndexRegNum != X86::NoRegister ||
1614           SegRegNum != X86::NoRegister || DispExpr)
1615         return false;
1616 
1617       if (ErrorOr<int64_t> InputVal = getOperandVal(BaseRegNum))
1618         Output = *InputVal + DispValue;
1619       else
1620         return false;
1621 
1622       break;
1623     }
1624     }
1625     return true;
1626   }
1627 
1628   bool isRegToRegMove(const MCInst &Inst, MCPhysReg &From,
1629                       MCPhysReg &To) const override {
1630     switch (Inst.getOpcode()) {
1631     default:
1632       return false;
1633     case X86::LEAVE:
1634     case X86::LEAVE64:
1635       To = getStackPointer();
1636       From = getFramePointer();
1637       return true;
1638     case X86::MOV64rr:
1639       To = Inst.getOperand(0).getReg();
1640       From = Inst.getOperand(1).getReg();
1641       return true;
1642     }
1643   }
1644 
1645   MCPhysReg getStackPointer() const override { return X86::RSP; }
1646   MCPhysReg getFramePointer() const override { return X86::RBP; }
1647   MCPhysReg getFlagsReg() const override { return X86::EFLAGS; }
1648 
1649   bool escapesVariable(const MCInst &Inst,
1650                        bool HasFramePointer) const override {
1651     int MemOpNo = getMemoryOperandNo(Inst);
1652     const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
1653     const unsigned NumDefs = MCII.getNumDefs();
1654     static BitVector SPBPAliases(BitVector(getAliases(X86::RSP)) |=
1655                                  getAliases(X86::RBP));
1656     static BitVector SPAliases(getAliases(X86::RSP));
1657 
1658     // FIXME: PUSH can be technically a leak, but let's ignore this for now
1659     // because a lot of harmless prologue code will spill SP to the stack.
1660     // Unless push is clearly pushing an object address to the stack as
1661     // demonstrated by having a MemOp.
1662     bool IsPush = isPush(Inst);
1663     if (IsPush && MemOpNo == -1)
1664       return false;
1665 
1666     // We use this to detect LEA (has memop but does not access mem)
1667     bool AccessMem = MCII.mayLoad() || MCII.mayStore();
1668     bool DoesLeak = false;
1669     for (int I = 0, E = MCPlus::getNumPrimeOperands(Inst); I != E; ++I) {
1670       // Ignore if SP/BP is used to dereference memory -- that's fine
1671       if (MemOpNo != -1 && !IsPush && AccessMem && I >= MemOpNo &&
1672           I <= MemOpNo + 5)
1673         continue;
1674       // Ignore if someone is writing to SP/BP
1675       if (I < static_cast<int>(NumDefs))
1676         continue;
1677 
1678       const MCOperand &Operand = Inst.getOperand(I);
1679       if (HasFramePointer && Operand.isReg() && SPBPAliases[Operand.getReg()]) {
1680         DoesLeak = true;
1681         break;
1682       }
1683       if (!HasFramePointer && Operand.isReg() && SPAliases[Operand.getReg()]) {
1684         DoesLeak = true;
1685         break;
1686       }
1687     }
1688 
1689     // If potential leak, check if it is not just writing to itself/sp/bp
1690     if (DoesLeak) {
1691       for (int I = 0, E = NumDefs; I != E; ++I) {
1692         const MCOperand &Operand = Inst.getOperand(I);
1693         if (HasFramePointer && Operand.isReg() &&
1694             SPBPAliases[Operand.getReg()]) {
1695           DoesLeak = false;
1696           break;
1697         }
1698         if (!HasFramePointer && Operand.isReg() &&
1699             SPAliases[Operand.getReg()]) {
1700           DoesLeak = false;
1701           break;
1702         }
1703       }
1704     }
1705     return DoesLeak;
1706   }
1707 
1708   bool addToImm(MCInst &Inst, int64_t &Amt, MCContext *Ctx) const override {
1709     unsigned ImmOpNo = -1U;
1710     int MemOpNo = getMemoryOperandNo(Inst);
1711     if (MemOpNo != -1)
1712       ImmOpNo = MemOpNo + X86::AddrDisp;
1713     else
1714       for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
1715            ++Index)
1716         if (Inst.getOperand(Index).isImm())
1717           ImmOpNo = Index;
1718     if (ImmOpNo == -1U)
1719       return false;
1720 
1721     MCOperand &Operand = Inst.getOperand(ImmOpNo);
1722     Amt += Operand.getImm();
1723     Operand.setImm(Amt);
1724     // Check for the need for relaxation
1725     if (int64_t(Amt) == int64_t(int8_t(Amt)))
1726       return true;
1727 
1728     // Relax instruction
1729     switch (Inst.getOpcode()) {
1730     case X86::SUB64ri8:
1731       Inst.setOpcode(X86::SUB64ri32);
1732       break;
1733     case X86::ADD64ri8:
1734       Inst.setOpcode(X86::ADD64ri32);
1735       break;
1736     default:
1737       // No need for relaxation
1738       break;
1739     }
1740     return true;
1741   }
1742 
1743   /// TODO: this implementation currently works for the most common opcodes that
1744   /// load from memory. It can be extended to work with memory store opcodes as
1745   /// well as more memory load opcodes.
1746   bool replaceMemOperandWithImm(MCInst &Inst, StringRef ConstantData,
1747                                 uint64_t Offset) const override {
1748     enum CheckSignExt : uint8_t {
1749       NOCHECK = 0,
1750       CHECK8,
1751       CHECK32,
1752     };
1753 
1754     using CheckList = std::vector<std::pair<CheckSignExt, unsigned>>;
1755     struct InstInfo {
1756       // Size in bytes that Inst loads from memory.
1757       uint8_t DataSize;
1758 
1759       // True when the target operand has to be duplicated because the opcode
1760       // expects a LHS operand.
1761       bool HasLHS;
1762 
1763       // List of checks and corresponding opcodes to be used. We try to use the
1764       // smallest possible immediate value when various sizes are available,
1765       // hence we may need to check whether a larger constant fits in a smaller
1766       // immediate.
1767       CheckList Checks;
1768     };
1769 
1770     InstInfo I;
1771 
1772     switch (Inst.getOpcode()) {
1773     default: {
1774       switch (getPopSize(Inst)) {
1775       case 2:            I = {2, false, {{NOCHECK, X86::MOV16ri}}};  break;
1776       case 4:            I = {4, false, {{NOCHECK, X86::MOV32ri}}};  break;
1777       case 8:            I = {8, false, {{CHECK32, X86::MOV64ri32},
1778                                          {NOCHECK, X86::MOV64rm}}};  break;
1779       default:           return false;
1780       }
1781       break;
1782     }
1783 
1784     // MOV
1785     case X86::MOV8rm:      I = {1, false, {{NOCHECK, X86::MOV8ri}}};   break;
1786     case X86::MOV16rm:     I = {2, false, {{NOCHECK, X86::MOV16ri}}};  break;
1787     case X86::MOV32rm:     I = {4, false, {{NOCHECK, X86::MOV32ri}}};  break;
1788     case X86::MOV64rm:     I = {8, false, {{CHECK32, X86::MOV64ri32},
1789                                            {NOCHECK, X86::MOV64rm}}};  break;
1790 
1791     // MOVZX
1792     case X86::MOVZX16rm8:  I = {1, false, {{NOCHECK, X86::MOV16ri}}};  break;
1793     case X86::MOVZX32rm8:  I = {1, false, {{NOCHECK, X86::MOV32ri}}};  break;
1794     case X86::MOVZX32rm16: I = {2, false, {{NOCHECK, X86::MOV32ri}}};  break;
1795 
1796     // CMP
1797     case X86::CMP8rm:      I = {1, false, {{NOCHECK, X86::CMP8ri}}};   break;
1798     case X86::CMP16rm:     I = {2, false, {{CHECK8,  X86::CMP16ri8},
1799                                            {NOCHECK, X86::CMP16ri}}};  break;
1800     case X86::CMP32rm:     I = {4, false, {{CHECK8,  X86::CMP32ri8},
1801                                            {NOCHECK, X86::CMP32ri}}};  break;
1802     case X86::CMP64rm:     I = {8, false, {{CHECK8,  X86::CMP64ri8},
1803                                            {CHECK32, X86::CMP64ri32},
1804                                            {NOCHECK, X86::CMP64rm}}};  break;
1805 
1806     // TEST
1807     case X86::TEST8mr:     I = {1, false, {{NOCHECK, X86::TEST8ri}}};  break;
1808     case X86::TEST16mr:    I = {2, false, {{NOCHECK, X86::TEST16ri}}}; break;
1809     case X86::TEST32mr:    I = {4, false, {{NOCHECK, X86::TEST32ri}}}; break;
1810     case X86::TEST64mr:    I = {8, false, {{CHECK32, X86::TEST64ri32},
1811                                            {NOCHECK, X86::TEST64mr}}}; break;
1812 
1813     // ADD
1814     case X86::ADD8rm:      I = {1, true,  {{NOCHECK, X86::ADD8ri}}};   break;
1815     case X86::ADD16rm:     I = {2, true,  {{CHECK8,  X86::ADD16ri8},
1816                                            {NOCHECK, X86::ADD16ri}}};  break;
1817     case X86::ADD32rm:     I = {4, true,  {{CHECK8,  X86::ADD32ri8},
1818                                            {NOCHECK, X86::ADD32ri}}};  break;
1819     case X86::ADD64rm:     I = {8, true,  {{CHECK8,  X86::ADD64ri8},
1820                                            {CHECK32, X86::ADD64ri32},
1821                                            {NOCHECK, X86::ADD64rm}}};  break;
1822 
1823     // SUB
1824     case X86::SUB8rm:      I = {1, true,  {{NOCHECK, X86::SUB8ri}}};   break;
1825     case X86::SUB16rm:     I = {2, true,  {{CHECK8,  X86::SUB16ri8},
1826                                            {NOCHECK, X86::SUB16ri}}};  break;
1827     case X86::SUB32rm:     I = {4, true,  {{CHECK8,  X86::SUB32ri8},
1828                                            {NOCHECK, X86::SUB32ri}}};  break;
1829     case X86::SUB64rm:     I = {8, true,  {{CHECK8,  X86::SUB64ri8},
1830                                            {CHECK32, X86::SUB64ri32},
1831                                            {NOCHECK, X86::SUB64rm}}};  break;
1832 
1833     // AND
1834     case X86::AND8rm:      I = {1, true,  {{NOCHECK, X86::AND8ri}}};   break;
1835     case X86::AND16rm:     I = {2, true,  {{CHECK8,  X86::AND16ri8},
1836                                            {NOCHECK, X86::AND16ri}}};  break;
1837     case X86::AND32rm:     I = {4, true,  {{CHECK8,  X86::AND32ri8},
1838                                            {NOCHECK, X86::AND32ri}}};  break;
1839     case X86::AND64rm:     I = {8, true,  {{CHECK8,  X86::AND64ri8},
1840                                            {CHECK32, X86::AND64ri32},
1841                                            {NOCHECK, X86::AND64rm}}};  break;
1842 
1843     // OR
1844     case X86::OR8rm:       I = {1, true,  {{NOCHECK, X86::OR8ri}}};    break;
1845     case X86::OR16rm:      I = {2, true,  {{CHECK8,  X86::OR16ri8},
1846                                            {NOCHECK, X86::OR16ri}}};   break;
1847     case X86::OR32rm:      I = {4, true,  {{CHECK8,  X86::OR32ri8},
1848                                            {NOCHECK, X86::OR32ri}}};   break;
1849     case X86::OR64rm:      I = {8, true,  {{CHECK8,  X86::OR64ri8},
1850                                            {CHECK32, X86::OR64ri32},
1851                                            {NOCHECK, X86::OR64rm}}};   break;
1852 
1853     // XOR
1854     case X86::XOR8rm:      I = {1, true,  {{NOCHECK, X86::XOR8ri}}};   break;
1855     case X86::XOR16rm:     I = {2, true,  {{CHECK8,  X86::XOR16ri8},
1856                                            {NOCHECK, X86::XOR16ri}}};  break;
1857     case X86::XOR32rm:     I = {4, true,  {{CHECK8,  X86::XOR32ri8},
1858                                            {NOCHECK, X86::XOR32ri}}};  break;
1859     case X86::XOR64rm:     I = {8, true,  {{CHECK8,  X86::XOR64ri8},
1860                                            {CHECK32, X86::XOR64ri32},
1861                                            {NOCHECK, X86::XOR64rm}}};  break;
1862     }
1863 
1864     // Compute the immediate value.
1865     assert(Offset + I.DataSize <= ConstantData.size() &&
1866            "invalid offset for given constant data");
1867     int64_t ImmVal =
1868         DataExtractor(ConstantData, true, 8).getSigned(&Offset, I.DataSize);
1869 
1870     // Compute the new opcode.
1871     unsigned NewOpcode = 0;
1872     for (const std::pair<CheckSignExt, unsigned> &Check : I.Checks) {
1873       NewOpcode = Check.second;
1874       if (Check.first == NOCHECK)
1875         break;
1876       if (Check.first == CHECK8 &&
1877           ImmVal >= std::numeric_limits<int8_t>::min() &&
1878           ImmVal <= std::numeric_limits<int8_t>::max())
1879         break;
1880       if (Check.first == CHECK32 &&
1881           ImmVal >= std::numeric_limits<int32_t>::min() &&
1882           ImmVal <= std::numeric_limits<int32_t>::max())
1883         break;
1884     }
1885     if (NewOpcode == Inst.getOpcode())
1886       return false;
1887 
1888     // Modify the instruction.
1889     MCOperand ImmOp = MCOperand::createImm(ImmVal);
1890     uint32_t TargetOpNum = 0;
1891     // Test instruction does not follow the regular pattern of putting the
1892     // memory reference of a load (5 MCOperands) last in the list of operands.
1893     // Since it is not modifying the register operand, it is not treated as
1894     // a destination operand and it is not the first operand as it is in the
1895     // other instructions we treat here.
1896     if (NewOpcode == X86::TEST8ri || NewOpcode == X86::TEST16ri ||
1897         NewOpcode == X86::TEST32ri || NewOpcode == X86::TEST64ri32)
1898       TargetOpNum = getMemoryOperandNo(Inst) + X86::AddrNumOperands;
1899 
1900     MCOperand TargetOp = Inst.getOperand(TargetOpNum);
1901     Inst.clear();
1902     Inst.setOpcode(NewOpcode);
1903     Inst.addOperand(TargetOp);
1904     if (I.HasLHS)
1905       Inst.addOperand(TargetOp);
1906     Inst.addOperand(ImmOp);
1907 
1908     return true;
1909   }
1910 
1911   /// TODO: this implementation currently works for the most common opcodes that
1912   /// load from memory. It can be extended to work with memory store opcodes as
1913   /// well as more memory load opcodes.
1914   bool replaceMemOperandWithReg(MCInst &Inst, MCPhysReg RegNum) const override {
1915     unsigned NewOpcode;
1916 
1917     switch (Inst.getOpcode()) {
1918     default: {
1919       switch (getPopSize(Inst)) {
1920       case 2:            NewOpcode = X86::MOV16rr; break;
1921       case 4:            NewOpcode = X86::MOV32rr; break;
1922       case 8:            NewOpcode = X86::MOV64rr; break;
1923       default:           return false;
1924       }
1925       break;
1926     }
1927 
1928     // MOV
1929     case X86::MOV8rm:      NewOpcode = X86::MOV8rr;   break;
1930     case X86::MOV16rm:     NewOpcode = X86::MOV16rr;  break;
1931     case X86::MOV32rm:     NewOpcode = X86::MOV32rr;  break;
1932     case X86::MOV64rm:     NewOpcode = X86::MOV64rr;  break;
1933     }
1934 
1935     // Modify the instruction.
1936     MCOperand RegOp = MCOperand::createReg(RegNum);
1937     MCOperand TargetOp = Inst.getOperand(0);
1938     Inst.clear();
1939     Inst.setOpcode(NewOpcode);
1940     Inst.addOperand(TargetOp);
1941     Inst.addOperand(RegOp);
1942 
1943     return true;
1944   }
1945 
1946   bool isRedundantMove(const MCInst &Inst) const override {
1947     switch (Inst.getOpcode()) {
1948     default:
1949       return false;
1950 
1951     // MOV
1952     case X86::MOV8rr:
1953     case X86::MOV16rr:
1954     case X86::MOV32rr:
1955     case X86::MOV64rr:
1956       break;
1957     }
1958 
1959     assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg());
1960     return Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg();
1961   }
1962 
1963   bool requiresAlignedAddress(const MCInst &Inst) const override {
1964     const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
1965     for (unsigned int I = 0; I < Desc.getNumOperands(); ++I) {
1966       const MCOperandInfo &Op = Desc.OpInfo[I];
1967       if (Op.OperandType != MCOI::OPERAND_REGISTER)
1968         continue;
1969       if (Op.RegClass == X86::VR128RegClassID)
1970         return true;
1971     }
1972     return false;
1973   }
1974 
1975   bool convertJmpToTailCall(MCInst &Inst) override {
1976     if (isTailCall(Inst))
1977       return false;
1978 
1979     int NewOpcode;
1980     switch (Inst.getOpcode()) {
1981     default:
1982       return false;
1983     case X86::JMP_1:
1984     case X86::JMP_2:
1985     case X86::JMP_4:
1986       NewOpcode = X86::JMP_4;
1987       break;
1988     case X86::JMP16m:
1989     case X86::JMP32m:
1990     case X86::JMP64m:
1991       NewOpcode = X86::JMP32m;
1992       break;
1993     case X86::JMP16r:
1994     case X86::JMP32r:
1995     case X86::JMP64r:
1996       NewOpcode = X86::JMP32r;
1997       break;
1998     }
1999 
2000     Inst.setOpcode(NewOpcode);
2001     setTailCall(Inst);
2002     return true;
2003   }
2004 
2005   bool convertTailCallToJmp(MCInst &Inst) override {
2006     int NewOpcode;
2007     switch (Inst.getOpcode()) {
2008     default:
2009       return false;
2010     case X86::JMP_4:
2011       NewOpcode = X86::JMP_1;
2012       break;
2013     case X86::JMP32m:
2014       NewOpcode = X86::JMP64m;
2015       break;
2016     case X86::JMP32r:
2017       NewOpcode = X86::JMP64r;
2018       break;
2019     }
2020 
2021     Inst.setOpcode(NewOpcode);
2022     removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
2023     removeAnnotation(Inst, "Offset");
2024     return true;
2025   }
2026 
2027   bool convertTailCallToCall(MCInst &Inst) override {
2028     int NewOpcode;
2029     switch (Inst.getOpcode()) {
2030     default:
2031       return false;
2032     case X86::JMP_4:
2033       NewOpcode = X86::CALL64pcrel32;
2034       break;
2035     case X86::JMP32m:
2036       NewOpcode = X86::CALL64m;
2037       break;
2038     case X86::JMP32r:
2039       NewOpcode = X86::CALL64r;
2040       break;
2041     }
2042 
2043     Inst.setOpcode(NewOpcode);
2044     removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
2045     return true;
2046   }
2047 
2048   bool convertCallToIndirectCall(MCInst &Inst, const MCSymbol *TargetLocation,
2049                                  MCContext *Ctx) override {
2050     bool IsTailCall = isTailCall(Inst);
2051     assert((Inst.getOpcode() == X86::CALL64pcrel32 ||
2052             (Inst.getOpcode() == X86::JMP_4 && IsTailCall)) &&
2053            "64-bit direct (tail) call instruction expected");
2054     const auto NewOpcode =
2055         (Inst.getOpcode() == X86::CALL64pcrel32) ? X86::CALL64m : X86::JMP32m;
2056     Inst.setOpcode(NewOpcode);
2057 
2058     // Replace the first operand and preserve auxiliary operands of
2059     // the instruction.
2060     Inst.erase(Inst.begin());
2061     Inst.insert(Inst.begin(),
2062                 MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2063     Inst.insert(Inst.begin(),
2064                 MCOperand::createExpr(                  // Displacement
2065                     MCSymbolRefExpr::create(TargetLocation,
2066                                             MCSymbolRefExpr::VK_None, *Ctx)));
2067     Inst.insert(Inst.begin(),
2068                 MCOperand::createReg(X86::NoRegister)); // IndexReg
2069     Inst.insert(Inst.begin(),
2070                 MCOperand::createImm(1));               // ScaleAmt
2071     Inst.insert(Inst.begin(),
2072                 MCOperand::createReg(X86::RIP));        // BaseReg
2073 
2074     return true;
2075   }
2076 
2077   void convertIndirectCallToLoad(MCInst &Inst, MCPhysReg Reg) override {
2078     bool IsTailCall = isTailCall(Inst);
2079     if (IsTailCall)
2080       removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
2081     if (Inst.getOpcode() == X86::CALL64m ||
2082         (Inst.getOpcode() == X86::JMP32m && IsTailCall)) {
2083       Inst.setOpcode(X86::MOV64rm);
2084       Inst.insert(Inst.begin(), MCOperand::createReg(Reg));
2085       return;
2086     }
2087     if (Inst.getOpcode() == X86::CALL64r ||
2088         (Inst.getOpcode() == X86::JMP32r && IsTailCall)) {
2089       Inst.setOpcode(X86::MOV64rr);
2090       Inst.insert(Inst.begin(), MCOperand::createReg(Reg));
2091       return;
2092     }
2093     LLVM_DEBUG(Inst.dump());
2094     llvm_unreachable("not implemented");
2095   }
2096 
2097   bool shortenInstruction(MCInst &Inst) const override {
2098     unsigned OldOpcode = Inst.getOpcode();
2099     unsigned NewOpcode = OldOpcode;
2100 
2101     // Check and remove EIZ/RIZ. These cases represent ambiguous cases where SIB
2102     // byte is present, but no index is used and modrm alone shoud have been
2103     // enough. Converting to NoRegister effectively removes the SIB byte.
2104     int MemOpNo = getMemoryOperandNo(Inst);
2105     if (MemOpNo >= 0) {
2106       MCOperand &IndexOp =
2107           Inst.getOperand(static_cast<unsigned>(MemOpNo) + X86::AddrIndexReg);
2108       if (IndexOp.getReg() == X86::EIZ || IndexOp.getReg() == X86::RIZ)
2109         IndexOp = MCOperand::createReg(X86::NoRegister);
2110     }
2111 
2112     if (isBranch(Inst)) {
2113       NewOpcode = getShortBranchOpcode(OldOpcode);
2114     } else if (OldOpcode == X86::MOV64ri) {
2115       if (Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).isImm()) {
2116         const int64_t Imm =
2117             Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).getImm();
2118         if (int64_t(Imm) == int64_t(int32_t(Imm)))
2119           NewOpcode = X86::MOV64ri32;
2120       }
2121     } else {
2122       // If it's arithmetic instruction check if signed operand fits in 1 byte.
2123       const unsigned ShortOpcode = getShortArithOpcode(OldOpcode);
2124       if (ShortOpcode != OldOpcode &&
2125           Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).isImm()) {
2126         int64_t Imm =
2127             Inst.getOperand(MCPlus::getNumPrimeOperands(Inst) - 1).getImm();
2128         if (int64_t(Imm) == int64_t(int8_t(Imm)))
2129           NewOpcode = ShortOpcode;
2130       }
2131     }
2132 
2133     if (NewOpcode == OldOpcode)
2134       return false;
2135 
2136     Inst.setOpcode(NewOpcode);
2137     return true;
2138   }
2139 
2140   bool lowerTailCall(MCInst &Inst) override {
2141     if (Inst.getOpcode() == X86::JMP_4 && isTailCall(Inst)) {
2142       Inst.setOpcode(X86::JMP_1);
2143       removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
2144       return true;
2145     }
2146     return false;
2147   }
2148 
2149   const MCSymbol *getTargetSymbol(const MCInst &Inst,
2150                                   unsigned OpNum = 0) const override {
2151     if (OpNum >= MCPlus::getNumPrimeOperands(Inst))
2152       return nullptr;
2153 
2154     const MCOperand &Op = Inst.getOperand(OpNum);
2155     if (!Op.isExpr())
2156       return nullptr;
2157 
2158     auto *SymExpr = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
2159     if (!SymExpr || SymExpr->getKind() != MCSymbolRefExpr::VK_None)
2160       return nullptr;
2161 
2162     return &SymExpr->getSymbol();
2163   }
2164 
2165   // This is the same as the base class, but since we are overriding one of
2166   // getTargetSymbol's signatures above, we need to override all of them.
2167   const MCSymbol *getTargetSymbol(const MCExpr *Expr) const override {
2168     return &cast<const MCSymbolRefExpr>(Expr)->getSymbol();
2169   }
2170 
2171   bool analyzeBranch(InstructionIterator Begin, InstructionIterator End,
2172                      const MCSymbol *&TBB, const MCSymbol *&FBB,
2173                      MCInst *&CondBranch,
2174                      MCInst *&UncondBranch) const override {
2175     auto I = End;
2176 
2177     // Bottom-up analysis
2178     while (I != Begin) {
2179       --I;
2180 
2181       // Ignore nops and CFIs
2182       if (isPseudo(*I))
2183         continue;
2184 
2185       // Stop when we find the first non-terminator
2186       if (!isTerminator(*I))
2187         break;
2188 
2189       if (!isBranch(*I))
2190         break;
2191 
2192       // Handle unconditional branches.
2193       if ((I->getOpcode() == X86::JMP_1 || I->getOpcode() == X86::JMP_2 ||
2194            I->getOpcode() == X86::JMP_4) &&
2195           !isTailCall(*I)) {
2196         // If any code was seen after this unconditional branch, we've seen
2197         // unreachable code. Ignore them.
2198         CondBranch = nullptr;
2199         UncondBranch = &*I;
2200         const MCSymbol *Sym = getTargetSymbol(*I);
2201         assert(Sym != nullptr &&
2202                "Couldn't extract BB symbol from jump operand");
2203         TBB = Sym;
2204         continue;
2205       }
2206 
2207       // Handle conditional branches and ignore indirect branches
2208       if (!isUnsupportedBranch(I->getOpcode()) &&
2209           getCondCode(*I) == X86::COND_INVALID) {
2210         // Indirect branch
2211         return false;
2212       }
2213 
2214       if (CondBranch == nullptr) {
2215         const MCSymbol *TargetBB = getTargetSymbol(*I);
2216         if (TargetBB == nullptr) {
2217           // Unrecognized branch target
2218           return false;
2219         }
2220         FBB = TBB;
2221         TBB = TargetBB;
2222         CondBranch = &*I;
2223         continue;
2224       }
2225 
2226       llvm_unreachable("multiple conditional branches in one BB");
2227     }
2228     return true;
2229   }
2230 
2231   template <typename Itr>
2232   std::pair<IndirectBranchType, MCInst *>
2233   analyzePICJumpTable(Itr II, Itr IE, MCPhysReg R1, MCPhysReg R2) const {
2234     // Analyze PIC-style jump table code template:
2235     //
2236     //    lea PIC_JUMP_TABLE(%rip), {%r1|%r2}     <- MemLocInstr
2237     //    mov ({%r1|%r2}, %index, 4), {%r2|%r1}
2238     //    add %r2, %r1
2239     //    jmp *%r1
2240     //
2241     // (with any irrelevant instructions in-between)
2242     //
2243     // When we call this helper we've already determined %r1 and %r2, and
2244     // reverse instruction iterator \p II is pointing to the ADD instruction.
2245     //
2246     // PIC jump table looks like following:
2247     //
2248     //   JT:  ----------
2249     //    E1:| L1 - JT  |
2250     //       |----------|
2251     //    E2:| L2 - JT  |
2252     //       |----------|
2253     //       |          |
2254     //          ......
2255     //    En:| Ln - JT  |
2256     //        ----------
2257     //
2258     // Where L1, L2, ..., Ln represent labels in the function.
2259     //
2260     // The actual relocations in the table will be of the form:
2261     //
2262     //   Ln - JT
2263     //    = (Ln - En) + (En - JT)
2264     //    = R_X86_64_PC32(Ln) + En - JT
2265     //    = R_X86_64_PC32(Ln + offsetof(En))
2266     //
2267     LLVM_DEBUG(dbgs() << "Checking for PIC jump table\n");
2268     MCInst *MemLocInstr = nullptr;
2269     const MCInst *MovInstr = nullptr;
2270     while (++II != IE) {
2271       MCInst &Instr = *II;
2272       const MCInstrDesc &InstrDesc = Info->get(Instr.getOpcode());
2273       if (!InstrDesc.hasDefOfPhysReg(Instr, R1, *RegInfo) &&
2274           !InstrDesc.hasDefOfPhysReg(Instr, R2, *RegInfo)) {
2275         // Ignore instructions that don't affect R1, R2 registers.
2276         continue;
2277       }
2278       if (!MovInstr) {
2279         // Expect to see MOV instruction.
2280         if (!isMOVSX64rm32(Instr)) {
2281           LLVM_DEBUG(dbgs() << "MOV instruction expected.\n");
2282           break;
2283         }
2284 
2285         // Check if it's setting %r1 or %r2. In canonical form it sets %r2.
2286         // If it sets %r1 - rename the registers so we have to only check
2287         // a single form.
2288         unsigned MovDestReg = Instr.getOperand(0).getReg();
2289         if (MovDestReg != R2)
2290           std::swap(R1, R2);
2291         if (MovDestReg != R2) {
2292           LLVM_DEBUG(dbgs() << "MOV instruction expected to set %r2\n");
2293           break;
2294         }
2295 
2296         // Verify operands for MOV.
2297         unsigned  BaseRegNum;
2298         int64_t   ScaleValue;
2299         unsigned  IndexRegNum;
2300         int64_t   DispValue;
2301         unsigned  SegRegNum;
2302         if (!evaluateX86MemoryOperand(Instr, &BaseRegNum, &ScaleValue,
2303                                       &IndexRegNum, &DispValue, &SegRegNum))
2304           break;
2305         if (BaseRegNum != R1 || ScaleValue != 4 ||
2306             IndexRegNum == X86::NoRegister || DispValue != 0 ||
2307             SegRegNum != X86::NoRegister)
2308           break;
2309         MovInstr = &Instr;
2310       } else {
2311         if (!InstrDesc.hasDefOfPhysReg(Instr, R1, *RegInfo))
2312           continue;
2313         if (!isLEA64r(Instr)) {
2314           LLVM_DEBUG(dbgs() << "LEA instruction expected\n");
2315           break;
2316         }
2317         if (Instr.getOperand(0).getReg() != R1) {
2318           LLVM_DEBUG(dbgs() << "LEA instruction expected to set %r1\n");
2319           break;
2320         }
2321 
2322         // Verify operands for LEA.
2323         unsigned      BaseRegNum;
2324         int64_t       ScaleValue;
2325         unsigned      IndexRegNum;
2326         const MCExpr *DispExpr = nullptr;
2327         int64_t       DispValue;
2328         unsigned      SegRegNum;
2329         if (!evaluateX86MemoryOperand(Instr, &BaseRegNum, &ScaleValue,
2330                                       &IndexRegNum, &DispValue, &SegRegNum,
2331                                       &DispExpr))
2332           break;
2333         if (BaseRegNum != RegInfo->getProgramCounter() ||
2334             IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister ||
2335             DispExpr == nullptr)
2336           break;
2337         MemLocInstr = &Instr;
2338         break;
2339       }
2340     }
2341 
2342     if (!MemLocInstr)
2343       return std::make_pair(IndirectBranchType::UNKNOWN, nullptr);
2344 
2345     LLVM_DEBUG(dbgs() << "checking potential PIC jump table\n");
2346     return std::make_pair(IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE,
2347                           MemLocInstr);
2348   }
2349 
2350   IndirectBranchType analyzeIndirectBranch(
2351       MCInst &Instruction, InstructionIterator Begin, InstructionIterator End,
2352       const unsigned PtrSize, MCInst *&MemLocInstrOut, unsigned &BaseRegNumOut,
2353       unsigned &IndexRegNumOut, int64_t &DispValueOut,
2354       const MCExpr *&DispExprOut, MCInst *&PCRelBaseOut) const override {
2355     // Try to find a (base) memory location from where the address for
2356     // the indirect branch is loaded. For X86-64 the memory will be specified
2357     // in the following format:
2358     //
2359     //   {%rip}/{%basereg} + Imm + IndexReg * Scale
2360     //
2361     // We are interested in the cases where Scale == sizeof(uintptr_t) and
2362     // the contents of the memory are presumably an array of pointers to code.
2363     //
2364     // Normal jump table:
2365     //
2366     //    jmp *(JUMP_TABLE, %index, Scale)        <- MemLocInstr
2367     //
2368     //    or
2369     //
2370     //    mov (JUMP_TABLE, %index, Scale), %r1    <- MemLocInstr
2371     //    ...
2372     //    jmp %r1
2373     //
2374     // We handle PIC-style jump tables separately.
2375     //
2376     MemLocInstrOut = nullptr;
2377     BaseRegNumOut = X86::NoRegister;
2378     IndexRegNumOut = X86::NoRegister;
2379     DispValueOut = 0;
2380     DispExprOut = nullptr;
2381 
2382     std::reverse_iterator<InstructionIterator> II(End);
2383     std::reverse_iterator<InstructionIterator> IE(Begin);
2384 
2385     IndirectBranchType Type = IndirectBranchType::UNKNOWN;
2386 
2387     // An instruction referencing memory used by jump instruction (directly or
2388     // via register). This location could be an array of function pointers
2389     // in case of indirect tail call, or a jump table.
2390     MCInst *MemLocInstr = nullptr;
2391 
2392     if (MCPlus::getNumPrimeOperands(Instruction) == 1) {
2393       // If the indirect jump is on register - try to detect if the
2394       // register value is loaded from a memory location.
2395       assert(Instruction.getOperand(0).isReg() && "register operand expected");
2396       const unsigned R1 = Instruction.getOperand(0).getReg();
2397       // Check if one of the previous instructions defines the jump-on register.
2398       for (auto PrevII = II; PrevII != IE; ++PrevII) {
2399         MCInst &PrevInstr = *PrevII;
2400         const MCInstrDesc &PrevInstrDesc = Info->get(PrevInstr.getOpcode());
2401 
2402         if (!PrevInstrDesc.hasDefOfPhysReg(PrevInstr, R1, *RegInfo))
2403           continue;
2404 
2405         if (isMoveMem2Reg(PrevInstr)) {
2406           MemLocInstr = &PrevInstr;
2407           break;
2408         }
2409         if (isADD64rr(PrevInstr)) {
2410           unsigned R2 = PrevInstr.getOperand(2).getReg();
2411           if (R1 == R2)
2412             return IndirectBranchType::UNKNOWN;
2413           std::tie(Type, MemLocInstr) = analyzePICJumpTable(PrevII, IE, R1, R2);
2414           break;
2415         }
2416         return IndirectBranchType::UNKNOWN;
2417       }
2418       if (!MemLocInstr) {
2419         // No definition seen for the register in this function so far. Could be
2420         // an input parameter - which means it is an external code reference.
2421         // It also could be that the definition happens to be in the code that
2422         // we haven't processed yet. Since we have to be conservative, return
2423         // as UNKNOWN case.
2424         return IndirectBranchType::UNKNOWN;
2425       }
2426     } else {
2427       MemLocInstr = &Instruction;
2428     }
2429 
2430     const MCRegister RIPRegister = RegInfo->getProgramCounter();
2431 
2432     // Analyze the memory location.
2433     unsigned BaseRegNum, IndexRegNum, SegRegNum;
2434     int64_t ScaleValue, DispValue;
2435     const MCExpr *DispExpr;
2436 
2437     if (!evaluateX86MemoryOperand(*MemLocInstr, &BaseRegNum, &ScaleValue,
2438                                   &IndexRegNum, &DispValue, &SegRegNum,
2439                                   &DispExpr))
2440       return IndirectBranchType::UNKNOWN;
2441 
2442     BaseRegNumOut = BaseRegNum;
2443     IndexRegNumOut = IndexRegNum;
2444     DispValueOut = DispValue;
2445     DispExprOut = DispExpr;
2446 
2447     if ((BaseRegNum != X86::NoRegister && BaseRegNum != RIPRegister) ||
2448         SegRegNum != X86::NoRegister)
2449       return IndirectBranchType::UNKNOWN;
2450 
2451     if (MemLocInstr == &Instruction &&
2452         (!ScaleValue || IndexRegNum == X86::NoRegister)) {
2453       MemLocInstrOut = MemLocInstr;
2454       return IndirectBranchType::POSSIBLE_FIXED_BRANCH;
2455     }
2456 
2457     if (Type == IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE &&
2458         (ScaleValue != 1 || BaseRegNum != RIPRegister))
2459       return IndirectBranchType::UNKNOWN;
2460 
2461     if (Type != IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE &&
2462         ScaleValue != PtrSize)
2463       return IndirectBranchType::UNKNOWN;
2464 
2465     MemLocInstrOut = MemLocInstr;
2466 
2467     return Type;
2468   }
2469 
2470   /// Analyze a callsite to see if it could be a virtual method call.  This only
2471   /// checks to see if the overall pattern is satisfied, it does not guarantee
2472   /// that the callsite is a true virtual method call.
2473   /// The format of virtual method calls that are recognized is one of the
2474   /// following:
2475   ///
2476   ///  Form 1: (found in debug code)
2477   ///    add METHOD_OFFSET, %VtableReg
2478   ///    mov (%VtableReg), %MethodReg
2479   ///    ...
2480   ///    call or jmp *%MethodReg
2481   ///
2482   ///  Form 2:
2483   ///    mov METHOD_OFFSET(%VtableReg), %MethodReg
2484   ///    ...
2485   ///    call or jmp *%MethodReg
2486   ///
2487   ///  Form 3:
2488   ///    ...
2489   ///    call or jmp *METHOD_OFFSET(%VtableReg)
2490   ///
2491   bool analyzeVirtualMethodCall(InstructionIterator ForwardBegin,
2492                                 InstructionIterator ForwardEnd,
2493                                 std::vector<MCInst *> &MethodFetchInsns,
2494                                 unsigned &VtableRegNum, unsigned &MethodRegNum,
2495                                 uint64_t &MethodOffset) const override {
2496     VtableRegNum = X86::NoRegister;
2497     MethodRegNum = X86::NoRegister;
2498     MethodOffset = 0;
2499 
2500     std::reverse_iterator<InstructionIterator> Itr(ForwardEnd);
2501     std::reverse_iterator<InstructionIterator> End(ForwardBegin);
2502 
2503     MCInst &CallInst = *Itr++;
2504     assert(isIndirectBranch(CallInst) || isCall(CallInst));
2505 
2506     unsigned BaseReg, IndexReg, SegmentReg;
2507     int64_t Scale, Disp;
2508     const MCExpr *DispExpr;
2509 
2510     // The call can just be jmp offset(reg)
2511     if (evaluateX86MemoryOperand(CallInst, &BaseReg, &Scale, &IndexReg, &Disp,
2512                                  &SegmentReg, &DispExpr)) {
2513       if (!DispExpr && BaseReg != X86::RIP && BaseReg != X86::RBP &&
2514           BaseReg != X86::NoRegister) {
2515         MethodRegNum = BaseReg;
2516         if (Scale == 1 && IndexReg == X86::NoRegister &&
2517             SegmentReg == X86::NoRegister) {
2518           VtableRegNum = MethodRegNum;
2519           MethodOffset = Disp;
2520           MethodFetchInsns.push_back(&CallInst);
2521           return true;
2522         }
2523       }
2524       return false;
2525     }
2526     if (CallInst.getOperand(0).isReg())
2527       MethodRegNum = CallInst.getOperand(0).getReg();
2528     else
2529       return false;
2530 
2531     if (MethodRegNum == X86::RIP || MethodRegNum == X86::RBP) {
2532       VtableRegNum = X86::NoRegister;
2533       MethodRegNum = X86::NoRegister;
2534       return false;
2535     }
2536 
2537     // find load from vtable, this may or may not include the method offset
2538     while (Itr != End) {
2539       MCInst &CurInst = *Itr++;
2540       const MCInstrDesc &Desc = Info->get(CurInst.getOpcode());
2541       if (Desc.hasDefOfPhysReg(CurInst, MethodRegNum, *RegInfo)) {
2542         if (isLoad(CurInst) &&
2543             evaluateX86MemoryOperand(CurInst, &BaseReg, &Scale, &IndexReg,
2544                                      &Disp, &SegmentReg, &DispExpr)) {
2545           if (!DispExpr && Scale == 1 && BaseReg != X86::RIP &&
2546               BaseReg != X86::RBP && BaseReg != X86::NoRegister &&
2547               IndexReg == X86::NoRegister && SegmentReg == X86::NoRegister &&
2548               BaseReg != X86::RIP) {
2549             VtableRegNum = BaseReg;
2550             MethodOffset = Disp;
2551             MethodFetchInsns.push_back(&CurInst);
2552             if (MethodOffset != 0)
2553               return true;
2554             break;
2555           }
2556         }
2557         return false;
2558       }
2559     }
2560 
2561     if (!VtableRegNum)
2562       return false;
2563 
2564     // look for any adds affecting the method register.
2565     while (Itr != End) {
2566       MCInst &CurInst = *Itr++;
2567       const MCInstrDesc &Desc = Info->get(CurInst.getOpcode());
2568       if (Desc.hasDefOfPhysReg(CurInst, VtableRegNum, *RegInfo)) {
2569         if (isADDri(CurInst)) {
2570           assert(!MethodOffset);
2571           MethodOffset = CurInst.getOperand(2).getImm();
2572           MethodFetchInsns.insert(MethodFetchInsns.begin(), &CurInst);
2573           break;
2574         }
2575       }
2576     }
2577 
2578     return true;
2579   }
2580 
2581   bool createStackPointerIncrement(MCInst &Inst, int Size,
2582                                    bool NoFlagsClobber) const override {
2583     if (NoFlagsClobber) {
2584       Inst.setOpcode(X86::LEA64r);
2585       Inst.clear();
2586       Inst.addOperand(MCOperand::createReg(X86::RSP));
2587       Inst.addOperand(MCOperand::createReg(X86::RSP));        // BaseReg
2588       Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2589       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2590       Inst.addOperand(MCOperand::createImm(-Size));           // Displacement
2591       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2592       return true;
2593     }
2594     Inst.setOpcode(X86::SUB64ri8);
2595     Inst.clear();
2596     Inst.addOperand(MCOperand::createReg(X86::RSP));
2597     Inst.addOperand(MCOperand::createReg(X86::RSP));
2598     Inst.addOperand(MCOperand::createImm(Size));
2599     return true;
2600   }
2601 
2602   bool createStackPointerDecrement(MCInst &Inst, int Size,
2603                                    bool NoFlagsClobber) const override {
2604     if (NoFlagsClobber) {
2605       Inst.setOpcode(X86::LEA64r);
2606       Inst.clear();
2607       Inst.addOperand(MCOperand::createReg(X86::RSP));
2608       Inst.addOperand(MCOperand::createReg(X86::RSP));        // BaseReg
2609       Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2610       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2611       Inst.addOperand(MCOperand::createImm(Size));            // Displacement
2612       Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2613       return true;
2614     }
2615     Inst.setOpcode(X86::ADD64ri8);
2616     Inst.clear();
2617     Inst.addOperand(MCOperand::createReg(X86::RSP));
2618     Inst.addOperand(MCOperand::createReg(X86::RSP));
2619     Inst.addOperand(MCOperand::createImm(Size));
2620     return true;
2621   }
2622 
2623   bool createSaveToStack(MCInst &Inst, const MCPhysReg &StackReg, int Offset,
2624                          const MCPhysReg &SrcReg, int Size) const override {
2625     unsigned NewOpcode;
2626     switch (Size) {
2627     default:
2628       return false;
2629     case 2:      NewOpcode = X86::MOV16mr; break;
2630     case 4:      NewOpcode = X86::MOV32mr; break;
2631     case 8:      NewOpcode = X86::MOV64mr; break;
2632     }
2633     Inst.setOpcode(NewOpcode);
2634     Inst.clear();
2635     Inst.addOperand(MCOperand::createReg(StackReg));        // BaseReg
2636     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2637     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2638     Inst.addOperand(MCOperand::createImm(Offset));          // Displacement
2639     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2640     Inst.addOperand(MCOperand::createReg(SrcReg));
2641     return true;
2642   }
2643 
2644   bool createRestoreFromStack(MCInst &Inst, const MCPhysReg &StackReg,
2645                               int Offset, const MCPhysReg &DstReg,
2646                               int Size) const override {
2647     return createLoad(Inst, StackReg, /*Scale=*/1, /*IndexReg=*/X86::NoRegister,
2648                       Offset, nullptr, /*AddrSegmentReg=*/X86::NoRegister,
2649                       DstReg, Size);
2650   }
2651 
2652   bool createLoad(MCInst &Inst, const MCPhysReg &BaseReg, int64_t Scale,
2653                   const MCPhysReg &IndexReg, int64_t Offset,
2654                   const MCExpr *OffsetExpr, const MCPhysReg &AddrSegmentReg,
2655                   const MCPhysReg &DstReg, int Size) const override {
2656     unsigned NewOpcode;
2657     switch (Size) {
2658     default:
2659       return false;
2660     case 2:      NewOpcode = X86::MOV16rm; break;
2661     case 4:      NewOpcode = X86::MOV32rm; break;
2662     case 8:      NewOpcode = X86::MOV64rm; break;
2663     }
2664     Inst.setOpcode(NewOpcode);
2665     Inst.clear();
2666     Inst.addOperand(MCOperand::createReg(DstReg));
2667     Inst.addOperand(MCOperand::createReg(BaseReg));
2668     Inst.addOperand(MCOperand::createImm(Scale));
2669     Inst.addOperand(MCOperand::createReg(IndexReg));
2670     if (OffsetExpr)
2671       Inst.addOperand(MCOperand::createExpr(OffsetExpr)); // Displacement
2672     else
2673       Inst.addOperand(MCOperand::createImm(Offset)); // Displacement
2674     Inst.addOperand(MCOperand::createReg(AddrSegmentReg)); // AddrSegmentReg
2675     return true;
2676   }
2677 
2678   void createLoadImmediate(MCInst &Inst, const MCPhysReg Dest,
2679                            uint32_t Imm) const override {
2680     Inst.setOpcode(X86::MOV64ri32);
2681     Inst.clear();
2682     Inst.addOperand(MCOperand::createReg(Dest));
2683     Inst.addOperand(MCOperand::createImm(Imm));
2684   }
2685 
2686   bool createIncMemory(MCInst &Inst, const MCSymbol *Target,
2687                        MCContext *Ctx) const override {
2688 
2689     Inst.setOpcode(X86::LOCK_INC64m);
2690     Inst.clear();
2691     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
2692     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
2693     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
2694 
2695     Inst.addOperand(MCOperand::createExpr(
2696         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None,
2697                                 *Ctx)));                    // Displacement
2698     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
2699     return true;
2700   }
2701 
2702   bool createIJmp32Frag(SmallVectorImpl<MCInst> &Insts,
2703                         const MCOperand &BaseReg, const MCOperand &Scale,
2704                         const MCOperand &IndexReg, const MCOperand &Offset,
2705                         const MCOperand &TmpReg) const override {
2706     // The code fragment we emit here is:
2707     //
2708     //  mov32 (%base, %index, scale), %tmpreg
2709     //  ijmp *(%tmpreg)
2710     //
2711     MCInst IJmp;
2712     IJmp.setOpcode(X86::JMP64r);
2713     IJmp.addOperand(TmpReg);
2714 
2715     MCInst Load;
2716     Load.setOpcode(X86::MOV32rm);
2717     Load.addOperand(TmpReg);
2718     Load.addOperand(BaseReg);
2719     Load.addOperand(Scale);
2720     Load.addOperand(IndexReg);
2721     Load.addOperand(Offset);
2722     Load.addOperand(MCOperand::createReg(X86::NoRegister));
2723 
2724     Insts.push_back(Load);
2725     Insts.push_back(IJmp);
2726     return true;
2727   }
2728 
2729   bool createNoop(MCInst &Inst) const override {
2730     Inst.setOpcode(X86::NOOP);
2731     return true;
2732   }
2733 
2734   bool createReturn(MCInst &Inst) const override {
2735     Inst.setOpcode(X86::RET64);
2736     return true;
2737   }
2738 
2739   InstructionListType createInlineMemcpy(bool ReturnEnd) const override {
2740     InstructionListType Code;
2741     if (ReturnEnd)
2742       Code.emplace_back(MCInstBuilder(X86::LEA64r)
2743                             .addReg(X86::RAX)
2744                             .addReg(X86::RDI)
2745                             .addImm(1)
2746                             .addReg(X86::RDX)
2747                             .addImm(0)
2748                             .addReg(X86::NoRegister));
2749     else
2750       Code.emplace_back(MCInstBuilder(X86::MOV64rr)
2751                             .addReg(X86::RAX)
2752                             .addReg(X86::RDI));
2753 
2754     Code.emplace_back(MCInstBuilder(X86::MOV32rr)
2755                           .addReg(X86::ECX)
2756                           .addReg(X86::EDX));
2757     Code.emplace_back(MCInstBuilder(X86::REP_MOVSB_64));
2758 
2759     return Code;
2760   }
2761 
2762   InstructionListType createOneByteMemcpy() const override {
2763     InstructionListType Code;
2764     Code.emplace_back(MCInstBuilder(X86::MOV8rm)
2765                           .addReg(X86::CL)
2766                           .addReg(X86::RSI)
2767                           .addImm(0)
2768                           .addReg(X86::NoRegister)
2769                           .addImm(0)
2770                           .addReg(X86::NoRegister));
2771     Code.emplace_back(MCInstBuilder(X86::MOV8mr)
2772                           .addReg(X86::RDI)
2773                           .addImm(0)
2774                           .addReg(X86::NoRegister)
2775                           .addImm(0)
2776                           .addReg(X86::NoRegister)
2777                           .addReg(X86::CL));
2778     Code.emplace_back(MCInstBuilder(X86::MOV64rr)
2779                           .addReg(X86::RAX)
2780                           .addReg(X86::RDI));
2781     return Code;
2782   }
2783 
2784   InstructionListType createCmpJE(MCPhysReg RegNo, int64_t Imm,
2785                                   const MCSymbol *Target,
2786                                   MCContext *Ctx) const override {
2787     InstructionListType Code;
2788     Code.emplace_back(MCInstBuilder(X86::CMP64ri8)
2789                           .addReg(RegNo)
2790                           .addImm(Imm));
2791     Code.emplace_back(MCInstBuilder(X86::JCC_1)
2792                           .addExpr(MCSymbolRefExpr::create(
2793                               Target, MCSymbolRefExpr::VK_None, *Ctx))
2794                           .addImm(X86::COND_E));
2795     return Code;
2796   }
2797 
2798   Optional<Relocation>
2799   createRelocation(const MCFixup &Fixup,
2800                    const MCAsmBackend &MAB) const override {
2801     const MCFixupKindInfo &FKI = MAB.getFixupKindInfo(Fixup.getKind());
2802 
2803     assert(FKI.TargetOffset == 0 && "0-bit relocation offset expected");
2804     const uint64_t RelOffset = Fixup.getOffset();
2805 
2806     uint64_t RelType;
2807     if (FKI.Flags & MCFixupKindInfo::FKF_IsPCRel) {
2808       switch (FKI.TargetSize) {
2809       default:
2810         return NoneType();
2811       case  8: RelType = ELF::R_X86_64_PC8; break;
2812       case 16: RelType = ELF::R_X86_64_PC16; break;
2813       case 32: RelType = ELF::R_X86_64_PC32; break;
2814       case 64: RelType = ELF::R_X86_64_PC64; break;
2815       }
2816     } else {
2817       switch (FKI.TargetSize) {
2818       default:
2819         return NoneType();
2820       case  8: RelType = ELF::R_X86_64_8; break;
2821       case 16: RelType = ELF::R_X86_64_16; break;
2822       case 32: RelType = ELF::R_X86_64_32; break;
2823       case 64: RelType = ELF::R_X86_64_64; break;
2824       }
2825     }
2826 
2827     // Extract a symbol and an addend out of the fixup value expression.
2828     //
2829     // Only the following limited expression types are supported:
2830     //   Symbol + Addend
2831     //   Symbol
2832     uint64_t Addend = 0;
2833     MCSymbol *Symbol = nullptr;
2834     const MCExpr *ValueExpr = Fixup.getValue();
2835     if (ValueExpr->getKind() == MCExpr::Binary) {
2836       const auto *BinaryExpr = cast<MCBinaryExpr>(ValueExpr);
2837       assert(BinaryExpr->getOpcode() == MCBinaryExpr::Add &&
2838              "unexpected binary expression");
2839       const MCExpr *LHS = BinaryExpr->getLHS();
2840       assert(LHS->getKind() == MCExpr::SymbolRef && "unexpected LHS");
2841       Symbol = const_cast<MCSymbol *>(this->getTargetSymbol(LHS));
2842       const MCExpr *RHS = BinaryExpr->getRHS();
2843       assert(RHS->getKind() == MCExpr::Constant && "unexpected RHS");
2844       Addend = cast<MCConstantExpr>(RHS)->getValue();
2845     } else {
2846       assert(ValueExpr->getKind() == MCExpr::SymbolRef && "unexpected value");
2847       Symbol = const_cast<MCSymbol *>(this->getTargetSymbol(ValueExpr));
2848     }
2849 
2850     return Relocation({RelOffset, Symbol, RelType, Addend, 0});
2851   }
2852 
2853   bool replaceImmWithSymbolRef(MCInst &Inst, const MCSymbol *Symbol,
2854                                int64_t Addend, MCContext *Ctx, int64_t &Value,
2855                                uint64_t RelType) const override {
2856     unsigned ImmOpNo = -1U;
2857 
2858     for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
2859          ++Index) {
2860       if (Inst.getOperand(Index).isImm()) {
2861         ImmOpNo = Index;
2862         // TODO: this is a bit hacky.  It finds the correct operand by
2863         // searching for a specific immediate value.  If no value is
2864         // provided it defaults to the last immediate operand found.
2865         // This could lead to unexpected results if the instruction
2866         // has more than one immediate with the same value.
2867         if (Inst.getOperand(ImmOpNo).getImm() == Value)
2868           break;
2869       }
2870     }
2871 
2872     if (ImmOpNo == -1U)
2873       return false;
2874 
2875     Value = Inst.getOperand(ImmOpNo).getImm();
2876 
2877     setOperandToSymbolRef(Inst, ImmOpNo, Symbol, Addend, Ctx, RelType);
2878 
2879     return true;
2880   }
2881 
2882   bool replaceRegWithImm(MCInst &Inst, unsigned Register,
2883                          int64_t Imm) const override {
2884 
2885     enum CheckSignExt : uint8_t {
2886       NOCHECK = 0,
2887       CHECK8,
2888       CHECK32,
2889     };
2890 
2891     using CheckList = std::vector<std::pair<CheckSignExt, unsigned>>;
2892     struct InstInfo {
2893       // Size in bytes that Inst loads from memory.
2894       uint8_t DataSize;
2895 
2896       // True when the target operand has to be duplicated because the opcode
2897       // expects a LHS operand.
2898       bool HasLHS;
2899 
2900       // List of checks and corresponding opcodes to be used. We try to use the
2901       // smallest possible immediate value when various sizes are available,
2902       // hence we may need to check whether a larger constant fits in a smaller
2903       // immediate.
2904       CheckList Checks;
2905     };
2906 
2907     InstInfo I;
2908 
2909     switch (Inst.getOpcode()) {
2910     default: {
2911       switch (getPushSize(Inst)) {
2912 
2913       case 2: I = {2, false, {{CHECK8, X86::PUSH16i8}, {NOCHECK, X86::PUSHi16}}}; break;
2914       case 4: I = {4, false, {{CHECK8, X86::PUSH32i8}, {NOCHECK, X86::PUSHi32}}}; break;
2915       case 8: I = {8, false, {{CHECK8, X86::PUSH64i8},
2916                               {CHECK32, X86::PUSH64i32},
2917                               {NOCHECK, Inst.getOpcode()}}}; break;
2918       default: return false;
2919       }
2920       break;
2921     }
2922 
2923     // MOV
2924     case X86::MOV8rr:       I = {1, false, {{NOCHECK, X86::MOV8ri}}}; break;
2925     case X86::MOV16rr:      I = {2, false, {{NOCHECK, X86::MOV16ri}}}; break;
2926     case X86::MOV32rr:      I = {4, false, {{NOCHECK, X86::MOV32ri}}}; break;
2927     case X86::MOV64rr:      I = {8, false, {{CHECK32, X86::MOV64ri32},
2928                                             {NOCHECK, X86::MOV64ri}}}; break;
2929 
2930     case X86::MOV8mr:       I = {1, false, {{NOCHECK, X86::MOV8mi}}}; break;
2931     case X86::MOV16mr:      I = {2, false, {{NOCHECK, X86::MOV16mi}}}; break;
2932     case X86::MOV32mr:      I = {4, false, {{NOCHECK, X86::MOV32mi}}}; break;
2933     case X86::MOV64mr:      I = {8, false, {{CHECK32, X86::MOV64mi32},
2934                                             {NOCHECK, X86::MOV64mr}}}; break;
2935 
2936     // MOVZX
2937     case X86::MOVZX16rr8:   I = {1, false, {{NOCHECK, X86::MOV16ri}}}; break;
2938     case X86::MOVZX32rr8:   I = {1, false, {{NOCHECK, X86::MOV32ri}}}; break;
2939     case X86::MOVZX32rr16:  I = {2, false, {{NOCHECK, X86::MOV32ri}}}; break;
2940 
2941     // CMP
2942     case X86::CMP8rr:       I = {1, false, {{NOCHECK, X86::CMP8ri}}}; break;
2943     case X86::CMP16rr:      I = {2, false, {{CHECK8, X86::CMP16ri8},
2944                                             {NOCHECK, X86::CMP16ri}}}; break;
2945     case X86::CMP32rr:      I = {4, false, {{CHECK8, X86::CMP32ri8},
2946                                             {NOCHECK, X86::CMP32ri}}}; break;
2947     case X86::CMP64rr:      I = {8, false, {{CHECK8, X86::CMP64ri8},
2948                                             {CHECK32, X86::CMP64ri32},
2949                                             {NOCHECK, X86::CMP64rr}}}; break;
2950 
2951     // TEST
2952     case X86::TEST8rr:      I = {1, false, {{NOCHECK, X86::TEST8ri}}}; break;
2953     case X86::TEST16rr:     I = {2, false, {{NOCHECK, X86::TEST16ri}}}; break;
2954     case X86::TEST32rr:     I = {4, false, {{NOCHECK, X86::TEST32ri}}}; break;
2955     case X86::TEST64rr:     I = {8, false, {{CHECK32, X86::TEST64ri32},
2956                                             {NOCHECK, X86::TEST64rr}}}; break;
2957 
2958     // ADD
2959     case X86::ADD8rr:       I = {1, true, {{NOCHECK, X86::ADD8ri}}}; break;
2960     case X86::ADD16rr:      I = {2, true, {{CHECK8, X86::ADD16ri8},
2961                                            {NOCHECK, X86::ADD16ri}}}; break;
2962     case X86::ADD32rr:      I = {4, true, {{CHECK8, X86::ADD32ri8},
2963                                            {NOCHECK, X86::ADD32ri}}}; break;
2964     case X86::ADD64rr:      I = {8, true, {{CHECK8, X86::ADD64ri8},
2965                                            {CHECK32, X86::ADD64ri32},
2966                                            {NOCHECK, X86::ADD64rr}}}; break;
2967 
2968     // SUB
2969     case X86::SUB8rr:       I = {1, true, {{NOCHECK, X86::SUB8ri}}}; break;
2970     case X86::SUB16rr:      I = {2, true, {{CHECK8, X86::SUB16ri8},
2971                                            {NOCHECK, X86::SUB16ri}}}; break;
2972     case X86::SUB32rr:      I = {4, true, {{CHECK8, X86::SUB32ri8},
2973                                            {NOCHECK, X86::SUB32ri}}}; break;
2974     case X86::SUB64rr:      I = {8, true, {{CHECK8, X86::SUB64ri8},
2975                                            {CHECK32, X86::SUB64ri32},
2976                                            {NOCHECK, X86::SUB64rr}}}; break;
2977 
2978     // AND
2979     case X86::AND8rr:       I = {1, true, {{NOCHECK, X86::AND8ri}}}; break;
2980     case X86::AND16rr:      I = {2, true, {{CHECK8, X86::AND16ri8},
2981                                            {NOCHECK, X86::AND16ri}}}; break;
2982     case X86::AND32rr:      I = {4, true, {{CHECK8, X86::AND32ri8},
2983                                            {NOCHECK, X86::AND32ri}}}; break;
2984     case X86::AND64rr:      I = {8, true, {{CHECK8, X86::AND64ri8},
2985                                            {CHECK32, X86::AND64ri32},
2986                                            {NOCHECK, X86::AND64rr}}}; break;
2987 
2988     // OR
2989     case X86::OR8rr:        I = {1, true, {{NOCHECK, X86::OR8ri}}}; break;
2990     case X86::OR16rr:       I = {2, true, {{CHECK8, X86::OR16ri8},
2991                                            {NOCHECK, X86::OR16ri}}}; break;
2992     case X86::OR32rr:       I = {4, true, {{CHECK8, X86::OR32ri8},
2993                                            {NOCHECK, X86::OR32ri}}}; break;
2994     case X86::OR64rr:       I = {8, true, {{CHECK8, X86::OR64ri8},
2995                                            {CHECK32, X86::OR64ri32},
2996                                            {NOCHECK, X86::OR64rr}}}; break;
2997 
2998     // XOR
2999     case X86::XOR8rr:       I = {1, true, {{NOCHECK, X86::XOR8ri}}}; break;
3000     case X86::XOR16rr:      I = {2, true, {{CHECK8, X86::XOR16ri8},
3001                                            {NOCHECK, X86::XOR16ri}}}; break;
3002     case X86::XOR32rr:      I = {4, true, {{CHECK8, X86::XOR32ri8},
3003                                            {NOCHECK, X86::XOR32ri}}}; break;
3004     case X86::XOR64rr:      I = {8, true, {{CHECK8, X86::XOR64ri8},
3005                                            {CHECK32, X86::XOR64ri32},
3006                                            {NOCHECK, X86::XOR64rr}}}; break;
3007     }
3008 
3009     // Compute the new opcode.
3010     unsigned NewOpcode = 0;
3011     for (const std::pair<CheckSignExt, unsigned> &Check : I.Checks) {
3012       NewOpcode = Check.second;
3013       if (Check.first == NOCHECK)
3014         break;
3015       if (Check.first == CHECK8 && Imm >= std::numeric_limits<int8_t>::min() &&
3016           Imm <= std::numeric_limits<int8_t>::max())
3017         break;
3018       if (Check.first == CHECK32 &&
3019           Imm >= std::numeric_limits<int32_t>::min() &&
3020           Imm <= std::numeric_limits<int32_t>::max())
3021         break;
3022     }
3023     if (NewOpcode == Inst.getOpcode())
3024       return false;
3025 
3026     const MCInstrDesc &InstDesc = Info->get(Inst.getOpcode());
3027 
3028     unsigned NumFound = 0;
3029     for (unsigned Index = InstDesc.getNumDefs() + (I.HasLHS ? 1 : 0),
3030                   E = InstDesc.getNumOperands();
3031          Index != E; ++Index)
3032       if (Inst.getOperand(Index).isReg() &&
3033           Inst.getOperand(Index).getReg() == Register)
3034         NumFound++;
3035 
3036     if (NumFound != 1)
3037       return false;
3038 
3039     // Iterate backwards to replace the src register before the src/dest
3040     // register as in AND, ADD, and SUB Only iterate through src operands that
3041     // arent also dest operands
3042     for (unsigned Index = InstDesc.getNumOperands() - 1,
3043                   E = InstDesc.getNumDefs() + (I.HasLHS ? 0 : -1);
3044          Index != E; --Index) {
3045       if (!Inst.getOperand(Index).isReg() ||
3046           Inst.getOperand(Index).getReg() != Register)
3047         continue;
3048       MCOperand NewOperand = MCOperand::createImm(Imm);
3049       Inst.getOperand(Index) = NewOperand;
3050       break;
3051     }
3052 
3053     Inst.setOpcode(NewOpcode);
3054 
3055     return true;
3056   }
3057 
3058   bool replaceRegWithReg(MCInst &Inst, unsigned ToReplace,
3059                          unsigned ReplaceWith) const override {
3060 
3061     // Get the HasLHS value so that iteration can be done
3062     bool HasLHS;
3063     if (isAND(Inst.getOpcode()) || isADD(Inst.getOpcode()) || isSUB(Inst)) {
3064       HasLHS = true;
3065     } else if (isPop(Inst) || isPush(Inst) || isCMP(Inst.getOpcode()) ||
3066                isTEST(Inst.getOpcode())) {
3067       HasLHS = false;
3068     } else {
3069       switch (Inst.getOpcode()) {
3070       case X86::MOV8rr:
3071       case X86::MOV8rm:
3072       case X86::MOV8mr:
3073       case X86::MOV8ri:
3074       case X86::MOV16rr:
3075       case X86::MOV16rm:
3076       case X86::MOV16mr:
3077       case X86::MOV16ri:
3078       case X86::MOV32rr:
3079       case X86::MOV32rm:
3080       case X86::MOV32mr:
3081       case X86::MOV32ri:
3082       case X86::MOV64rr:
3083       case X86::MOV64rm:
3084       case X86::MOV64mr:
3085       case X86::MOV64ri:
3086       case X86::MOVZX16rr8:
3087       case X86::MOVZX32rr8:
3088       case X86::MOVZX32rr16:
3089       case X86::MOVSX32rm8:
3090       case X86::MOVSX32rr8:
3091       case X86::MOVSX64rm32:
3092       case X86::LEA64r:
3093         HasLHS = false;
3094         break;
3095       default:
3096         return false;
3097       }
3098     }
3099 
3100     const MCInstrDesc &InstDesc = Info->get(Inst.getOpcode());
3101 
3102     bool FoundOne = false;
3103 
3104     // Iterate only through src operands that arent also dest operands
3105     for (unsigned Index = InstDesc.getNumDefs() + (HasLHS ? 1 : 0),
3106                   E = InstDesc.getNumOperands();
3107          Index != E; ++Index) {
3108       BitVector RegAliases = getAliases(ToReplace, true);
3109       if (!Inst.getOperand(Index).isReg() ||
3110           !RegAliases.test(Inst.getOperand(Index).getReg()))
3111         continue;
3112       // Resize register if needed
3113       unsigned SizedReplaceWith = getAliasSized(
3114           ReplaceWith, getRegSize(Inst.getOperand(Index).getReg()));
3115       MCOperand NewOperand = MCOperand::createReg(SizedReplaceWith);
3116       Inst.getOperand(Index) = NewOperand;
3117       FoundOne = true;
3118     }
3119 
3120     // Return true if at least one operand was replaced
3121     return FoundOne;
3122   }
3123 
3124   bool createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
3125                           MCContext *Ctx) const override {
3126     Inst.setOpcode(X86::JMP_1);
3127     Inst.addOperand(MCOperand::createExpr(
3128         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx)));
3129     return true;
3130   }
3131 
3132   bool createCall(MCInst &Inst, const MCSymbol *Target,
3133                   MCContext *Ctx) override {
3134     Inst.setOpcode(X86::CALL64pcrel32);
3135     Inst.addOperand(MCOperand::createExpr(
3136         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
3137     return true;
3138   }
3139 
3140   bool createTailCall(MCInst &Inst, const MCSymbol *Target,
3141                       MCContext *Ctx) override {
3142     return createDirectCall(Inst, Target, Ctx, /*IsTailCall*/ true);
3143   }
3144 
3145   void createLongTailCall(InstructionListType &Seq, const MCSymbol *Target,
3146                           MCContext *Ctx) override {
3147     Seq.clear();
3148     Seq.emplace_back();
3149     createDirectCall(Seq.back(), Target, Ctx, /*IsTailCall*/ true);
3150   }
3151 
3152   bool createTrap(MCInst &Inst) const override {
3153     Inst.clear();
3154     Inst.setOpcode(X86::TRAP);
3155     return true;
3156   }
3157 
3158   bool reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
3159                               MCContext *Ctx) const override {
3160     unsigned InvCC = getInvertedCondCode(getCondCode(Inst));
3161     assert(InvCC != X86::COND_INVALID && "invalid branch instruction");
3162     Inst.getOperand(Info->get(Inst.getOpcode()).NumOperands - 1).setImm(InvCC);
3163     Inst.getOperand(0) = MCOperand::createExpr(
3164         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
3165     return true;
3166   }
3167 
3168   bool replaceBranchCondition(MCInst &Inst, const MCSymbol *TBB, MCContext *Ctx,
3169                               unsigned CC) const override {
3170     if (CC == X86::COND_INVALID)
3171       return false;
3172     Inst.getOperand(Info->get(Inst.getOpcode()).NumOperands - 1).setImm(CC);
3173     Inst.getOperand(0) = MCOperand::createExpr(
3174         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
3175     return true;
3176   }
3177 
3178   unsigned getCanonicalBranchCondCode(unsigned CC) const override {
3179     switch (CC) {
3180     default:           return X86::COND_INVALID;
3181 
3182     case X86::COND_E:  return X86::COND_E;
3183     case X86::COND_NE: return X86::COND_E;
3184 
3185     case X86::COND_L:  return X86::COND_L;
3186     case X86::COND_GE: return X86::COND_L;
3187 
3188     case X86::COND_LE: return X86::COND_G;
3189     case X86::COND_G:  return X86::COND_G;
3190 
3191     case X86::COND_B:  return X86::COND_B;
3192     case X86::COND_AE: return X86::COND_B;
3193 
3194     case X86::COND_BE: return X86::COND_A;
3195     case X86::COND_A:  return X86::COND_A;
3196 
3197     case X86::COND_S:  return X86::COND_S;
3198     case X86::COND_NS: return X86::COND_S;
3199 
3200     case X86::COND_P:  return X86::COND_P;
3201     case X86::COND_NP: return X86::COND_P;
3202 
3203     case X86::COND_O:  return X86::COND_O;
3204     case X86::COND_NO: return X86::COND_O;
3205     }
3206   }
3207 
3208   bool replaceBranchTarget(MCInst &Inst, const MCSymbol *TBB,
3209                            MCContext *Ctx) const override {
3210     assert((isCall(Inst) || isBranch(Inst)) && !isIndirectBranch(Inst) &&
3211            "Invalid instruction");
3212     Inst.getOperand(0) = MCOperand::createExpr(
3213         MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
3214     return true;
3215   }
3216 
3217   MCPhysReg getX86R11() const override { return X86::R11; }
3218 
3219   MCPhysReg getNoRegister() const override { return X86::NoRegister; }
3220 
3221   MCPhysReg getIntArgRegister(unsigned ArgNo) const override {
3222     // FIXME: this should depend on the calling convention.
3223     switch (ArgNo) {
3224     case 0:   return X86::RDI;
3225     case 1:   return X86::RSI;
3226     case 2:   return X86::RDX;
3227     case 3:   return X86::RCX;
3228     case 4:   return X86::R8;
3229     case 5:   return X86::R9;
3230     default:  return getNoRegister();
3231     }
3232   }
3233 
3234   void createPause(MCInst &Inst) const override {
3235     Inst.clear();
3236     Inst.setOpcode(X86::PAUSE);
3237   }
3238 
3239   void createLfence(MCInst &Inst) const override {
3240     Inst.clear();
3241     Inst.setOpcode(X86::LFENCE);
3242   }
3243 
3244   bool createDirectCall(MCInst &Inst, const MCSymbol *Target, MCContext *Ctx,
3245                         bool IsTailCall) override {
3246     Inst.clear();
3247     Inst.setOpcode(IsTailCall ? X86::JMP_4 : X86::CALL64pcrel32);
3248     Inst.addOperand(MCOperand::createExpr(
3249         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
3250     if (IsTailCall)
3251       setTailCall(Inst);
3252     return true;
3253   }
3254 
3255   void createShortJmp(InstructionListType &Seq, const MCSymbol *Target,
3256                       MCContext *Ctx, bool IsTailCall) override {
3257     Seq.clear();
3258     MCInst Inst;
3259     Inst.setOpcode(X86::JMP_1);
3260     Inst.addOperand(MCOperand::createExpr(
3261         MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
3262     if (IsTailCall)
3263       setTailCall(Inst);
3264     Seq.emplace_back(Inst);
3265   }
3266 
3267   bool isConditionalMove(const MCInst &Inst) const override {
3268     unsigned OpCode = Inst.getOpcode();
3269     return (OpCode == X86::CMOV16rr || OpCode == X86::CMOV32rr ||
3270             OpCode == X86::CMOV64rr);
3271   }
3272 
3273   bool isBranchOnMem(const MCInst &Inst) const override {
3274     unsigned OpCode = Inst.getOpcode();
3275     if (OpCode == X86::CALL64m || (OpCode == X86::JMP32m && isTailCall(Inst)) ||
3276         OpCode == X86::JMP64m)
3277       return true;
3278 
3279     return false;
3280   }
3281 
3282   bool isBranchOnReg(const MCInst &Inst) const override {
3283     unsigned OpCode = Inst.getOpcode();
3284     if (OpCode == X86::CALL64r || (OpCode == X86::JMP32r && isTailCall(Inst)) ||
3285         OpCode == X86::JMP64r)
3286       return true;
3287 
3288     return false;
3289   }
3290 
3291   void createPushRegister(MCInst &Inst, MCPhysReg Reg,
3292                           unsigned Size) const override {
3293     Inst.clear();
3294     unsigned NewOpcode = 0;
3295     if (Reg == X86::EFLAGS) {
3296       switch (Size) {
3297       case 2: NewOpcode = X86::PUSHF16;  break;
3298       case 4: NewOpcode = X86::PUSHF32;  break;
3299       case 8: NewOpcode = X86::PUSHF64;  break;
3300       default:
3301         llvm_unreachable("Unexpected size");
3302       }
3303       Inst.setOpcode(NewOpcode);
3304       return;
3305     }
3306     switch (Size) {
3307     case 2: NewOpcode = X86::PUSH16r;  break;
3308     case 4: NewOpcode = X86::PUSH32r;  break;
3309     case 8: NewOpcode = X86::PUSH64r;  break;
3310     default:
3311       llvm_unreachable("Unexpected size");
3312     }
3313     Inst.setOpcode(NewOpcode);
3314     Inst.addOperand(MCOperand::createReg(Reg));
3315   }
3316 
3317   void createPopRegister(MCInst &Inst, MCPhysReg Reg,
3318                          unsigned Size) const override {
3319     Inst.clear();
3320     unsigned NewOpcode = 0;
3321     if (Reg == X86::EFLAGS) {
3322       switch (Size) {
3323       case 2: NewOpcode = X86::POPF16;  break;
3324       case 4: NewOpcode = X86::POPF32;  break;
3325       case 8: NewOpcode = X86::POPF64;  break;
3326       default:
3327         llvm_unreachable("Unexpected size");
3328       }
3329       Inst.setOpcode(NewOpcode);
3330       return;
3331     }
3332     switch (Size) {
3333     case 2: NewOpcode = X86::POP16r;  break;
3334     case 4: NewOpcode = X86::POP32r;  break;
3335     case 8: NewOpcode = X86::POP64r;  break;
3336     default:
3337       llvm_unreachable("Unexpected size");
3338     }
3339     Inst.setOpcode(NewOpcode);
3340     Inst.addOperand(MCOperand::createReg(Reg));
3341   }
3342 
3343   void createPushFlags(MCInst &Inst, unsigned Size) const override {
3344     return createPushRegister(Inst, X86::EFLAGS, Size);
3345   }
3346 
3347   void createPopFlags(MCInst &Inst, unsigned Size) const override {
3348     return createPopRegister(Inst, X86::EFLAGS, Size);
3349   }
3350 
3351   void createAddRegImm(MCInst &Inst, MCPhysReg Reg, int64_t Value,
3352                        unsigned Size) const {
3353     unsigned int Opcode;
3354     switch (Size) {
3355     case 1: Opcode = X86::ADD8ri; break;
3356     case 2: Opcode = X86::ADD16ri; break;
3357     case 4: Opcode = X86::ADD32ri; break;
3358     default:
3359       llvm_unreachable("Unexpected size");
3360     }
3361     Inst.setOpcode(Opcode);
3362     Inst.clear();
3363     Inst.addOperand(MCOperand::createReg(Reg));
3364     Inst.addOperand(MCOperand::createReg(Reg));
3365     Inst.addOperand(MCOperand::createImm(Value));
3366   }
3367 
3368   void createClearRegWithNoEFlagsUpdate(MCInst &Inst, MCPhysReg Reg,
3369                                         unsigned Size) const {
3370     unsigned int Opcode;
3371     switch (Size) {
3372     case 1: Opcode = X86::MOV8ri; break;
3373     case 2: Opcode = X86::MOV16ri; break;
3374     case 4: Opcode = X86::MOV32ri; break;
3375     case 8: Opcode = X86::MOV64ri; break;
3376     default:
3377       llvm_unreachable("Unexpected size");
3378     }
3379     Inst.setOpcode(Opcode);
3380     Inst.clear();
3381     Inst.addOperand(MCOperand::createReg(Reg));
3382     Inst.addOperand(MCOperand::createImm(0));
3383   }
3384 
3385   void createX86SaveOVFlagToRegister(MCInst &Inst, MCPhysReg Reg) const {
3386     Inst.setOpcode(X86::SETCCr);
3387     Inst.clear();
3388     Inst.addOperand(MCOperand::createReg(Reg));
3389     Inst.addOperand(MCOperand::createImm(X86::COND_O));
3390   }
3391 
3392   void createX86Lahf(MCInst &Inst) const {
3393     Inst.setOpcode(X86::LAHF);
3394     Inst.clear();
3395   }
3396 
3397   void createX86Sahf(MCInst &Inst) const {
3398     Inst.setOpcode(X86::SAHF);
3399     Inst.clear();
3400   }
3401 
3402   void createInstrIncMemory(InstructionListType &Instrs, const MCSymbol *Target,
3403                             MCContext *Ctx, bool IsLeaf) const override {
3404     unsigned int I = 0;
3405 
3406     Instrs.resize(IsLeaf ? 13 : 11);
3407     // Don't clobber application red zone (ABI dependent)
3408     if (IsLeaf)
3409       createStackPointerIncrement(Instrs[I++], 128,
3410                                   /*NoFlagsClobber=*/true);
3411 
3412     // Performance improvements based on the optimization discussed at
3413     // https://reviews.llvm.org/D6629
3414     // LAHF/SAHF are used instead of PUSHF/POPF
3415     // PUSHF
3416     createPushRegister(Instrs[I++], X86::RAX, 8);
3417     createClearRegWithNoEFlagsUpdate(Instrs[I++], X86::RAX, 8);
3418     createX86Lahf(Instrs[I++]);
3419     createPushRegister(Instrs[I++], X86::RAX, 8);
3420     createClearRegWithNoEFlagsUpdate(Instrs[I++], X86::RAX, 8);
3421     createX86SaveOVFlagToRegister(Instrs[I++], X86::AL);
3422     // LOCK INC
3423     createIncMemory(Instrs[I++], Target, Ctx);
3424     // POPF
3425     createAddRegImm(Instrs[I++], X86::AL, 127, 1);
3426     createPopRegister(Instrs[I++], X86::RAX, 8);
3427     createX86Sahf(Instrs[I++]);
3428     createPopRegister(Instrs[I++], X86::RAX, 8);
3429 
3430     if (IsLeaf)
3431       createStackPointerDecrement(Instrs[I], 128,
3432                                   /*NoFlagsClobber=*/true);
3433   }
3434 
3435   void createSwap(MCInst &Inst, MCPhysReg Source, MCPhysReg MemBaseReg,
3436                   int64_t Disp) const {
3437     Inst.setOpcode(X86::XCHG64rm);
3438     Inst.addOperand(MCOperand::createReg(Source));
3439     Inst.addOperand(MCOperand::createReg(Source));
3440     Inst.addOperand(MCOperand::createReg(MemBaseReg));      // BaseReg
3441     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3442     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3443     Inst.addOperand(MCOperand::createImm(Disp));            // Displacement
3444     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3445   }
3446 
3447   void createIndirectBranch(MCInst &Inst, MCPhysReg MemBaseReg,
3448                             int64_t Disp) const {
3449     Inst.setOpcode(X86::JMP64m);
3450     Inst.addOperand(MCOperand::createReg(MemBaseReg));      // BaseReg
3451     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3452     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3453     Inst.addOperand(MCOperand::createImm(Disp));            // Displacement
3454     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3455   }
3456 
3457   InstructionListType createInstrumentedIndirectCall(const MCInst &CallInst,
3458                                                      bool TailCall,
3459                                                      MCSymbol *HandlerFuncAddr,
3460                                                      int CallSiteID,
3461                                                      MCContext *Ctx) override {
3462     // Check if the target address expression used in the original indirect call
3463     // uses the stack pointer, which we are going to clobber.
3464     static BitVector SPAliases(getAliases(X86::RSP));
3465     bool UsesSP = false;
3466     // Skip defs.
3467     for (unsigned I = Info->get(CallInst.getOpcode()).getNumDefs(),
3468                   E = MCPlus::getNumPrimeOperands(CallInst);
3469          I != E; ++I) {
3470       const MCOperand &Operand = CallInst.getOperand(I);
3471       if (Operand.isReg() && SPAliases[Operand.getReg()]) {
3472         UsesSP = true;
3473         break;
3474       }
3475     }
3476 
3477     InstructionListType Insts;
3478     MCPhysReg TempReg = getIntArgRegister(0);
3479     // Code sequence used to enter indirect call instrumentation helper:
3480     //   push %rdi
3481     //   add $8, %rsp       ;; $rsp may be used in target, so fix it to prev val
3482     //   movq target, %rdi  ;; via convertIndirectCallTargetToLoad
3483     //   sub $8, %rsp       ;; restore correct stack value
3484     //   push %rdi
3485     //   movq $CallSiteID, %rdi
3486     //   push %rdi
3487     //   callq/jmp HandlerFuncAddr
3488     Insts.emplace_back();
3489     createPushRegister(Insts.back(), TempReg, 8);
3490     if (UsesSP) { // Only adjust SP if we really need to
3491       Insts.emplace_back();
3492       createStackPointerDecrement(Insts.back(), 8, /*NoFlagsClobber=*/false);
3493     }
3494     Insts.emplace_back(CallInst);
3495     // Insts.back() and CallInst now share the same annotation instruction.
3496     // Strip it from Insts.back(), only preserving tail call annotation.
3497     stripAnnotations(Insts.back(), /*KeepTC=*/true);
3498     convertIndirectCallToLoad(Insts.back(), TempReg);
3499     if (UsesSP) {
3500       Insts.emplace_back();
3501       createStackPointerIncrement(Insts.back(), 8, /*NoFlagsClobber=*/false);
3502     }
3503     Insts.emplace_back();
3504     createPushRegister(Insts.back(), TempReg, 8);
3505     Insts.emplace_back();
3506     createLoadImmediate(Insts.back(), TempReg, CallSiteID);
3507     Insts.emplace_back();
3508     createPushRegister(Insts.back(), TempReg, 8);
3509     Insts.emplace_back();
3510     createDirectCall(Insts.back(), HandlerFuncAddr, Ctx,
3511                      /*TailCall=*/TailCall);
3512     // Carry over metadata
3513     for (int I = MCPlus::getNumPrimeOperands(CallInst),
3514              E = CallInst.getNumOperands();
3515          I != E; ++I)
3516       Insts.back().addOperand(CallInst.getOperand(I));
3517 
3518     return Insts;
3519   }
3520 
3521   InstructionListType createInstrumentedIndCallHandlerExitBB() const override {
3522     const MCPhysReg TempReg = getIntArgRegister(0);
3523     // We just need to undo the sequence created for every ind call in
3524     // instrumentIndirectTarget(), which can be accomplished minimally with:
3525     //   popfq
3526     //   pop %rdi
3527     //   add $16, %rsp
3528     //   xchg (%rsp), %rdi
3529     //   jmp *-8(%rsp)
3530     InstructionListType Insts(5);
3531     createPopFlags(Insts[0], 8);
3532     createPopRegister(Insts[1], TempReg, 8);
3533     createStackPointerDecrement(Insts[2], 16, /*NoFlagsClobber=*/false);
3534     createSwap(Insts[3], TempReg, X86::RSP, 0);
3535     createIndirectBranch(Insts[4], X86::RSP, -8);
3536     return Insts;
3537   }
3538 
3539   InstructionListType
3540   createInstrumentedIndTailCallHandlerExitBB() const override {
3541     const MCPhysReg TempReg = getIntArgRegister(0);
3542     // Same thing as above, but for tail calls
3543     //   popfq
3544     //   add $16, %rsp
3545     //   pop %rdi
3546     //   jmp *-16(%rsp)
3547     InstructionListType Insts(4);
3548     createPopFlags(Insts[0], 8);
3549     createStackPointerDecrement(Insts[1], 16, /*NoFlagsClobber=*/false);
3550     createPopRegister(Insts[2], TempReg, 8);
3551     createIndirectBranch(Insts[3], X86::RSP, -16);
3552     return Insts;
3553   }
3554 
3555   InstructionListType
3556   createInstrumentedIndCallHandlerEntryBB(const MCSymbol *InstrTrampoline,
3557                                           const MCSymbol *IndCallHandler,
3558                                           MCContext *Ctx) override {
3559     const MCPhysReg TempReg = getIntArgRegister(0);
3560     // Code sequence used to check whether InstrTampoline was initialized
3561     // and call it if so, returns via IndCallHandler.
3562     //   pushfq
3563     //   mov    InstrTrampoline,%rdi
3564     //   cmp    $0x0,%rdi
3565     //   je     IndCallHandler
3566     //   callq  *%rdi
3567     //   jmpq   IndCallHandler
3568     InstructionListType Insts;
3569     Insts.emplace_back();
3570     createPushFlags(Insts.back(), 8);
3571     Insts.emplace_back();
3572     createMove(Insts.back(), InstrTrampoline, TempReg, Ctx);
3573     InstructionListType cmpJmp = createCmpJE(TempReg, 0, IndCallHandler, Ctx);
3574     Insts.insert(Insts.end(), cmpJmp.begin(), cmpJmp.end());
3575     Insts.emplace_back();
3576     Insts.back().setOpcode(X86::CALL64r);
3577     Insts.back().addOperand(MCOperand::createReg(TempReg));
3578     Insts.emplace_back();
3579     createDirectCall(Insts.back(), IndCallHandler, Ctx, /*IsTailCall*/ true);
3580     return Insts;
3581   }
3582 
3583   InstructionListType createNumCountersGetter(MCContext *Ctx) const override {
3584     InstructionListType Insts(2);
3585     MCSymbol *NumLocs = Ctx->getOrCreateSymbol("__bolt_num_counters");
3586     createMove(Insts[0], NumLocs, X86::EAX, Ctx);
3587     createReturn(Insts[1]);
3588     return Insts;
3589   }
3590 
3591   InstructionListType
3592   createInstrLocationsGetter(MCContext *Ctx) const override {
3593     InstructionListType Insts(2);
3594     MCSymbol *Locs = Ctx->getOrCreateSymbol("__bolt_instr_locations");
3595     createLea(Insts[0], Locs, X86::EAX, Ctx);
3596     createReturn(Insts[1]);
3597     return Insts;
3598   }
3599 
3600   InstructionListType createInstrTablesGetter(MCContext *Ctx) const override {
3601     InstructionListType Insts(2);
3602     MCSymbol *Locs = Ctx->getOrCreateSymbol("__bolt_instr_tables");
3603     createLea(Insts[0], Locs, X86::EAX, Ctx);
3604     createReturn(Insts[1]);
3605     return Insts;
3606   }
3607 
3608   InstructionListType createInstrNumFuncsGetter(MCContext *Ctx) const override {
3609     InstructionListType Insts(2);
3610     MCSymbol *NumFuncs = Ctx->getOrCreateSymbol("__bolt_instr_num_funcs");
3611     createMove(Insts[0], NumFuncs, X86::EAX, Ctx);
3612     createReturn(Insts[1]);
3613     return Insts;
3614   }
3615 
3616   InstructionListType createSymbolTrampoline(const MCSymbol *TgtSym,
3617                                              MCContext *Ctx) const override {
3618     InstructionListType Insts(1);
3619     createUncondBranch(Insts[0], TgtSym, Ctx);
3620     return Insts;
3621   }
3622 
3623   InstructionListType createDummyReturnFunction(MCContext *Ctx) const override {
3624     InstructionListType Insts(1);
3625     createReturn(Insts[0]);
3626     return Insts;
3627   }
3628 
3629   BlocksVectorTy indirectCallPromotion(
3630       const MCInst &CallInst,
3631       const std::vector<std::pair<MCSymbol *, uint64_t>> &Targets,
3632       const std::vector<std::pair<MCSymbol *, uint64_t>> &VtableSyms,
3633       const std::vector<MCInst *> &MethodFetchInsns,
3634       const bool MinimizeCodeSize, MCContext *Ctx) override {
3635     const bool IsTailCall = isTailCall(CallInst);
3636     const bool IsJumpTable = getJumpTable(CallInst) != 0;
3637     BlocksVectorTy Results;
3638 
3639     // Label for the current code block.
3640     MCSymbol *NextTarget = nullptr;
3641 
3642     // The join block which contains all the instructions following CallInst.
3643     // MergeBlock remains null if CallInst is a tail call.
3644     MCSymbol *MergeBlock = nullptr;
3645 
3646     unsigned FuncAddrReg = X86::R10;
3647 
3648     const bool LoadElim = !VtableSyms.empty();
3649     assert((!LoadElim || VtableSyms.size() == Targets.size()) &&
3650            "There must be a vtable entry for every method "
3651            "in the targets vector.");
3652 
3653     if (MinimizeCodeSize && !LoadElim) {
3654       std::set<unsigned> UsedRegs;
3655 
3656       for (unsigned int I = 0; I < MCPlus::getNumPrimeOperands(CallInst); ++I) {
3657         const MCOperand &Op = CallInst.getOperand(I);
3658         if (Op.isReg())
3659           UsedRegs.insert(Op.getReg());
3660       }
3661 
3662       if (UsedRegs.count(X86::R10) == 0)
3663         FuncAddrReg = X86::R10;
3664       else if (UsedRegs.count(X86::R11) == 0)
3665         FuncAddrReg = X86::R11;
3666       else
3667         return Results;
3668     }
3669 
3670     const auto jumpToMergeBlock = [&](InstructionListType &NewCall) {
3671       assert(MergeBlock);
3672       NewCall.push_back(CallInst);
3673       MCInst &Merge = NewCall.back();
3674       Merge.clear();
3675       createUncondBranch(Merge, MergeBlock, Ctx);
3676     };
3677 
3678     for (unsigned int i = 0; i < Targets.size(); ++i) {
3679       Results.emplace_back(NextTarget, InstructionListType());
3680       InstructionListType *NewCall = &Results.back().second;
3681 
3682       if (MinimizeCodeSize && !LoadElim) {
3683         // Load the call target into FuncAddrReg.
3684         NewCall->push_back(CallInst); // Copy CallInst in order to get SMLoc
3685         MCInst &Target = NewCall->back();
3686         Target.clear();
3687         Target.setOpcode(X86::MOV64ri32);
3688         Target.addOperand(MCOperand::createReg(FuncAddrReg));
3689         if (Targets[i].first) {
3690           // Is this OK?
3691           Target.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3692               Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3693         } else {
3694           const uint64_t Addr = Targets[i].second;
3695           // Immediate address is out of sign extended 32 bit range.
3696           if (int64_t(Addr) != int64_t(int32_t(Addr)))
3697             return BlocksVectorTy();
3698 
3699           Target.addOperand(MCOperand::createImm(Addr));
3700         }
3701 
3702         // Compare current call target to a specific address.
3703         NewCall->push_back(CallInst);
3704         MCInst &Compare = NewCall->back();
3705         Compare.clear();
3706         if (isBranchOnReg(CallInst))
3707           Compare.setOpcode(X86::CMP64rr);
3708         else if (CallInst.getOpcode() == X86::CALL64pcrel32)
3709           Compare.setOpcode(X86::CMP64ri32);
3710         else
3711           Compare.setOpcode(X86::CMP64rm);
3712 
3713         Compare.addOperand(MCOperand::createReg(FuncAddrReg));
3714 
3715         // TODO: Would be preferable to only load this value once.
3716         for (unsigned i = 0;
3717              i < Info->get(CallInst.getOpcode()).getNumOperands(); ++i)
3718           if (!CallInst.getOperand(i).isInst())
3719             Compare.addOperand(CallInst.getOperand(i));
3720       } else {
3721         // Compare current call target to a specific address.
3722         NewCall->push_back(CallInst);
3723         MCInst &Compare = NewCall->back();
3724         Compare.clear();
3725         if (isBranchOnReg(CallInst))
3726           Compare.setOpcode(X86::CMP64ri32);
3727         else
3728           Compare.setOpcode(X86::CMP64mi32);
3729 
3730         // Original call address.
3731         for (unsigned i = 0;
3732              i < Info->get(CallInst.getOpcode()).getNumOperands(); ++i)
3733           if (!CallInst.getOperand(i).isInst())
3734             Compare.addOperand(CallInst.getOperand(i));
3735 
3736         // Target address.
3737         if (Targets[i].first || LoadElim) {
3738           const MCSymbol *Sym =
3739               LoadElim ? VtableSyms[i].first : Targets[i].first;
3740           const uint64_t Addend = LoadElim ? VtableSyms[i].second : 0;
3741           const MCExpr *Expr = MCSymbolRefExpr::create(Sym, *Ctx);
3742           if (Addend)
3743             Expr = MCBinaryExpr::createAdd(
3744                 Expr, MCConstantExpr::create(Addend, *Ctx), *Ctx);
3745           Compare.addOperand(MCOperand::createExpr(Expr));
3746         } else {
3747           const uint64_t Addr = Targets[i].second;
3748           // Immediate address is out of sign extended 32 bit range.
3749           if (int64_t(Addr) != int64_t(int32_t(Addr)))
3750             return BlocksVectorTy();
3751 
3752           Compare.addOperand(MCOperand::createImm(Addr));
3753         }
3754       }
3755 
3756       // jump to next target compare.
3757       NextTarget =
3758           Ctx->createNamedTempSymbol(); // generate label for the next block
3759       NewCall->push_back(CallInst);
3760 
3761       if (IsJumpTable) {
3762         MCInst &Je = NewCall->back();
3763 
3764         // Jump to next compare if target addresses don't match.
3765         Je.clear();
3766         Je.setOpcode(X86::JCC_1);
3767         if (Targets[i].first)
3768           Je.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3769               Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3770         else
3771           Je.addOperand(MCOperand::createImm(Targets[i].second));
3772 
3773         Je.addOperand(MCOperand::createImm(X86::COND_E));
3774         assert(!isInvoke(CallInst));
3775       } else {
3776         MCInst &Jne = NewCall->back();
3777 
3778         // Jump to next compare if target addresses don't match.
3779         Jne.clear();
3780         Jne.setOpcode(X86::JCC_1);
3781         Jne.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3782             NextTarget, MCSymbolRefExpr::VK_None, *Ctx)));
3783         Jne.addOperand(MCOperand::createImm(X86::COND_NE));
3784 
3785         // Call specific target directly.
3786         Results.emplace_back(Ctx->createNamedTempSymbol(),
3787                              InstructionListType());
3788         NewCall = &Results.back().second;
3789         NewCall->push_back(CallInst);
3790         MCInst &CallOrJmp = NewCall->back();
3791 
3792         CallOrJmp.clear();
3793 
3794         if (MinimizeCodeSize && !LoadElim) {
3795           CallOrJmp.setOpcode(IsTailCall ? X86::JMP32r : X86::CALL64r);
3796           CallOrJmp.addOperand(MCOperand::createReg(FuncAddrReg));
3797         } else {
3798           CallOrJmp.setOpcode(IsTailCall ? X86::JMP_4 : X86::CALL64pcrel32);
3799 
3800           if (Targets[i].first)
3801             CallOrJmp.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3802                 Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3803           else
3804             CallOrJmp.addOperand(MCOperand::createImm(Targets[i].second));
3805         }
3806         if (IsTailCall)
3807           setTailCall(CallOrJmp);
3808 
3809         if (CallOrJmp.getOpcode() == X86::CALL64r ||
3810             CallOrJmp.getOpcode() == X86::CALL64pcrel32) {
3811           if (hasAnnotation(CallInst, "Offset"))
3812             // Annotated as duplicated call
3813             addAnnotation(CallOrJmp, "Offset",
3814                           getAnnotationAs<uint32_t>(CallInst, "Offset"));
3815         }
3816 
3817         if (isInvoke(CallInst) && !isInvoke(CallOrJmp)) {
3818           // Copy over any EH or GNU args size information from the original
3819           // call.
3820           Optional<MCPlus::MCLandingPad> EHInfo = getEHInfo(CallInst);
3821           if (EHInfo)
3822             addEHInfo(CallOrJmp, *EHInfo);
3823           int64_t GnuArgsSize = getGnuArgsSize(CallInst);
3824           if (GnuArgsSize >= 0)
3825             addGnuArgsSize(CallOrJmp, GnuArgsSize);
3826         }
3827 
3828         if (!IsTailCall) {
3829           // The fallthrough block for the most common target should be
3830           // the merge block.
3831           if (i == 0) {
3832             // Fallthrough to merge block.
3833             MergeBlock = Ctx->createNamedTempSymbol();
3834           } else {
3835             // Insert jump to the merge block if we are not doing a fallthrough.
3836             jumpToMergeBlock(*NewCall);
3837           }
3838         }
3839       }
3840     }
3841 
3842     // Cold call block.
3843     Results.emplace_back(NextTarget, InstructionListType());
3844     InstructionListType &NewCall = Results.back().second;
3845     for (const MCInst *Inst : MethodFetchInsns)
3846       if (Inst != &CallInst)
3847         NewCall.push_back(*Inst);
3848     NewCall.push_back(CallInst);
3849 
3850     // Jump to merge block from cold call block
3851     if (!IsTailCall && !IsJumpTable) {
3852       jumpToMergeBlock(NewCall);
3853 
3854       // Record merge block
3855       Results.emplace_back(MergeBlock, InstructionListType());
3856     }
3857 
3858     return Results;
3859   }
3860 
3861   BlocksVectorTy jumpTablePromotion(
3862       const MCInst &IJmpInst,
3863       const std::vector<std::pair<MCSymbol *, uint64_t>> &Targets,
3864       const std::vector<MCInst *> &TargetFetchInsns,
3865       MCContext *Ctx) const override {
3866     assert(getJumpTable(IJmpInst) != 0);
3867     uint16_t IndexReg = getAnnotationAs<uint16_t>(IJmpInst, "JTIndexReg");
3868     if (IndexReg == 0)
3869       return BlocksVectorTy();
3870 
3871     BlocksVectorTy Results;
3872 
3873     // Label for the current code block.
3874     MCSymbol *NextTarget = nullptr;
3875 
3876     for (unsigned int i = 0; i < Targets.size(); ++i) {
3877       Results.emplace_back(NextTarget, InstructionListType());
3878       InstructionListType *CurBB = &Results.back().second;
3879 
3880       // Compare current index to a specific index.
3881       CurBB->emplace_back(MCInst());
3882       MCInst &CompareInst = CurBB->back();
3883       CompareInst.setLoc(IJmpInst.getLoc());
3884       CompareInst.setOpcode(X86::CMP64ri32);
3885       CompareInst.addOperand(MCOperand::createReg(IndexReg));
3886 
3887       const uint64_t CaseIdx = Targets[i].second;
3888       // Immediate address is out of sign extended 32 bit range.
3889       if (int64_t(CaseIdx) != int64_t(int32_t(CaseIdx)))
3890         return BlocksVectorTy();
3891 
3892       CompareInst.addOperand(MCOperand::createImm(CaseIdx));
3893       shortenInstruction(CompareInst);
3894 
3895       // jump to next target compare.
3896       NextTarget =
3897           Ctx->createNamedTempSymbol(); // generate label for the next block
3898       CurBB->push_back(MCInst());
3899 
3900       MCInst &JEInst = CurBB->back();
3901       JEInst.setLoc(IJmpInst.getLoc());
3902 
3903       // Jump to target if indices match
3904       JEInst.setOpcode(X86::JCC_1);
3905       JEInst.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(
3906           Targets[i].first, MCSymbolRefExpr::VK_None, *Ctx)));
3907       JEInst.addOperand(MCOperand::createImm(X86::COND_E));
3908     }
3909 
3910     // Cold call block.
3911     Results.emplace_back(NextTarget, InstructionListType());
3912     InstructionListType &CurBB = Results.back().second;
3913     for (const MCInst *Inst : TargetFetchInsns)
3914       if (Inst != &IJmpInst)
3915         CurBB.push_back(*Inst);
3916 
3917     CurBB.push_back(IJmpInst);
3918 
3919     return Results;
3920   }
3921 
3922 private:
3923   bool createMove(MCInst &Inst, const MCSymbol *Src, unsigned Reg,
3924                   MCContext *Ctx) const {
3925     Inst.setOpcode(X86::MOV64rm);
3926     Inst.addOperand(MCOperand::createReg(Reg));
3927     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
3928     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3929     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3930     Inst.addOperand(MCOperand::createExpr(
3931         MCSymbolRefExpr::create(Src, MCSymbolRefExpr::VK_None,
3932                                 *Ctx)));                    // Displacement
3933     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3934 
3935     return true;
3936   }
3937 
3938   bool createLea(MCInst &Inst, const MCSymbol *Src, unsigned Reg,
3939                  MCContext *Ctx) const {
3940     Inst.setOpcode(X86::LEA64r);
3941     Inst.addOperand(MCOperand::createReg(Reg));
3942     Inst.addOperand(MCOperand::createReg(X86::RIP));        // BaseReg
3943     Inst.addOperand(MCOperand::createImm(1));               // ScaleAmt
3944     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // IndexReg
3945     Inst.addOperand(MCOperand::createExpr(
3946         MCSymbolRefExpr::create(Src, MCSymbolRefExpr::VK_None,
3947                                 *Ctx)));                    // Displacement
3948     Inst.addOperand(MCOperand::createReg(X86::NoRegister)); // AddrSegmentReg
3949     return true;
3950   }
3951 };
3952 
3953 } // namespace
3954 
3955 namespace llvm {
3956 namespace bolt {
3957 
3958 MCPlusBuilder *createX86MCPlusBuilder(const MCInstrAnalysis *Analysis,
3959                                       const MCInstrInfo *Info,
3960                                       const MCRegisterInfo *RegInfo) {
3961   return new X86MCPlusBuilder(Analysis, Info, RegInfo);
3962 }
3963 
3964 } // namespace bolt
3965 } // namespace llvm
3966