1 //===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the X86 implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "X86InstrInfo.h"
14 #include "X86.h"
15 #include "X86InstrBuilder.h"
16 #include "X86InstrFoldTables.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/Sequence.h"
22 #include "llvm/CodeGen/LivePhysRegs.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineConstantPool.h"
25 #include "llvm/CodeGen/MachineDominators.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/StackMaps.h"
31 #include "llvm/IR/DebugInfoMetadata.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/MC/MCAsmInfo.h"
35 #include "llvm/MC/MCExpr.h"
36 #include "llvm/MC/MCInst.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Target/TargetOptions.h"
42 
43 using namespace llvm;
44 
45 #define DEBUG_TYPE "x86-instr-info"
46 
47 #define GET_INSTRINFO_CTOR_DTOR
48 #include "X86GenInstrInfo.inc"
49 
50 static cl::opt<bool>
51     NoFusing("disable-spill-fusing",
52              cl::desc("Disable fusing of spill code into instructions"),
53              cl::Hidden);
54 static cl::opt<bool>
55 PrintFailedFusing("print-failed-fuse-candidates",
56                   cl::desc("Print instructions that the allocator wants to"
57                            " fuse, but the X86 backend currently can't"),
58                   cl::Hidden);
59 static cl::opt<bool>
60 ReMatPICStubLoad("remat-pic-stub-load",
61                  cl::desc("Re-materialize load from stub in PIC mode"),
62                  cl::init(false), cl::Hidden);
63 static cl::opt<unsigned>
64 PartialRegUpdateClearance("partial-reg-update-clearance",
65                           cl::desc("Clearance between two register writes "
66                                    "for inserting XOR to avoid partial "
67                                    "register update"),
68                           cl::init(64), cl::Hidden);
69 static cl::opt<unsigned>
70 UndefRegClearance("undef-reg-clearance",
71                   cl::desc("How many idle instructions we would like before "
72                            "certain undef register reads"),
73                   cl::init(128), cl::Hidden);
74 
75 
76 // Pin the vtable to this file.
77 void X86InstrInfo::anchor() {}
78 
79 X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
80     : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
81                                                : X86::ADJCALLSTACKDOWN32),
82                       (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
83                                                : X86::ADJCALLSTACKUP32),
84                       X86::CATCHRET,
85                       (STI.is64Bit() ? X86::RET64 : X86::RET32)),
86       Subtarget(STI), RI(STI.getTargetTriple()) {
87 }
88 
89 bool
90 X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
91                                     Register &SrcReg, Register &DstReg,
92                                     unsigned &SubIdx) const {
93   switch (MI.getOpcode()) {
94   default: break;
95   case X86::MOVSX16rr8:
96   case X86::MOVZX16rr8:
97   case X86::MOVSX32rr8:
98   case X86::MOVZX32rr8:
99   case X86::MOVSX64rr8:
100     if (!Subtarget.is64Bit())
101       // It's not always legal to reference the low 8-bit of the larger
102       // register in 32-bit mode.
103       return false;
104     LLVM_FALLTHROUGH;
105   case X86::MOVSX32rr16:
106   case X86::MOVZX32rr16:
107   case X86::MOVSX64rr16:
108   case X86::MOVSX64rr32: {
109     if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
110       // Be conservative.
111       return false;
112     SrcReg = MI.getOperand(1).getReg();
113     DstReg = MI.getOperand(0).getReg();
114     switch (MI.getOpcode()) {
115     default: llvm_unreachable("Unreachable!");
116     case X86::MOVSX16rr8:
117     case X86::MOVZX16rr8:
118     case X86::MOVSX32rr8:
119     case X86::MOVZX32rr8:
120     case X86::MOVSX64rr8:
121       SubIdx = X86::sub_8bit;
122       break;
123     case X86::MOVSX32rr16:
124     case X86::MOVZX32rr16:
125     case X86::MOVSX64rr16:
126       SubIdx = X86::sub_16bit;
127       break;
128     case X86::MOVSX64rr32:
129       SubIdx = X86::sub_32bit;
130       break;
131     }
132     return true;
133   }
134   }
135   return false;
136 }
137 
138 bool X86InstrInfo::isDataInvariant(MachineInstr &MI) {
139   switch (MI.getOpcode()) {
140   default:
141     // By default, assume that the instruction is not data invariant.
142     return false;
143 
144     // Some target-independent operations that trivially lower to data-invariant
145     // instructions.
146   case TargetOpcode::COPY:
147   case TargetOpcode::INSERT_SUBREG:
148   case TargetOpcode::SUBREG_TO_REG:
149     return true;
150 
151   // On x86 it is believed that imul is constant time w.r.t. the loaded data.
152   // However, they set flags and are perhaps the most surprisingly constant
153   // time operations so we call them out here separately.
154   case X86::IMUL16rr:
155   case X86::IMUL16rri8:
156   case X86::IMUL16rri:
157   case X86::IMUL32rr:
158   case X86::IMUL32rri8:
159   case X86::IMUL32rri:
160   case X86::IMUL64rr:
161   case X86::IMUL64rri32:
162   case X86::IMUL64rri8:
163 
164   // Bit scanning and counting instructions that are somewhat surprisingly
165   // constant time as they scan across bits and do other fairly complex
166   // operations like popcnt, but are believed to be constant time on x86.
167   // However, these set flags.
168   case X86::BSF16rr:
169   case X86::BSF32rr:
170   case X86::BSF64rr:
171   case X86::BSR16rr:
172   case X86::BSR32rr:
173   case X86::BSR64rr:
174   case X86::LZCNT16rr:
175   case X86::LZCNT32rr:
176   case X86::LZCNT64rr:
177   case X86::POPCNT16rr:
178   case X86::POPCNT32rr:
179   case X86::POPCNT64rr:
180   case X86::TZCNT16rr:
181   case X86::TZCNT32rr:
182   case X86::TZCNT64rr:
183 
184   // Bit manipulation instructions are effectively combinations of basic
185   // arithmetic ops, and should still execute in constant time. These also
186   // set flags.
187   case X86::BLCFILL32rr:
188   case X86::BLCFILL64rr:
189   case X86::BLCI32rr:
190   case X86::BLCI64rr:
191   case X86::BLCIC32rr:
192   case X86::BLCIC64rr:
193   case X86::BLCMSK32rr:
194   case X86::BLCMSK64rr:
195   case X86::BLCS32rr:
196   case X86::BLCS64rr:
197   case X86::BLSFILL32rr:
198   case X86::BLSFILL64rr:
199   case X86::BLSI32rr:
200   case X86::BLSI64rr:
201   case X86::BLSIC32rr:
202   case X86::BLSIC64rr:
203   case X86::BLSMSK32rr:
204   case X86::BLSMSK64rr:
205   case X86::BLSR32rr:
206   case X86::BLSR64rr:
207   case X86::TZMSK32rr:
208   case X86::TZMSK64rr:
209 
210   // Bit extracting and clearing instructions should execute in constant time,
211   // and set flags.
212   case X86::BEXTR32rr:
213   case X86::BEXTR64rr:
214   case X86::BEXTRI32ri:
215   case X86::BEXTRI64ri:
216   case X86::BZHI32rr:
217   case X86::BZHI64rr:
218 
219   // Shift and rotate.
220   case X86::ROL8r1:
221   case X86::ROL16r1:
222   case X86::ROL32r1:
223   case X86::ROL64r1:
224   case X86::ROL8rCL:
225   case X86::ROL16rCL:
226   case X86::ROL32rCL:
227   case X86::ROL64rCL:
228   case X86::ROL8ri:
229   case X86::ROL16ri:
230   case X86::ROL32ri:
231   case X86::ROL64ri:
232   case X86::ROR8r1:
233   case X86::ROR16r1:
234   case X86::ROR32r1:
235   case X86::ROR64r1:
236   case X86::ROR8rCL:
237   case X86::ROR16rCL:
238   case X86::ROR32rCL:
239   case X86::ROR64rCL:
240   case X86::ROR8ri:
241   case X86::ROR16ri:
242   case X86::ROR32ri:
243   case X86::ROR64ri:
244   case X86::SAR8r1:
245   case X86::SAR16r1:
246   case X86::SAR32r1:
247   case X86::SAR64r1:
248   case X86::SAR8rCL:
249   case X86::SAR16rCL:
250   case X86::SAR32rCL:
251   case X86::SAR64rCL:
252   case X86::SAR8ri:
253   case X86::SAR16ri:
254   case X86::SAR32ri:
255   case X86::SAR64ri:
256   case X86::SHL8r1:
257   case X86::SHL16r1:
258   case X86::SHL32r1:
259   case X86::SHL64r1:
260   case X86::SHL8rCL:
261   case X86::SHL16rCL:
262   case X86::SHL32rCL:
263   case X86::SHL64rCL:
264   case X86::SHL8ri:
265   case X86::SHL16ri:
266   case X86::SHL32ri:
267   case X86::SHL64ri:
268   case X86::SHR8r1:
269   case X86::SHR16r1:
270   case X86::SHR32r1:
271   case X86::SHR64r1:
272   case X86::SHR8rCL:
273   case X86::SHR16rCL:
274   case X86::SHR32rCL:
275   case X86::SHR64rCL:
276   case X86::SHR8ri:
277   case X86::SHR16ri:
278   case X86::SHR32ri:
279   case X86::SHR64ri:
280   case X86::SHLD16rrCL:
281   case X86::SHLD32rrCL:
282   case X86::SHLD64rrCL:
283   case X86::SHLD16rri8:
284   case X86::SHLD32rri8:
285   case X86::SHLD64rri8:
286   case X86::SHRD16rrCL:
287   case X86::SHRD32rrCL:
288   case X86::SHRD64rrCL:
289   case X86::SHRD16rri8:
290   case X86::SHRD32rri8:
291   case X86::SHRD64rri8:
292 
293   // Basic arithmetic is constant time on the input but does set flags.
294   case X86::ADC8rr:
295   case X86::ADC8ri:
296   case X86::ADC16rr:
297   case X86::ADC16ri:
298   case X86::ADC16ri8:
299   case X86::ADC32rr:
300   case X86::ADC32ri:
301   case X86::ADC32ri8:
302   case X86::ADC64rr:
303   case X86::ADC64ri8:
304   case X86::ADC64ri32:
305   case X86::ADD8rr:
306   case X86::ADD8ri:
307   case X86::ADD16rr:
308   case X86::ADD16ri:
309   case X86::ADD16ri8:
310   case X86::ADD32rr:
311   case X86::ADD32ri:
312   case X86::ADD32ri8:
313   case X86::ADD64rr:
314   case X86::ADD64ri8:
315   case X86::ADD64ri32:
316   case X86::AND8rr:
317   case X86::AND8ri:
318   case X86::AND16rr:
319   case X86::AND16ri:
320   case X86::AND16ri8:
321   case X86::AND32rr:
322   case X86::AND32ri:
323   case X86::AND32ri8:
324   case X86::AND64rr:
325   case X86::AND64ri8:
326   case X86::AND64ri32:
327   case X86::OR8rr:
328   case X86::OR8ri:
329   case X86::OR16rr:
330   case X86::OR16ri:
331   case X86::OR16ri8:
332   case X86::OR32rr:
333   case X86::OR32ri:
334   case X86::OR32ri8:
335   case X86::OR64rr:
336   case X86::OR64ri8:
337   case X86::OR64ri32:
338   case X86::SBB8rr:
339   case X86::SBB8ri:
340   case X86::SBB16rr:
341   case X86::SBB16ri:
342   case X86::SBB16ri8:
343   case X86::SBB32rr:
344   case X86::SBB32ri:
345   case X86::SBB32ri8:
346   case X86::SBB64rr:
347   case X86::SBB64ri8:
348   case X86::SBB64ri32:
349   case X86::SUB8rr:
350   case X86::SUB8ri:
351   case X86::SUB16rr:
352   case X86::SUB16ri:
353   case X86::SUB16ri8:
354   case X86::SUB32rr:
355   case X86::SUB32ri:
356   case X86::SUB32ri8:
357   case X86::SUB64rr:
358   case X86::SUB64ri8:
359   case X86::SUB64ri32:
360   case X86::XOR8rr:
361   case X86::XOR8ri:
362   case X86::XOR16rr:
363   case X86::XOR16ri:
364   case X86::XOR16ri8:
365   case X86::XOR32rr:
366   case X86::XOR32ri:
367   case X86::XOR32ri8:
368   case X86::XOR64rr:
369   case X86::XOR64ri8:
370   case X86::XOR64ri32:
371   // Arithmetic with just 32-bit and 64-bit variants and no immediates.
372   case X86::ADCX32rr:
373   case X86::ADCX64rr:
374   case X86::ADOX32rr:
375   case X86::ADOX64rr:
376   case X86::ANDN32rr:
377   case X86::ANDN64rr:
378   // Unary arithmetic operations.
379   case X86::DEC8r:
380   case X86::DEC16r:
381   case X86::DEC32r:
382   case X86::DEC64r:
383   case X86::INC8r:
384   case X86::INC16r:
385   case X86::INC32r:
386   case X86::INC64r:
387   case X86::NEG8r:
388   case X86::NEG16r:
389   case X86::NEG32r:
390   case X86::NEG64r:
391 
392   // Unlike other arithmetic, NOT doesn't set EFLAGS.
393   case X86::NOT8r:
394   case X86::NOT16r:
395   case X86::NOT32r:
396   case X86::NOT64r:
397 
398   // Various move instructions used to zero or sign extend things. Note that we
399   // intentionally don't support the _NOREX variants as we can't handle that
400   // register constraint anyways.
401   case X86::MOVSX16rr8:
402   case X86::MOVSX32rr8:
403   case X86::MOVSX32rr16:
404   case X86::MOVSX64rr8:
405   case X86::MOVSX64rr16:
406   case X86::MOVSX64rr32:
407   case X86::MOVZX16rr8:
408   case X86::MOVZX32rr8:
409   case X86::MOVZX32rr16:
410   case X86::MOVZX64rr8:
411   case X86::MOVZX64rr16:
412   case X86::MOV32rr:
413 
414   // Arithmetic instructions that are both constant time and don't set flags.
415   case X86::RORX32ri:
416   case X86::RORX64ri:
417   case X86::SARX32rr:
418   case X86::SARX64rr:
419   case X86::SHLX32rr:
420   case X86::SHLX64rr:
421   case X86::SHRX32rr:
422   case X86::SHRX64rr:
423 
424   // LEA doesn't actually access memory, and its arithmetic is constant time.
425   case X86::LEA16r:
426   case X86::LEA32r:
427   case X86::LEA64_32r:
428   case X86::LEA64r:
429     return true;
430   }
431 }
432 
433 bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) {
434   switch (MI.getOpcode()) {
435   default:
436     // By default, assume that the load will immediately leak.
437     return false;
438 
439   // On x86 it is believed that imul is constant time w.r.t. the loaded data.
440   // However, they set flags and are perhaps the most surprisingly constant
441   // time operations so we call them out here separately.
442   case X86::IMUL16rm:
443   case X86::IMUL16rmi8:
444   case X86::IMUL16rmi:
445   case X86::IMUL32rm:
446   case X86::IMUL32rmi8:
447   case X86::IMUL32rmi:
448   case X86::IMUL64rm:
449   case X86::IMUL64rmi32:
450   case X86::IMUL64rmi8:
451 
452   // Bit scanning and counting instructions that are somewhat surprisingly
453   // constant time as they scan across bits and do other fairly complex
454   // operations like popcnt, but are believed to be constant time on x86.
455   // However, these set flags.
456   case X86::BSF16rm:
457   case X86::BSF32rm:
458   case X86::BSF64rm:
459   case X86::BSR16rm:
460   case X86::BSR32rm:
461   case X86::BSR64rm:
462   case X86::LZCNT16rm:
463   case X86::LZCNT32rm:
464   case X86::LZCNT64rm:
465   case X86::POPCNT16rm:
466   case X86::POPCNT32rm:
467   case X86::POPCNT64rm:
468   case X86::TZCNT16rm:
469   case X86::TZCNT32rm:
470   case X86::TZCNT64rm:
471 
472   // Bit manipulation instructions are effectively combinations of basic
473   // arithmetic ops, and should still execute in constant time. These also
474   // set flags.
475   case X86::BLCFILL32rm:
476   case X86::BLCFILL64rm:
477   case X86::BLCI32rm:
478   case X86::BLCI64rm:
479   case X86::BLCIC32rm:
480   case X86::BLCIC64rm:
481   case X86::BLCMSK32rm:
482   case X86::BLCMSK64rm:
483   case X86::BLCS32rm:
484   case X86::BLCS64rm:
485   case X86::BLSFILL32rm:
486   case X86::BLSFILL64rm:
487   case X86::BLSI32rm:
488   case X86::BLSI64rm:
489   case X86::BLSIC32rm:
490   case X86::BLSIC64rm:
491   case X86::BLSMSK32rm:
492   case X86::BLSMSK64rm:
493   case X86::BLSR32rm:
494   case X86::BLSR64rm:
495   case X86::TZMSK32rm:
496   case X86::TZMSK64rm:
497 
498   // Bit extracting and clearing instructions should execute in constant time,
499   // and set flags.
500   case X86::BEXTR32rm:
501   case X86::BEXTR64rm:
502   case X86::BEXTRI32mi:
503   case X86::BEXTRI64mi:
504   case X86::BZHI32rm:
505   case X86::BZHI64rm:
506 
507   // Basic arithmetic is constant time on the input but does set flags.
508   case X86::ADC8rm:
509   case X86::ADC16rm:
510   case X86::ADC32rm:
511   case X86::ADC64rm:
512   case X86::ADCX32rm:
513   case X86::ADCX64rm:
514   case X86::ADD8rm:
515   case X86::ADD16rm:
516   case X86::ADD32rm:
517   case X86::ADD64rm:
518   case X86::ADOX32rm:
519   case X86::ADOX64rm:
520   case X86::AND8rm:
521   case X86::AND16rm:
522   case X86::AND32rm:
523   case X86::AND64rm:
524   case X86::ANDN32rm:
525   case X86::ANDN64rm:
526   case X86::OR8rm:
527   case X86::OR16rm:
528   case X86::OR32rm:
529   case X86::OR64rm:
530   case X86::SBB8rm:
531   case X86::SBB16rm:
532   case X86::SBB32rm:
533   case X86::SBB64rm:
534   case X86::SUB8rm:
535   case X86::SUB16rm:
536   case X86::SUB32rm:
537   case X86::SUB64rm:
538   case X86::XOR8rm:
539   case X86::XOR16rm:
540   case X86::XOR32rm:
541   case X86::XOR64rm:
542 
543   // Integer multiply w/o affecting flags is still believed to be constant
544   // time on x86. Called out separately as this is among the most surprising
545   // instructions to exhibit that behavior.
546   case X86::MULX32rm:
547   case X86::MULX64rm:
548 
549   // Arithmetic instructions that are both constant time and don't set flags.
550   case X86::RORX32mi:
551   case X86::RORX64mi:
552   case X86::SARX32rm:
553   case X86::SARX64rm:
554   case X86::SHLX32rm:
555   case X86::SHLX64rm:
556   case X86::SHRX32rm:
557   case X86::SHRX64rm:
558 
559   // Conversions are believed to be constant time and don't set flags.
560   case X86::CVTTSD2SI64rm:
561   case X86::VCVTTSD2SI64rm:
562   case X86::VCVTTSD2SI64Zrm:
563   case X86::CVTTSD2SIrm:
564   case X86::VCVTTSD2SIrm:
565   case X86::VCVTTSD2SIZrm:
566   case X86::CVTTSS2SI64rm:
567   case X86::VCVTTSS2SI64rm:
568   case X86::VCVTTSS2SI64Zrm:
569   case X86::CVTTSS2SIrm:
570   case X86::VCVTTSS2SIrm:
571   case X86::VCVTTSS2SIZrm:
572   case X86::CVTSI2SDrm:
573   case X86::VCVTSI2SDrm:
574   case X86::VCVTSI2SDZrm:
575   case X86::CVTSI2SSrm:
576   case X86::VCVTSI2SSrm:
577   case X86::VCVTSI2SSZrm:
578   case X86::CVTSI642SDrm:
579   case X86::VCVTSI642SDrm:
580   case X86::VCVTSI642SDZrm:
581   case X86::CVTSI642SSrm:
582   case X86::VCVTSI642SSrm:
583   case X86::VCVTSI642SSZrm:
584   case X86::CVTSS2SDrm:
585   case X86::VCVTSS2SDrm:
586   case X86::VCVTSS2SDZrm:
587   case X86::CVTSD2SSrm:
588   case X86::VCVTSD2SSrm:
589   case X86::VCVTSD2SSZrm:
590   // AVX512 added unsigned integer conversions.
591   case X86::VCVTTSD2USI64Zrm:
592   case X86::VCVTTSD2USIZrm:
593   case X86::VCVTTSS2USI64Zrm:
594   case X86::VCVTTSS2USIZrm:
595   case X86::VCVTUSI2SDZrm:
596   case X86::VCVTUSI642SDZrm:
597   case X86::VCVTUSI2SSZrm:
598   case X86::VCVTUSI642SSZrm:
599 
600   // Loads to register don't set flags.
601   case X86::MOV8rm:
602   case X86::MOV8rm_NOREX:
603   case X86::MOV16rm:
604   case X86::MOV32rm:
605   case X86::MOV64rm:
606   case X86::MOVSX16rm8:
607   case X86::MOVSX32rm16:
608   case X86::MOVSX32rm8:
609   case X86::MOVSX32rm8_NOREX:
610   case X86::MOVSX64rm16:
611   case X86::MOVSX64rm32:
612   case X86::MOVSX64rm8:
613   case X86::MOVZX16rm8:
614   case X86::MOVZX32rm16:
615   case X86::MOVZX32rm8:
616   case X86::MOVZX32rm8_NOREX:
617   case X86::MOVZX64rm16:
618   case X86::MOVZX64rm8:
619     return true;
620   }
621 }
622 
623 int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
624   const MachineFunction *MF = MI.getParent()->getParent();
625   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
626 
627   if (isFrameInstr(MI)) {
628     int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign());
629     SPAdj -= getFrameAdjustment(MI);
630     if (!isFrameSetup(MI))
631       SPAdj = -SPAdj;
632     return SPAdj;
633   }
634 
635   // To know whether a call adjusts the stack, we need information
636   // that is bound to the following ADJCALLSTACKUP pseudo.
637   // Look for the next ADJCALLSTACKUP that follows the call.
638   if (MI.isCall()) {
639     const MachineBasicBlock *MBB = MI.getParent();
640     auto I = ++MachineBasicBlock::const_iterator(MI);
641     for (auto E = MBB->end(); I != E; ++I) {
642       if (I->getOpcode() == getCallFrameDestroyOpcode() ||
643           I->isCall())
644         break;
645     }
646 
647     // If we could not find a frame destroy opcode, then it has already
648     // been simplified, so we don't care.
649     if (I->getOpcode() != getCallFrameDestroyOpcode())
650       return 0;
651 
652     return -(I->getOperand(1).getImm());
653   }
654 
655   // Currently handle only PUSHes we can reasonably expect to see
656   // in call sequences
657   switch (MI.getOpcode()) {
658   default:
659     return 0;
660   case X86::PUSH32i8:
661   case X86::PUSH32r:
662   case X86::PUSH32rmm:
663   case X86::PUSH32rmr:
664   case X86::PUSHi32:
665     return 4;
666   case X86::PUSH64i8:
667   case X86::PUSH64r:
668   case X86::PUSH64rmm:
669   case X86::PUSH64rmr:
670   case X86::PUSH64i32:
671     return 8;
672   }
673 }
674 
675 /// Return true and the FrameIndex if the specified
676 /// operand and follow operands form a reference to the stack frame.
677 bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
678                                   int &FrameIndex) const {
679   if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
680       MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
681       MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
682       MI.getOperand(Op + X86::AddrDisp).isImm() &&
683       MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
684       MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
685       MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
686     FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
687     return true;
688   }
689   return false;
690 }
691 
692 static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
693   switch (Opcode) {
694   default:
695     return false;
696   case X86::MOV8rm:
697   case X86::KMOVBkm:
698     MemBytes = 1;
699     return true;
700   case X86::MOV16rm:
701   case X86::KMOVWkm:
702   case X86::VMOVSHZrm:
703   case X86::VMOVSHZrm_alt:
704     MemBytes = 2;
705     return true;
706   case X86::MOV32rm:
707   case X86::MOVSSrm:
708   case X86::MOVSSrm_alt:
709   case X86::VMOVSSrm:
710   case X86::VMOVSSrm_alt:
711   case X86::VMOVSSZrm:
712   case X86::VMOVSSZrm_alt:
713   case X86::KMOVDkm:
714     MemBytes = 4;
715     return true;
716   case X86::MOV64rm:
717   case X86::LD_Fp64m:
718   case X86::MOVSDrm:
719   case X86::MOVSDrm_alt:
720   case X86::VMOVSDrm:
721   case X86::VMOVSDrm_alt:
722   case X86::VMOVSDZrm:
723   case X86::VMOVSDZrm_alt:
724   case X86::MMX_MOVD64rm:
725   case X86::MMX_MOVQ64rm:
726   case X86::KMOVQkm:
727     MemBytes = 8;
728     return true;
729   case X86::MOVAPSrm:
730   case X86::MOVUPSrm:
731   case X86::MOVAPDrm:
732   case X86::MOVUPDrm:
733   case X86::MOVDQArm:
734   case X86::MOVDQUrm:
735   case X86::VMOVAPSrm:
736   case X86::VMOVUPSrm:
737   case X86::VMOVAPDrm:
738   case X86::VMOVUPDrm:
739   case X86::VMOVDQArm:
740   case X86::VMOVDQUrm:
741   case X86::VMOVAPSZ128rm:
742   case X86::VMOVUPSZ128rm:
743   case X86::VMOVAPSZ128rm_NOVLX:
744   case X86::VMOVUPSZ128rm_NOVLX:
745   case X86::VMOVAPDZ128rm:
746   case X86::VMOVUPDZ128rm:
747   case X86::VMOVDQU8Z128rm:
748   case X86::VMOVDQU16Z128rm:
749   case X86::VMOVDQA32Z128rm:
750   case X86::VMOVDQU32Z128rm:
751   case X86::VMOVDQA64Z128rm:
752   case X86::VMOVDQU64Z128rm:
753     MemBytes = 16;
754     return true;
755   case X86::VMOVAPSYrm:
756   case X86::VMOVUPSYrm:
757   case X86::VMOVAPDYrm:
758   case X86::VMOVUPDYrm:
759   case X86::VMOVDQAYrm:
760   case X86::VMOVDQUYrm:
761   case X86::VMOVAPSZ256rm:
762   case X86::VMOVUPSZ256rm:
763   case X86::VMOVAPSZ256rm_NOVLX:
764   case X86::VMOVUPSZ256rm_NOVLX:
765   case X86::VMOVAPDZ256rm:
766   case X86::VMOVUPDZ256rm:
767   case X86::VMOVDQU8Z256rm:
768   case X86::VMOVDQU16Z256rm:
769   case X86::VMOVDQA32Z256rm:
770   case X86::VMOVDQU32Z256rm:
771   case X86::VMOVDQA64Z256rm:
772   case X86::VMOVDQU64Z256rm:
773     MemBytes = 32;
774     return true;
775   case X86::VMOVAPSZrm:
776   case X86::VMOVUPSZrm:
777   case X86::VMOVAPDZrm:
778   case X86::VMOVUPDZrm:
779   case X86::VMOVDQU8Zrm:
780   case X86::VMOVDQU16Zrm:
781   case X86::VMOVDQA32Zrm:
782   case X86::VMOVDQU32Zrm:
783   case X86::VMOVDQA64Zrm:
784   case X86::VMOVDQU64Zrm:
785     MemBytes = 64;
786     return true;
787   }
788 }
789 
790 static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
791   switch (Opcode) {
792   default:
793     return false;
794   case X86::MOV8mr:
795   case X86::KMOVBmk:
796     MemBytes = 1;
797     return true;
798   case X86::MOV16mr:
799   case X86::KMOVWmk:
800   case X86::VMOVSHZmr:
801     MemBytes = 2;
802     return true;
803   case X86::MOV32mr:
804   case X86::MOVSSmr:
805   case X86::VMOVSSmr:
806   case X86::VMOVSSZmr:
807   case X86::KMOVDmk:
808     MemBytes = 4;
809     return true;
810   case X86::MOV64mr:
811   case X86::ST_FpP64m:
812   case X86::MOVSDmr:
813   case X86::VMOVSDmr:
814   case X86::VMOVSDZmr:
815   case X86::MMX_MOVD64mr:
816   case X86::MMX_MOVQ64mr:
817   case X86::MMX_MOVNTQmr:
818   case X86::KMOVQmk:
819     MemBytes = 8;
820     return true;
821   case X86::MOVAPSmr:
822   case X86::MOVUPSmr:
823   case X86::MOVAPDmr:
824   case X86::MOVUPDmr:
825   case X86::MOVDQAmr:
826   case X86::MOVDQUmr:
827   case X86::VMOVAPSmr:
828   case X86::VMOVUPSmr:
829   case X86::VMOVAPDmr:
830   case X86::VMOVUPDmr:
831   case X86::VMOVDQAmr:
832   case X86::VMOVDQUmr:
833   case X86::VMOVUPSZ128mr:
834   case X86::VMOVAPSZ128mr:
835   case X86::VMOVUPSZ128mr_NOVLX:
836   case X86::VMOVAPSZ128mr_NOVLX:
837   case X86::VMOVUPDZ128mr:
838   case X86::VMOVAPDZ128mr:
839   case X86::VMOVDQA32Z128mr:
840   case X86::VMOVDQU32Z128mr:
841   case X86::VMOVDQA64Z128mr:
842   case X86::VMOVDQU64Z128mr:
843   case X86::VMOVDQU8Z128mr:
844   case X86::VMOVDQU16Z128mr:
845     MemBytes = 16;
846     return true;
847   case X86::VMOVUPSYmr:
848   case X86::VMOVAPSYmr:
849   case X86::VMOVUPDYmr:
850   case X86::VMOVAPDYmr:
851   case X86::VMOVDQUYmr:
852   case X86::VMOVDQAYmr:
853   case X86::VMOVUPSZ256mr:
854   case X86::VMOVAPSZ256mr:
855   case X86::VMOVUPSZ256mr_NOVLX:
856   case X86::VMOVAPSZ256mr_NOVLX:
857   case X86::VMOVUPDZ256mr:
858   case X86::VMOVAPDZ256mr:
859   case X86::VMOVDQU8Z256mr:
860   case X86::VMOVDQU16Z256mr:
861   case X86::VMOVDQA32Z256mr:
862   case X86::VMOVDQU32Z256mr:
863   case X86::VMOVDQA64Z256mr:
864   case X86::VMOVDQU64Z256mr:
865     MemBytes = 32;
866     return true;
867   case X86::VMOVUPSZmr:
868   case X86::VMOVAPSZmr:
869   case X86::VMOVUPDZmr:
870   case X86::VMOVAPDZmr:
871   case X86::VMOVDQU8Zmr:
872   case X86::VMOVDQU16Zmr:
873   case X86::VMOVDQA32Zmr:
874   case X86::VMOVDQU32Zmr:
875   case X86::VMOVDQA64Zmr:
876   case X86::VMOVDQU64Zmr:
877     MemBytes = 64;
878     return true;
879   }
880   return false;
881 }
882 
883 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
884                                            int &FrameIndex) const {
885   unsigned Dummy;
886   return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
887 }
888 
889 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
890                                            int &FrameIndex,
891                                            unsigned &MemBytes) const {
892   if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
893     if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
894       return MI.getOperand(0).getReg();
895   return 0;
896 }
897 
898 unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
899                                                  int &FrameIndex) const {
900   unsigned Dummy;
901   if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
902     unsigned Reg;
903     if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
904       return Reg;
905     // Check for post-frame index elimination operations
906     SmallVector<const MachineMemOperand *, 1> Accesses;
907     if (hasLoadFromStackSlot(MI, Accesses)) {
908       FrameIndex =
909           cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
910               ->getFrameIndex();
911       return MI.getOperand(0).getReg();
912     }
913   }
914   return 0;
915 }
916 
917 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
918                                           int &FrameIndex) const {
919   unsigned Dummy;
920   return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
921 }
922 
923 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
924                                           int &FrameIndex,
925                                           unsigned &MemBytes) const {
926   if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
927     if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
928         isFrameOperand(MI, 0, FrameIndex))
929       return MI.getOperand(X86::AddrNumOperands).getReg();
930   return 0;
931 }
932 
933 unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
934                                                 int &FrameIndex) const {
935   unsigned Dummy;
936   if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
937     unsigned Reg;
938     if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
939       return Reg;
940     // Check for post-frame index elimination operations
941     SmallVector<const MachineMemOperand *, 1> Accesses;
942     if (hasStoreToStackSlot(MI, Accesses)) {
943       FrameIndex =
944           cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
945               ->getFrameIndex();
946       return MI.getOperand(X86::AddrNumOperands).getReg();
947     }
948   }
949   return 0;
950 }
951 
952 /// Return true if register is PIC base; i.e.g defined by X86::MOVPC32r.
953 static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) {
954   // Don't waste compile time scanning use-def chains of physregs.
955   if (!BaseReg.isVirtual())
956     return false;
957   bool isPICBase = false;
958   for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
959          E = MRI.def_instr_end(); I != E; ++I) {
960     MachineInstr *DefMI = &*I;
961     if (DefMI->getOpcode() != X86::MOVPC32r)
962       return false;
963     assert(!isPICBase && "More than one PIC base?");
964     isPICBase = true;
965   }
966   return isPICBase;
967 }
968 
969 bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
970                                                      AAResults *AA) const {
971   switch (MI.getOpcode()) {
972   default:
973     // This function should only be called for opcodes with the ReMaterializable
974     // flag set.
975     llvm_unreachable("Unknown rematerializable operation!");
976     break;
977 
978   case X86::LOAD_STACK_GUARD:
979   case X86::AVX1_SETALLONES:
980   case X86::AVX2_SETALLONES:
981   case X86::AVX512_128_SET0:
982   case X86::AVX512_256_SET0:
983   case X86::AVX512_512_SET0:
984   case X86::AVX512_512_SETALLONES:
985   case X86::AVX512_FsFLD0SD:
986   case X86::AVX512_FsFLD0SH:
987   case X86::AVX512_FsFLD0SS:
988   case X86::AVX512_FsFLD0F128:
989   case X86::AVX_SET0:
990   case X86::FsFLD0SD:
991   case X86::FsFLD0SS:
992   case X86::FsFLD0F128:
993   case X86::KSET0D:
994   case X86::KSET0Q:
995   case X86::KSET0W:
996   case X86::KSET1D:
997   case X86::KSET1Q:
998   case X86::KSET1W:
999   case X86::MMX_SET0:
1000   case X86::MOV32ImmSExti8:
1001   case X86::MOV32r0:
1002   case X86::MOV32r1:
1003   case X86::MOV32r_1:
1004   case X86::MOV32ri64:
1005   case X86::MOV64ImmSExti8:
1006   case X86::V_SET0:
1007   case X86::V_SETALLONES:
1008   case X86::MOV16ri:
1009   case X86::MOV32ri:
1010   case X86::MOV64ri:
1011   case X86::MOV64ri32:
1012   case X86::MOV8ri:
1013   case X86::PTILEZEROV:
1014     return true;
1015 
1016   case X86::MOV8rm:
1017   case X86::MOV8rm_NOREX:
1018   case X86::MOV16rm:
1019   case X86::MOV32rm:
1020   case X86::MOV64rm:
1021   case X86::MOVSSrm:
1022   case X86::MOVSSrm_alt:
1023   case X86::MOVSDrm:
1024   case X86::MOVSDrm_alt:
1025   case X86::MOVAPSrm:
1026   case X86::MOVUPSrm:
1027   case X86::MOVAPDrm:
1028   case X86::MOVUPDrm:
1029   case X86::MOVDQArm:
1030   case X86::MOVDQUrm:
1031   case X86::VMOVSSrm:
1032   case X86::VMOVSSrm_alt:
1033   case X86::VMOVSDrm:
1034   case X86::VMOVSDrm_alt:
1035   case X86::VMOVAPSrm:
1036   case X86::VMOVUPSrm:
1037   case X86::VMOVAPDrm:
1038   case X86::VMOVUPDrm:
1039   case X86::VMOVDQArm:
1040   case X86::VMOVDQUrm:
1041   case X86::VMOVAPSYrm:
1042   case X86::VMOVUPSYrm:
1043   case X86::VMOVAPDYrm:
1044   case X86::VMOVUPDYrm:
1045   case X86::VMOVDQAYrm:
1046   case X86::VMOVDQUYrm:
1047   case X86::MMX_MOVD64rm:
1048   case X86::MMX_MOVQ64rm:
1049   // AVX-512
1050   case X86::VMOVSSZrm:
1051   case X86::VMOVSSZrm_alt:
1052   case X86::VMOVSDZrm:
1053   case X86::VMOVSDZrm_alt:
1054   case X86::VMOVSHZrm:
1055   case X86::VMOVSHZrm_alt:
1056   case X86::VMOVAPDZ128rm:
1057   case X86::VMOVAPDZ256rm:
1058   case X86::VMOVAPDZrm:
1059   case X86::VMOVAPSZ128rm:
1060   case X86::VMOVAPSZ256rm:
1061   case X86::VMOVAPSZ128rm_NOVLX:
1062   case X86::VMOVAPSZ256rm_NOVLX:
1063   case X86::VMOVAPSZrm:
1064   case X86::VMOVDQA32Z128rm:
1065   case X86::VMOVDQA32Z256rm:
1066   case X86::VMOVDQA32Zrm:
1067   case X86::VMOVDQA64Z128rm:
1068   case X86::VMOVDQA64Z256rm:
1069   case X86::VMOVDQA64Zrm:
1070   case X86::VMOVDQU16Z128rm:
1071   case X86::VMOVDQU16Z256rm:
1072   case X86::VMOVDQU16Zrm:
1073   case X86::VMOVDQU32Z128rm:
1074   case X86::VMOVDQU32Z256rm:
1075   case X86::VMOVDQU32Zrm:
1076   case X86::VMOVDQU64Z128rm:
1077   case X86::VMOVDQU64Z256rm:
1078   case X86::VMOVDQU64Zrm:
1079   case X86::VMOVDQU8Z128rm:
1080   case X86::VMOVDQU8Z256rm:
1081   case X86::VMOVDQU8Zrm:
1082   case X86::VMOVUPDZ128rm:
1083   case X86::VMOVUPDZ256rm:
1084   case X86::VMOVUPDZrm:
1085   case X86::VMOVUPSZ128rm:
1086   case X86::VMOVUPSZ256rm:
1087   case X86::VMOVUPSZ128rm_NOVLX:
1088   case X86::VMOVUPSZ256rm_NOVLX:
1089   case X86::VMOVUPSZrm: {
1090     // Loads from constant pools are trivially rematerializable.
1091     if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
1092         MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
1093         MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
1094         MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
1095         MI.isDereferenceableInvariantLoad(AA)) {
1096       Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
1097       if (BaseReg == 0 || BaseReg == X86::RIP)
1098         return true;
1099       // Allow re-materialization of PIC load.
1100       if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
1101         return false;
1102       const MachineFunction &MF = *MI.getParent()->getParent();
1103       const MachineRegisterInfo &MRI = MF.getRegInfo();
1104       return regIsPICBase(BaseReg, MRI);
1105     }
1106     return false;
1107   }
1108 
1109   case X86::LEA32r:
1110   case X86::LEA64r: {
1111     if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
1112         MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
1113         MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
1114         !MI.getOperand(1 + X86::AddrDisp).isReg()) {
1115       // lea fi#, lea GV, etc. are all rematerializable.
1116       if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
1117         return true;
1118       Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
1119       if (BaseReg == 0)
1120         return true;
1121       // Allow re-materialization of lea PICBase + x.
1122       const MachineFunction &MF = *MI.getParent()->getParent();
1123       const MachineRegisterInfo &MRI = MF.getRegInfo();
1124       return regIsPICBase(BaseReg, MRI);
1125     }
1126     return false;
1127   }
1128   }
1129 }
1130 
1131 void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
1132                                  MachineBasicBlock::iterator I,
1133                                  Register DestReg, unsigned SubIdx,
1134                                  const MachineInstr &Orig,
1135                                  const TargetRegisterInfo &TRI) const {
1136   bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
1137   if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
1138                             MachineBasicBlock::LQR_Dead) {
1139     // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
1140     // effects.
1141     int Value;
1142     switch (Orig.getOpcode()) {
1143     case X86::MOV32r0:  Value = 0; break;
1144     case X86::MOV32r1:  Value = 1; break;
1145     case X86::MOV32r_1: Value = -1; break;
1146     default:
1147       llvm_unreachable("Unexpected instruction!");
1148     }
1149 
1150     const DebugLoc &DL = Orig.getDebugLoc();
1151     BuildMI(MBB, I, DL, get(X86::MOV32ri))
1152         .add(Orig.getOperand(0))
1153         .addImm(Value);
1154   } else {
1155     MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
1156     MBB.insert(I, MI);
1157   }
1158 
1159   MachineInstr &NewMI = *std::prev(I);
1160   NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
1161 }
1162 
1163 /// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
1164 bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
1165   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1166     MachineOperand &MO = MI.getOperand(i);
1167     if (MO.isReg() && MO.isDef() &&
1168         MO.getReg() == X86::EFLAGS && !MO.isDead()) {
1169       return true;
1170     }
1171   }
1172   return false;
1173 }
1174 
1175 /// Check whether the shift count for a machine operand is non-zero.
1176 inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
1177                                               unsigned ShiftAmtOperandIdx) {
1178   // The shift count is six bits with the REX.W prefix and five bits without.
1179   unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
1180   unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
1181   return Imm & ShiftCountMask;
1182 }
1183 
1184 /// Check whether the given shift count is appropriate
1185 /// can be represented by a LEA instruction.
1186 inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
1187   // Left shift instructions can be transformed into load-effective-address
1188   // instructions if we can encode them appropriately.
1189   // A LEA instruction utilizes a SIB byte to encode its scale factor.
1190   // The SIB.scale field is two bits wide which means that we can encode any
1191   // shift amount less than 4.
1192   return ShAmt < 4 && ShAmt > 0;
1193 }
1194 
1195 bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
1196                                   unsigned Opc, bool AllowSP, Register &NewSrc,
1197                                   bool &isKill, MachineOperand &ImplicitOp,
1198                                   LiveVariables *LV) const {
1199   MachineFunction &MF = *MI.getParent()->getParent();
1200   const TargetRegisterClass *RC;
1201   if (AllowSP) {
1202     RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
1203   } else {
1204     RC = Opc != X86::LEA32r ?
1205       &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
1206   }
1207   Register SrcReg = Src.getReg();
1208   isKill = MI.killsRegister(SrcReg);
1209 
1210   // For both LEA64 and LEA32 the register already has essentially the right
1211   // type (32-bit or 64-bit) we may just need to forbid SP.
1212   if (Opc != X86::LEA64_32r) {
1213     NewSrc = SrcReg;
1214     assert(!Src.isUndef() && "Undef op doesn't need optimization");
1215 
1216     if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC))
1217       return false;
1218 
1219     return true;
1220   }
1221 
1222   // This is for an LEA64_32r and incoming registers are 32-bit. One way or
1223   // another we need to add 64-bit registers to the final MI.
1224   if (SrcReg.isPhysical()) {
1225     ImplicitOp = Src;
1226     ImplicitOp.setImplicit();
1227 
1228     NewSrc = getX86SubSuperRegister(SrcReg, 64);
1229     assert(!Src.isUndef() && "Undef op doesn't need optimization");
1230   } else {
1231     // Virtual register of the wrong class, we have to create a temporary 64-bit
1232     // vreg to feed into the LEA.
1233     NewSrc = MF.getRegInfo().createVirtualRegister(RC);
1234     MachineInstr *Copy =
1235         BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1236             .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
1237             .addReg(SrcReg, getKillRegState(isKill));
1238 
1239     // Which is obviously going to be dead after we're done with it.
1240     isKill = true;
1241 
1242     if (LV)
1243       LV->replaceKillInstruction(SrcReg, MI, *Copy);
1244   }
1245 
1246   // We've set all the parameters without issue.
1247   return true;
1248 }
1249 
1250 MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
1251                                                          MachineInstr &MI,
1252                                                          LiveVariables *LV,
1253                                                          bool Is8BitOp) const {
1254   // We handle 8-bit adds and various 16-bit opcodes in the switch below.
1255   MachineBasicBlock &MBB = *MI.getParent();
1256   MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
1257   assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
1258               *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
1259          "Unexpected type for LEA transform");
1260 
1261   // TODO: For a 32-bit target, we need to adjust the LEA variables with
1262   // something like this:
1263   //   Opcode = X86::LEA32r;
1264   //   InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1265   //   OutRegLEA =
1266   //       Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
1267   //                : RegInfo.createVirtualRegister(&X86::GR32RegClass);
1268   if (!Subtarget.is64Bit())
1269     return nullptr;
1270 
1271   unsigned Opcode = X86::LEA64_32r;
1272   Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1273   Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
1274 
1275   // Build and insert into an implicit UNDEF value. This is OK because
1276   // we will be shifting and then extracting the lower 8/16-bits.
1277   // This has the potential to cause partial register stall. e.g.
1278   //   movw    (%rbp,%rcx,2), %dx
1279   //   leal    -65(%rdx), %esi
1280   // But testing has shown this *does* help performance in 64-bit mode (at
1281   // least on modern x86 machines).
1282   MachineBasicBlock::iterator MBBI = MI.getIterator();
1283   Register Dest = MI.getOperand(0).getReg();
1284   Register Src = MI.getOperand(1).getReg();
1285   bool IsDead = MI.getOperand(0).isDead();
1286   bool IsKill = MI.getOperand(1).isKill();
1287   unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
1288   assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
1289   BuildMI(MBB, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
1290   MachineInstr *InsMI =
1291       BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1292           .addReg(InRegLEA, RegState::Define, SubReg)
1293           .addReg(Src, getKillRegState(IsKill));
1294 
1295   MachineInstrBuilder MIB =
1296       BuildMI(MBB, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
1297   switch (MIOpc) {
1298   default: llvm_unreachable("Unreachable!");
1299   case X86::SHL8ri:
1300   case X86::SHL16ri: {
1301     unsigned ShAmt = MI.getOperand(2).getImm();
1302     MIB.addReg(0).addImm(1ULL << ShAmt)
1303        .addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0);
1304     break;
1305   }
1306   case X86::INC8r:
1307   case X86::INC16r:
1308     addRegOffset(MIB, InRegLEA, true, 1);
1309     break;
1310   case X86::DEC8r:
1311   case X86::DEC16r:
1312     addRegOffset(MIB, InRegLEA, true, -1);
1313     break;
1314   case X86::ADD8ri:
1315   case X86::ADD8ri_DB:
1316   case X86::ADD16ri:
1317   case X86::ADD16ri8:
1318   case X86::ADD16ri_DB:
1319   case X86::ADD16ri8_DB:
1320     addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
1321     break;
1322   case X86::ADD8rr:
1323   case X86::ADD8rr_DB:
1324   case X86::ADD16rr:
1325   case X86::ADD16rr_DB: {
1326     Register Src2 = MI.getOperand(2).getReg();
1327     bool IsKill2 = MI.getOperand(2).isKill();
1328     assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
1329     unsigned InRegLEA2 = 0;
1330     MachineInstr *InsMI2 = nullptr;
1331     if (Src == Src2) {
1332       // ADD8rr/ADD16rr killed %reg1028, %reg1028
1333       // just a single insert_subreg.
1334       addRegReg(MIB, InRegLEA, true, InRegLEA, false);
1335     } else {
1336       if (Subtarget.is64Bit())
1337         InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1338       else
1339         InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1340       // Build and insert into an implicit UNDEF value. This is OK because
1341       // we will be shifting and then extracting the lower 8/16-bits.
1342       BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
1343       InsMI2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
1344                    .addReg(InRegLEA2, RegState::Define, SubReg)
1345                    .addReg(Src2, getKillRegState(IsKill2));
1346       addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
1347     }
1348     if (LV && IsKill2 && InsMI2)
1349       LV->replaceKillInstruction(Src2, MI, *InsMI2);
1350     break;
1351   }
1352   }
1353 
1354   MachineInstr *NewMI = MIB;
1355   MachineInstr *ExtMI =
1356       BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1357           .addReg(Dest, RegState::Define | getDeadRegState(IsDead))
1358           .addReg(OutRegLEA, RegState::Kill, SubReg);
1359 
1360   if (LV) {
1361     // Update live variables.
1362     LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
1363     LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
1364     if (IsKill)
1365       LV->replaceKillInstruction(Src, MI, *InsMI);
1366     if (IsDead)
1367       LV->replaceKillInstruction(Dest, MI, *ExtMI);
1368   }
1369 
1370   return ExtMI;
1371 }
1372 
1373 /// This method must be implemented by targets that
1374 /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
1375 /// may be able to convert a two-address instruction into a true
1376 /// three-address instruction on demand.  This allows the X86 target (for
1377 /// example) to convert ADD and SHL instructions into LEA instructions if they
1378 /// would require register copies due to two-addressness.
1379 ///
1380 /// This method returns a null pointer if the transformation cannot be
1381 /// performed, otherwise it returns the new instruction.
1382 ///
1383 MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
1384                                                   LiveVariables *LV) const {
1385   // The following opcodes also sets the condition code register(s). Only
1386   // convert them to equivalent lea if the condition code register def's
1387   // are dead!
1388   if (hasLiveCondCodeDef(MI))
1389     return nullptr;
1390 
1391   MachineFunction &MF = *MI.getParent()->getParent();
1392   // All instructions input are two-addr instructions.  Get the known operands.
1393   const MachineOperand &Dest = MI.getOperand(0);
1394   const MachineOperand &Src = MI.getOperand(1);
1395 
1396   // Ideally, operations with undef should be folded before we get here, but we
1397   // can't guarantee it. Bail out because optimizing undefs is a waste of time.
1398   // Without this, we have to forward undef state to new register operands to
1399   // avoid machine verifier errors.
1400   if (Src.isUndef())
1401     return nullptr;
1402   if (MI.getNumOperands() > 2)
1403     if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
1404       return nullptr;
1405 
1406   MachineInstr *NewMI = nullptr;
1407   bool Is64Bit = Subtarget.is64Bit();
1408 
1409   bool Is8BitOp = false;
1410   unsigned MIOpc = MI.getOpcode();
1411   switch (MIOpc) {
1412   default: llvm_unreachable("Unreachable!");
1413   case X86::SHL64ri: {
1414     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1415     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1416     if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
1417 
1418     // LEA can't handle RSP.
1419     if (Src.getReg().isVirtual() && !MF.getRegInfo().constrainRegClass(
1420                                         Src.getReg(), &X86::GR64_NOSPRegClass))
1421       return nullptr;
1422 
1423     NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
1424                 .add(Dest)
1425                 .addReg(0)
1426                 .addImm(1ULL << ShAmt)
1427                 .add(Src)
1428                 .addImm(0)
1429                 .addReg(0);
1430     break;
1431   }
1432   case X86::SHL32ri: {
1433     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1434     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1435     if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
1436 
1437     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1438 
1439     // LEA can't handle ESP.
1440     bool isKill;
1441     Register SrcReg;
1442     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1443     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
1444                         SrcReg, isKill, ImplicitOp, LV))
1445       return nullptr;
1446 
1447     MachineInstrBuilder MIB =
1448         BuildMI(MF, MI.getDebugLoc(), get(Opc))
1449             .add(Dest)
1450             .addReg(0)
1451             .addImm(1ULL << ShAmt)
1452             .addReg(SrcReg, getKillRegState(isKill))
1453             .addImm(0)
1454             .addReg(0);
1455     if (ImplicitOp.getReg() != 0)
1456       MIB.add(ImplicitOp);
1457     NewMI = MIB;
1458 
1459     break;
1460   }
1461   case X86::SHL8ri:
1462     Is8BitOp = true;
1463     LLVM_FALLTHROUGH;
1464   case X86::SHL16ri: {
1465     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1466     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1467     if (!isTruncatedShiftCountForLEA(ShAmt))
1468       return nullptr;
1469     return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
1470   }
1471   case X86::INC64r:
1472   case X86::INC32r: {
1473     assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
1474     unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r :
1475         (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1476     bool isKill;
1477     Register SrcReg;
1478     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1479     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
1480                         ImplicitOp, LV))
1481       return nullptr;
1482 
1483     MachineInstrBuilder MIB =
1484         BuildMI(MF, MI.getDebugLoc(), get(Opc))
1485             .add(Dest)
1486             .addReg(SrcReg, getKillRegState(isKill));
1487     if (ImplicitOp.getReg() != 0)
1488       MIB.add(ImplicitOp);
1489 
1490     NewMI = addOffset(MIB, 1);
1491     break;
1492   }
1493   case X86::DEC64r:
1494   case X86::DEC32r: {
1495     assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
1496     unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
1497         : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1498 
1499     bool isKill;
1500     Register SrcReg;
1501     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1502     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
1503                         ImplicitOp, LV))
1504       return nullptr;
1505 
1506     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1507                                   .add(Dest)
1508                                   .addReg(SrcReg, getKillRegState(isKill));
1509     if (ImplicitOp.getReg() != 0)
1510       MIB.add(ImplicitOp);
1511 
1512     NewMI = addOffset(MIB, -1);
1513 
1514     break;
1515   }
1516   case X86::DEC8r:
1517   case X86::INC8r:
1518     Is8BitOp = true;
1519     LLVM_FALLTHROUGH;
1520   case X86::DEC16r:
1521   case X86::INC16r:
1522     return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
1523   case X86::ADD64rr:
1524   case X86::ADD64rr_DB:
1525   case X86::ADD32rr:
1526   case X86::ADD32rr_DB: {
1527     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1528     unsigned Opc;
1529     if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
1530       Opc = X86::LEA64r;
1531     else
1532       Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1533 
1534     const MachineOperand &Src2 = MI.getOperand(2);
1535     bool isKill2;
1536     Register SrcReg2;
1537     MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
1538     if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
1539                         SrcReg2, isKill2, ImplicitOp2, LV))
1540       return nullptr;
1541 
1542     bool isKill;
1543     Register SrcReg;
1544     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1545     if (Src.getReg() == Src2.getReg()) {
1546       // Don't call classify LEAReg a second time on the same register, in case
1547       // the first call inserted a COPY from Src2 and marked it as killed.
1548       isKill = isKill2;
1549       SrcReg = SrcReg2;
1550     } else {
1551       if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true,
1552                           SrcReg, isKill, ImplicitOp, LV))
1553         return nullptr;
1554     }
1555 
1556     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
1557     if (ImplicitOp.getReg() != 0)
1558       MIB.add(ImplicitOp);
1559     if (ImplicitOp2.getReg() != 0)
1560       MIB.add(ImplicitOp2);
1561 
1562     NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
1563     if (LV && Src2.isKill())
1564       LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
1565     break;
1566   }
1567   case X86::ADD8rr:
1568   case X86::ADD8rr_DB:
1569     Is8BitOp = true;
1570     LLVM_FALLTHROUGH;
1571   case X86::ADD16rr:
1572   case X86::ADD16rr_DB:
1573     return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
1574   case X86::ADD64ri32:
1575   case X86::ADD64ri8:
1576   case X86::ADD64ri32_DB:
1577   case X86::ADD64ri8_DB:
1578     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1579     NewMI = addOffset(
1580         BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
1581         MI.getOperand(2));
1582     break;
1583   case X86::ADD32ri:
1584   case X86::ADD32ri8:
1585   case X86::ADD32ri_DB:
1586   case X86::ADD32ri8_DB: {
1587     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1588     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1589 
1590     bool isKill;
1591     Register SrcReg;
1592     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1593     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1594                         SrcReg, isKill, ImplicitOp, LV))
1595       return nullptr;
1596 
1597     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1598                                   .add(Dest)
1599                                   .addReg(SrcReg, getKillRegState(isKill));
1600     if (ImplicitOp.getReg() != 0)
1601       MIB.add(ImplicitOp);
1602 
1603     NewMI = addOffset(MIB, MI.getOperand(2));
1604     break;
1605   }
1606   case X86::ADD8ri:
1607   case X86::ADD8ri_DB:
1608     Is8BitOp = true;
1609     LLVM_FALLTHROUGH;
1610   case X86::ADD16ri:
1611   case X86::ADD16ri8:
1612   case X86::ADD16ri_DB:
1613   case X86::ADD16ri8_DB:
1614     return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
1615   case X86::SUB8ri:
1616   case X86::SUB16ri8:
1617   case X86::SUB16ri:
1618     /// FIXME: Support these similar to ADD8ri/ADD16ri*.
1619     return nullptr;
1620   case X86::SUB32ri8:
1621   case X86::SUB32ri: {
1622     if (!MI.getOperand(2).isImm())
1623       return nullptr;
1624     int64_t Imm = MI.getOperand(2).getImm();
1625     if (!isInt<32>(-Imm))
1626       return nullptr;
1627 
1628     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1629     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1630 
1631     bool isKill;
1632     Register SrcReg;
1633     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1634     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1635                         SrcReg, isKill, ImplicitOp, LV))
1636       return nullptr;
1637 
1638     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1639                                   .add(Dest)
1640                                   .addReg(SrcReg, getKillRegState(isKill));
1641     if (ImplicitOp.getReg() != 0)
1642       MIB.add(ImplicitOp);
1643 
1644     NewMI = addOffset(MIB, -Imm);
1645     break;
1646   }
1647 
1648   case X86::SUB64ri8:
1649   case X86::SUB64ri32: {
1650     if (!MI.getOperand(2).isImm())
1651       return nullptr;
1652     int64_t Imm = MI.getOperand(2).getImm();
1653     if (!isInt<32>(-Imm))
1654       return nullptr;
1655 
1656     assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
1657 
1658     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(),
1659                                       get(X86::LEA64r)).add(Dest).add(Src);
1660     NewMI = addOffset(MIB, -Imm);
1661     break;
1662   }
1663 
1664   case X86::VMOVDQU8Z128rmk:
1665   case X86::VMOVDQU8Z256rmk:
1666   case X86::VMOVDQU8Zrmk:
1667   case X86::VMOVDQU16Z128rmk:
1668   case X86::VMOVDQU16Z256rmk:
1669   case X86::VMOVDQU16Zrmk:
1670   case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
1671   case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
1672   case X86::VMOVDQU32Zrmk:    case X86::VMOVDQA32Zrmk:
1673   case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
1674   case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
1675   case X86::VMOVDQU64Zrmk:    case X86::VMOVDQA64Zrmk:
1676   case X86::VMOVUPDZ128rmk:   case X86::VMOVAPDZ128rmk:
1677   case X86::VMOVUPDZ256rmk:   case X86::VMOVAPDZ256rmk:
1678   case X86::VMOVUPDZrmk:      case X86::VMOVAPDZrmk:
1679   case X86::VMOVUPSZ128rmk:   case X86::VMOVAPSZ128rmk:
1680   case X86::VMOVUPSZ256rmk:   case X86::VMOVAPSZ256rmk:
1681   case X86::VMOVUPSZrmk:      case X86::VMOVAPSZrmk:
1682   case X86::VBROADCASTSDZ256rmk:
1683   case X86::VBROADCASTSDZrmk:
1684   case X86::VBROADCASTSSZ128rmk:
1685   case X86::VBROADCASTSSZ256rmk:
1686   case X86::VBROADCASTSSZrmk:
1687   case X86::VPBROADCASTDZ128rmk:
1688   case X86::VPBROADCASTDZ256rmk:
1689   case X86::VPBROADCASTDZrmk:
1690   case X86::VPBROADCASTQZ128rmk:
1691   case X86::VPBROADCASTQZ256rmk:
1692   case X86::VPBROADCASTQZrmk: {
1693     unsigned Opc;
1694     switch (MIOpc) {
1695     default: llvm_unreachable("Unreachable!");
1696     case X86::VMOVDQU8Z128rmk:     Opc = X86::VPBLENDMBZ128rmk; break;
1697     case X86::VMOVDQU8Z256rmk:     Opc = X86::VPBLENDMBZ256rmk; break;
1698     case X86::VMOVDQU8Zrmk:        Opc = X86::VPBLENDMBZrmk;    break;
1699     case X86::VMOVDQU16Z128rmk:    Opc = X86::VPBLENDMWZ128rmk; break;
1700     case X86::VMOVDQU16Z256rmk:    Opc = X86::VPBLENDMWZ256rmk; break;
1701     case X86::VMOVDQU16Zrmk:       Opc = X86::VPBLENDMWZrmk;    break;
1702     case X86::VMOVDQU32Z128rmk:    Opc = X86::VPBLENDMDZ128rmk; break;
1703     case X86::VMOVDQU32Z256rmk:    Opc = X86::VPBLENDMDZ256rmk; break;
1704     case X86::VMOVDQU32Zrmk:       Opc = X86::VPBLENDMDZrmk;    break;
1705     case X86::VMOVDQU64Z128rmk:    Opc = X86::VPBLENDMQZ128rmk; break;
1706     case X86::VMOVDQU64Z256rmk:    Opc = X86::VPBLENDMQZ256rmk; break;
1707     case X86::VMOVDQU64Zrmk:       Opc = X86::VPBLENDMQZrmk;    break;
1708     case X86::VMOVUPDZ128rmk:      Opc = X86::VBLENDMPDZ128rmk; break;
1709     case X86::VMOVUPDZ256rmk:      Opc = X86::VBLENDMPDZ256rmk; break;
1710     case X86::VMOVUPDZrmk:         Opc = X86::VBLENDMPDZrmk;    break;
1711     case X86::VMOVUPSZ128rmk:      Opc = X86::VBLENDMPSZ128rmk; break;
1712     case X86::VMOVUPSZ256rmk:      Opc = X86::VBLENDMPSZ256rmk; break;
1713     case X86::VMOVUPSZrmk:         Opc = X86::VBLENDMPSZrmk;    break;
1714     case X86::VMOVDQA32Z128rmk:    Opc = X86::VPBLENDMDZ128rmk; break;
1715     case X86::VMOVDQA32Z256rmk:    Opc = X86::VPBLENDMDZ256rmk; break;
1716     case X86::VMOVDQA32Zrmk:       Opc = X86::VPBLENDMDZrmk;    break;
1717     case X86::VMOVDQA64Z128rmk:    Opc = X86::VPBLENDMQZ128rmk; break;
1718     case X86::VMOVDQA64Z256rmk:    Opc = X86::VPBLENDMQZ256rmk; break;
1719     case X86::VMOVDQA64Zrmk:       Opc = X86::VPBLENDMQZrmk;    break;
1720     case X86::VMOVAPDZ128rmk:      Opc = X86::VBLENDMPDZ128rmk; break;
1721     case X86::VMOVAPDZ256rmk:      Opc = X86::VBLENDMPDZ256rmk; break;
1722     case X86::VMOVAPDZrmk:         Opc = X86::VBLENDMPDZrmk;    break;
1723     case X86::VMOVAPSZ128rmk:      Opc = X86::VBLENDMPSZ128rmk; break;
1724     case X86::VMOVAPSZ256rmk:      Opc = X86::VBLENDMPSZ256rmk; break;
1725     case X86::VMOVAPSZrmk:         Opc = X86::VBLENDMPSZrmk;    break;
1726     case X86::VBROADCASTSDZ256rmk: Opc = X86::VBLENDMPDZ256rmbk; break;
1727     case X86::VBROADCASTSDZrmk:    Opc = X86::VBLENDMPDZrmbk;    break;
1728     case X86::VBROADCASTSSZ128rmk: Opc = X86::VBLENDMPSZ128rmbk; break;
1729     case X86::VBROADCASTSSZ256rmk: Opc = X86::VBLENDMPSZ256rmbk; break;
1730     case X86::VBROADCASTSSZrmk:    Opc = X86::VBLENDMPSZrmbk;    break;
1731     case X86::VPBROADCASTDZ128rmk: Opc = X86::VPBLENDMDZ128rmbk; break;
1732     case X86::VPBROADCASTDZ256rmk: Opc = X86::VPBLENDMDZ256rmbk; break;
1733     case X86::VPBROADCASTDZrmk:    Opc = X86::VPBLENDMDZrmbk;    break;
1734     case X86::VPBROADCASTQZ128rmk: Opc = X86::VPBLENDMQZ128rmbk; break;
1735     case X86::VPBROADCASTQZ256rmk: Opc = X86::VPBLENDMQZ256rmbk; break;
1736     case X86::VPBROADCASTQZrmk:    Opc = X86::VPBLENDMQZrmbk;    break;
1737     }
1738 
1739     NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1740               .add(Dest)
1741               .add(MI.getOperand(2))
1742               .add(Src)
1743               .add(MI.getOperand(3))
1744               .add(MI.getOperand(4))
1745               .add(MI.getOperand(5))
1746               .add(MI.getOperand(6))
1747               .add(MI.getOperand(7));
1748     break;
1749   }
1750 
1751   case X86::VMOVDQU8Z128rrk:
1752   case X86::VMOVDQU8Z256rrk:
1753   case X86::VMOVDQU8Zrrk:
1754   case X86::VMOVDQU16Z128rrk:
1755   case X86::VMOVDQU16Z256rrk:
1756   case X86::VMOVDQU16Zrrk:
1757   case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
1758   case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
1759   case X86::VMOVDQU32Zrrk:    case X86::VMOVDQA32Zrrk:
1760   case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
1761   case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
1762   case X86::VMOVDQU64Zrrk:    case X86::VMOVDQA64Zrrk:
1763   case X86::VMOVUPDZ128rrk:   case X86::VMOVAPDZ128rrk:
1764   case X86::VMOVUPDZ256rrk:   case X86::VMOVAPDZ256rrk:
1765   case X86::VMOVUPDZrrk:      case X86::VMOVAPDZrrk:
1766   case X86::VMOVUPSZ128rrk:   case X86::VMOVAPSZ128rrk:
1767   case X86::VMOVUPSZ256rrk:   case X86::VMOVAPSZ256rrk:
1768   case X86::VMOVUPSZrrk:      case X86::VMOVAPSZrrk: {
1769     unsigned Opc;
1770     switch (MIOpc) {
1771     default: llvm_unreachable("Unreachable!");
1772     case X86::VMOVDQU8Z128rrk:  Opc = X86::VPBLENDMBZ128rrk; break;
1773     case X86::VMOVDQU8Z256rrk:  Opc = X86::VPBLENDMBZ256rrk; break;
1774     case X86::VMOVDQU8Zrrk:     Opc = X86::VPBLENDMBZrrk;    break;
1775     case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
1776     case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
1777     case X86::VMOVDQU16Zrrk:    Opc = X86::VPBLENDMWZrrk;    break;
1778     case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
1779     case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
1780     case X86::VMOVDQU32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
1781     case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
1782     case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
1783     case X86::VMOVDQU64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
1784     case X86::VMOVUPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
1785     case X86::VMOVUPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
1786     case X86::VMOVUPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
1787     case X86::VMOVUPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
1788     case X86::VMOVUPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
1789     case X86::VMOVUPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
1790     case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
1791     case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
1792     case X86::VMOVDQA32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
1793     case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
1794     case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
1795     case X86::VMOVDQA64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
1796     case X86::VMOVAPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
1797     case X86::VMOVAPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
1798     case X86::VMOVAPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
1799     case X86::VMOVAPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
1800     case X86::VMOVAPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
1801     case X86::VMOVAPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
1802     }
1803 
1804     NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1805               .add(Dest)
1806               .add(MI.getOperand(2))
1807               .add(Src)
1808               .add(MI.getOperand(3));
1809     break;
1810   }
1811   }
1812 
1813   if (!NewMI) return nullptr;
1814 
1815   if (LV) {  // Update live variables
1816     if (Src.isKill())
1817       LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
1818     if (Dest.isDead())
1819       LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
1820   }
1821 
1822   MachineBasicBlock &MBB = *MI.getParent();
1823   MBB.insert(MI.getIterator(), NewMI); // Insert the new inst
1824   return NewMI;
1825 }
1826 
1827 /// This determines which of three possible cases of a three source commute
1828 /// the source indexes correspond to taking into account any mask operands.
1829 /// All prevents commuting a passthru operand. Returns -1 if the commute isn't
1830 /// possible.
1831 /// Case 0 - Possible to commute the first and second operands.
1832 /// Case 1 - Possible to commute the first and third operands.
1833 /// Case 2 - Possible to commute the second and third operands.
1834 static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
1835                                        unsigned SrcOpIdx2) {
1836   // Put the lowest index to SrcOpIdx1 to simplify the checks below.
1837   if (SrcOpIdx1 > SrcOpIdx2)
1838     std::swap(SrcOpIdx1, SrcOpIdx2);
1839 
1840   unsigned Op1 = 1, Op2 = 2, Op3 = 3;
1841   if (X86II::isKMasked(TSFlags)) {
1842     Op2++;
1843     Op3++;
1844   }
1845 
1846   if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
1847     return 0;
1848   if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
1849     return 1;
1850   if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
1851     return 2;
1852   llvm_unreachable("Unknown three src commute case.");
1853 }
1854 
1855 unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
1856     const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
1857     const X86InstrFMA3Group &FMA3Group) const {
1858 
1859   unsigned Opc = MI.getOpcode();
1860 
1861   // TODO: Commuting the 1st operand of FMA*_Int requires some additional
1862   // analysis. The commute optimization is legal only if all users of FMA*_Int
1863   // use only the lowest element of the FMA*_Int instruction. Such analysis are
1864   // not implemented yet. So, just return 0 in that case.
1865   // When such analysis are available this place will be the right place for
1866   // calling it.
1867   assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
1868          "Intrinsic instructions can't commute operand 1");
1869 
1870   // Determine which case this commute is or if it can't be done.
1871   unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
1872                                          SrcOpIdx2);
1873   assert(Case < 3 && "Unexpected case number!");
1874 
1875   // Define the FMA forms mapping array that helps to map input FMA form
1876   // to output FMA form to preserve the operation semantics after
1877   // commuting the operands.
1878   const unsigned Form132Index = 0;
1879   const unsigned Form213Index = 1;
1880   const unsigned Form231Index = 2;
1881   static const unsigned FormMapping[][3] = {
1882     // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
1883     // FMA132 A, C, b; ==> FMA231 C, A, b;
1884     // FMA213 B, A, c; ==> FMA213 A, B, c;
1885     // FMA231 C, A, b; ==> FMA132 A, C, b;
1886     { Form231Index, Form213Index, Form132Index },
1887     // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
1888     // FMA132 A, c, B; ==> FMA132 B, c, A;
1889     // FMA213 B, a, C; ==> FMA231 C, a, B;
1890     // FMA231 C, a, B; ==> FMA213 B, a, C;
1891     { Form132Index, Form231Index, Form213Index },
1892     // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
1893     // FMA132 a, C, B; ==> FMA213 a, B, C;
1894     // FMA213 b, A, C; ==> FMA132 b, C, A;
1895     // FMA231 c, A, B; ==> FMA231 c, B, A;
1896     { Form213Index, Form132Index, Form231Index }
1897   };
1898 
1899   unsigned FMAForms[3];
1900   FMAForms[0] = FMA3Group.get132Opcode();
1901   FMAForms[1] = FMA3Group.get213Opcode();
1902   FMAForms[2] = FMA3Group.get231Opcode();
1903   unsigned FormIndex;
1904   for (FormIndex = 0; FormIndex < 3; FormIndex++)
1905     if (Opc == FMAForms[FormIndex])
1906       break;
1907 
1908   // Everything is ready, just adjust the FMA opcode and return it.
1909   FormIndex = FormMapping[Case][FormIndex];
1910   return FMAForms[FormIndex];
1911 }
1912 
1913 static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
1914                              unsigned SrcOpIdx2) {
1915   // Determine which case this commute is or if it can't be done.
1916   unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
1917                                          SrcOpIdx2);
1918   assert(Case < 3 && "Unexpected case value!");
1919 
1920   // For each case we need to swap two pairs of bits in the final immediate.
1921   static const uint8_t SwapMasks[3][4] = {
1922     { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
1923     { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
1924     { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
1925   };
1926 
1927   uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
1928   // Clear out the bits we are swapping.
1929   uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
1930                            SwapMasks[Case][2] | SwapMasks[Case][3]);
1931   // If the immediate had a bit of the pair set, then set the opposite bit.
1932   if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
1933   if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
1934   if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
1935   if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
1936   MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
1937 }
1938 
1939 // Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
1940 // commuted.
1941 static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
1942 #define VPERM_CASES(Suffix) \
1943   case X86::VPERMI2##Suffix##128rr:    case X86::VPERMT2##Suffix##128rr:    \
1944   case X86::VPERMI2##Suffix##256rr:    case X86::VPERMT2##Suffix##256rr:    \
1945   case X86::VPERMI2##Suffix##rr:       case X86::VPERMT2##Suffix##rr:       \
1946   case X86::VPERMI2##Suffix##128rm:    case X86::VPERMT2##Suffix##128rm:    \
1947   case X86::VPERMI2##Suffix##256rm:    case X86::VPERMT2##Suffix##256rm:    \
1948   case X86::VPERMI2##Suffix##rm:       case X86::VPERMT2##Suffix##rm:       \
1949   case X86::VPERMI2##Suffix##128rrkz:  case X86::VPERMT2##Suffix##128rrkz:  \
1950   case X86::VPERMI2##Suffix##256rrkz:  case X86::VPERMT2##Suffix##256rrkz:  \
1951   case X86::VPERMI2##Suffix##rrkz:     case X86::VPERMT2##Suffix##rrkz:     \
1952   case X86::VPERMI2##Suffix##128rmkz:  case X86::VPERMT2##Suffix##128rmkz:  \
1953   case X86::VPERMI2##Suffix##256rmkz:  case X86::VPERMT2##Suffix##256rmkz:  \
1954   case X86::VPERMI2##Suffix##rmkz:     case X86::VPERMT2##Suffix##rmkz:
1955 
1956 #define VPERM_CASES_BROADCAST(Suffix) \
1957   VPERM_CASES(Suffix) \
1958   case X86::VPERMI2##Suffix##128rmb:   case X86::VPERMT2##Suffix##128rmb:   \
1959   case X86::VPERMI2##Suffix##256rmb:   case X86::VPERMT2##Suffix##256rmb:   \
1960   case X86::VPERMI2##Suffix##rmb:      case X86::VPERMT2##Suffix##rmb:      \
1961   case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
1962   case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
1963   case X86::VPERMI2##Suffix##rmbkz:    case X86::VPERMT2##Suffix##rmbkz:
1964 
1965   switch (Opcode) {
1966   default: return false;
1967   VPERM_CASES(B)
1968   VPERM_CASES_BROADCAST(D)
1969   VPERM_CASES_BROADCAST(PD)
1970   VPERM_CASES_BROADCAST(PS)
1971   VPERM_CASES_BROADCAST(Q)
1972   VPERM_CASES(W)
1973     return true;
1974   }
1975 #undef VPERM_CASES_BROADCAST
1976 #undef VPERM_CASES
1977 }
1978 
1979 // Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
1980 // from the I opcode to the T opcode and vice versa.
1981 static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
1982 #define VPERM_CASES(Orig, New) \
1983   case X86::Orig##128rr:    return X86::New##128rr;   \
1984   case X86::Orig##128rrkz:  return X86::New##128rrkz; \
1985   case X86::Orig##128rm:    return X86::New##128rm;   \
1986   case X86::Orig##128rmkz:  return X86::New##128rmkz; \
1987   case X86::Orig##256rr:    return X86::New##256rr;   \
1988   case X86::Orig##256rrkz:  return X86::New##256rrkz; \
1989   case X86::Orig##256rm:    return X86::New##256rm;   \
1990   case X86::Orig##256rmkz:  return X86::New##256rmkz; \
1991   case X86::Orig##rr:       return X86::New##rr;      \
1992   case X86::Orig##rrkz:     return X86::New##rrkz;    \
1993   case X86::Orig##rm:       return X86::New##rm;      \
1994   case X86::Orig##rmkz:     return X86::New##rmkz;
1995 
1996 #define VPERM_CASES_BROADCAST(Orig, New) \
1997   VPERM_CASES(Orig, New) \
1998   case X86::Orig##128rmb:   return X86::New##128rmb;   \
1999   case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
2000   case X86::Orig##256rmb:   return X86::New##256rmb;   \
2001   case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
2002   case X86::Orig##rmb:      return X86::New##rmb;      \
2003   case X86::Orig##rmbkz:    return X86::New##rmbkz;
2004 
2005   switch (Opcode) {
2006   VPERM_CASES(VPERMI2B, VPERMT2B)
2007   VPERM_CASES_BROADCAST(VPERMI2D,  VPERMT2D)
2008   VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
2009   VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
2010   VPERM_CASES_BROADCAST(VPERMI2Q,  VPERMT2Q)
2011   VPERM_CASES(VPERMI2W, VPERMT2W)
2012   VPERM_CASES(VPERMT2B, VPERMI2B)
2013   VPERM_CASES_BROADCAST(VPERMT2D,  VPERMI2D)
2014   VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
2015   VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
2016   VPERM_CASES_BROADCAST(VPERMT2Q,  VPERMI2Q)
2017   VPERM_CASES(VPERMT2W, VPERMI2W)
2018   }
2019 
2020   llvm_unreachable("Unreachable!");
2021 #undef VPERM_CASES_BROADCAST
2022 #undef VPERM_CASES
2023 }
2024 
2025 MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2026                                                    unsigned OpIdx1,
2027                                                    unsigned OpIdx2) const {
2028   auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
2029     if (NewMI)
2030       return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
2031     return MI;
2032   };
2033 
2034   switch (MI.getOpcode()) {
2035   case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
2036   case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
2037   case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
2038   case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
2039   case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
2040   case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
2041     unsigned Opc;
2042     unsigned Size;
2043     switch (MI.getOpcode()) {
2044     default: llvm_unreachable("Unreachable!");
2045     case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
2046     case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
2047     case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
2048     case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
2049     case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
2050     case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
2051     }
2052     unsigned Amt = MI.getOperand(3).getImm();
2053     auto &WorkingMI = cloneIfNew(MI);
2054     WorkingMI.setDesc(get(Opc));
2055     WorkingMI.getOperand(3).setImm(Size - Amt);
2056     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2057                                                    OpIdx1, OpIdx2);
2058   }
2059   case X86::PFSUBrr:
2060   case X86::PFSUBRrr: {
2061     // PFSUB  x, y: x = x - y
2062     // PFSUBR x, y: x = y - x
2063     unsigned Opc =
2064         (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr);
2065     auto &WorkingMI = cloneIfNew(MI);
2066     WorkingMI.setDesc(get(Opc));
2067     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2068                                                    OpIdx1, OpIdx2);
2069   }
2070   case X86::BLENDPDrri:
2071   case X86::BLENDPSrri:
2072   case X86::VBLENDPDrri:
2073   case X86::VBLENDPSrri:
2074     // If we're optimizing for size, try to use MOVSD/MOVSS.
2075     if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
2076       unsigned Mask, Opc;
2077       switch (MI.getOpcode()) {
2078       default: llvm_unreachable("Unreachable!");
2079       case X86::BLENDPDrri:  Opc = X86::MOVSDrr;  Mask = 0x03; break;
2080       case X86::BLENDPSrri:  Opc = X86::MOVSSrr;  Mask = 0x0F; break;
2081       case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break;
2082       case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break;
2083       }
2084       if ((MI.getOperand(3).getImm() ^ Mask) == 1) {
2085         auto &WorkingMI = cloneIfNew(MI);
2086         WorkingMI.setDesc(get(Opc));
2087         WorkingMI.RemoveOperand(3);
2088         return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
2089                                                        /*NewMI=*/false,
2090                                                        OpIdx1, OpIdx2);
2091       }
2092     }
2093     LLVM_FALLTHROUGH;
2094   case X86::PBLENDWrri:
2095   case X86::VBLENDPDYrri:
2096   case X86::VBLENDPSYrri:
2097   case X86::VPBLENDDrri:
2098   case X86::VPBLENDWrri:
2099   case X86::VPBLENDDYrri:
2100   case X86::VPBLENDWYrri:{
2101     int8_t Mask;
2102     switch (MI.getOpcode()) {
2103     default: llvm_unreachable("Unreachable!");
2104     case X86::BLENDPDrri:    Mask = (int8_t)0x03; break;
2105     case X86::BLENDPSrri:    Mask = (int8_t)0x0F; break;
2106     case X86::PBLENDWrri:    Mask = (int8_t)0xFF; break;
2107     case X86::VBLENDPDrri:   Mask = (int8_t)0x03; break;
2108     case X86::VBLENDPSrri:   Mask = (int8_t)0x0F; break;
2109     case X86::VBLENDPDYrri:  Mask = (int8_t)0x0F; break;
2110     case X86::VBLENDPSYrri:  Mask = (int8_t)0xFF; break;
2111     case X86::VPBLENDDrri:   Mask = (int8_t)0x0F; break;
2112     case X86::VPBLENDWrri:   Mask = (int8_t)0xFF; break;
2113     case X86::VPBLENDDYrri:  Mask = (int8_t)0xFF; break;
2114     case X86::VPBLENDWYrri:  Mask = (int8_t)0xFF; break;
2115     }
2116     // Only the least significant bits of Imm are used.
2117     // Using int8_t to ensure it will be sign extended to the int64_t that
2118     // setImm takes in order to match isel behavior.
2119     int8_t Imm = MI.getOperand(3).getImm() & Mask;
2120     auto &WorkingMI = cloneIfNew(MI);
2121     WorkingMI.getOperand(3).setImm(Mask ^ Imm);
2122     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2123                                                    OpIdx1, OpIdx2);
2124   }
2125   case X86::INSERTPSrr:
2126   case X86::VINSERTPSrr:
2127   case X86::VINSERTPSZrr: {
2128     unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
2129     unsigned ZMask = Imm & 15;
2130     unsigned DstIdx = (Imm >> 4) & 3;
2131     unsigned SrcIdx = (Imm >> 6) & 3;
2132 
2133     // We can commute insertps if we zero 2 of the elements, the insertion is
2134     // "inline" and we don't override the insertion with a zero.
2135     if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
2136         countPopulation(ZMask) == 2) {
2137       unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
2138       assert(AltIdx < 4 && "Illegal insertion index");
2139       unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
2140       auto &WorkingMI = cloneIfNew(MI);
2141       WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
2142       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2143                                                      OpIdx1, OpIdx2);
2144     }
2145     return nullptr;
2146   }
2147   case X86::MOVSDrr:
2148   case X86::MOVSSrr:
2149   case X86::VMOVSDrr:
2150   case X86::VMOVSSrr:{
2151     // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
2152     if (Subtarget.hasSSE41()) {
2153       unsigned Mask, Opc;
2154       switch (MI.getOpcode()) {
2155       default: llvm_unreachable("Unreachable!");
2156       case X86::MOVSDrr:  Opc = X86::BLENDPDrri;  Mask = 0x02; break;
2157       case X86::MOVSSrr:  Opc = X86::BLENDPSrri;  Mask = 0x0E; break;
2158       case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break;
2159       case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break;
2160       }
2161 
2162       auto &WorkingMI = cloneIfNew(MI);
2163       WorkingMI.setDesc(get(Opc));
2164       WorkingMI.addOperand(MachineOperand::CreateImm(Mask));
2165       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2166                                                      OpIdx1, OpIdx2);
2167     }
2168 
2169     // Convert to SHUFPD.
2170     assert(MI.getOpcode() == X86::MOVSDrr &&
2171            "Can only commute MOVSDrr without SSE4.1");
2172 
2173     auto &WorkingMI = cloneIfNew(MI);
2174     WorkingMI.setDesc(get(X86::SHUFPDrri));
2175     WorkingMI.addOperand(MachineOperand::CreateImm(0x02));
2176     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2177                                                    OpIdx1, OpIdx2);
2178   }
2179   case X86::SHUFPDrri: {
2180     // Commute to MOVSD.
2181     assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
2182     auto &WorkingMI = cloneIfNew(MI);
2183     WorkingMI.setDesc(get(X86::MOVSDrr));
2184     WorkingMI.RemoveOperand(3);
2185     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2186                                                    OpIdx1, OpIdx2);
2187   }
2188   case X86::PCLMULQDQrr:
2189   case X86::VPCLMULQDQrr:
2190   case X86::VPCLMULQDQYrr:
2191   case X86::VPCLMULQDQZrr:
2192   case X86::VPCLMULQDQZ128rr:
2193   case X86::VPCLMULQDQZ256rr: {
2194     // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
2195     // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
2196     unsigned Imm = MI.getOperand(3).getImm();
2197     unsigned Src1Hi = Imm & 0x01;
2198     unsigned Src2Hi = Imm & 0x10;
2199     auto &WorkingMI = cloneIfNew(MI);
2200     WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
2201     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2202                                                    OpIdx1, OpIdx2);
2203   }
2204   case X86::VPCMPBZ128rri:  case X86::VPCMPUBZ128rri:
2205   case X86::VPCMPBZ256rri:  case X86::VPCMPUBZ256rri:
2206   case X86::VPCMPBZrri:     case X86::VPCMPUBZrri:
2207   case X86::VPCMPDZ128rri:  case X86::VPCMPUDZ128rri:
2208   case X86::VPCMPDZ256rri:  case X86::VPCMPUDZ256rri:
2209   case X86::VPCMPDZrri:     case X86::VPCMPUDZrri:
2210   case X86::VPCMPQZ128rri:  case X86::VPCMPUQZ128rri:
2211   case X86::VPCMPQZ256rri:  case X86::VPCMPUQZ256rri:
2212   case X86::VPCMPQZrri:     case X86::VPCMPUQZrri:
2213   case X86::VPCMPWZ128rri:  case X86::VPCMPUWZ128rri:
2214   case X86::VPCMPWZ256rri:  case X86::VPCMPUWZ256rri:
2215   case X86::VPCMPWZrri:     case X86::VPCMPUWZrri:
2216   case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik:
2217   case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik:
2218   case X86::VPCMPBZrrik:    case X86::VPCMPUBZrrik:
2219   case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik:
2220   case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik:
2221   case X86::VPCMPDZrrik:    case X86::VPCMPUDZrrik:
2222   case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik:
2223   case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik:
2224   case X86::VPCMPQZrrik:    case X86::VPCMPUQZrrik:
2225   case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik:
2226   case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik:
2227   case X86::VPCMPWZrrik:    case X86::VPCMPUWZrrik: {
2228     // Flip comparison mode immediate (if necessary).
2229     unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7;
2230     Imm = X86::getSwappedVPCMPImm(Imm);
2231     auto &WorkingMI = cloneIfNew(MI);
2232     WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm);
2233     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2234                                                    OpIdx1, OpIdx2);
2235   }
2236   case X86::VPCOMBri: case X86::VPCOMUBri:
2237   case X86::VPCOMDri: case X86::VPCOMUDri:
2238   case X86::VPCOMQri: case X86::VPCOMUQri:
2239   case X86::VPCOMWri: case X86::VPCOMUWri: {
2240     // Flip comparison mode immediate (if necessary).
2241     unsigned Imm = MI.getOperand(3).getImm() & 0x7;
2242     Imm = X86::getSwappedVPCOMImm(Imm);
2243     auto &WorkingMI = cloneIfNew(MI);
2244     WorkingMI.getOperand(3).setImm(Imm);
2245     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2246                                                    OpIdx1, OpIdx2);
2247   }
2248   case X86::VCMPSDZrr:
2249   case X86::VCMPSSZrr:
2250   case X86::VCMPPDZrri:
2251   case X86::VCMPPSZrri:
2252   case X86::VCMPSHZrr:
2253   case X86::VCMPPHZrri:
2254   case X86::VCMPPHZ128rri:
2255   case X86::VCMPPHZ256rri:
2256   case X86::VCMPPDZ128rri:
2257   case X86::VCMPPSZ128rri:
2258   case X86::VCMPPDZ256rri:
2259   case X86::VCMPPSZ256rri:
2260   case X86::VCMPPDZrrik:
2261   case X86::VCMPPSZrrik:
2262   case X86::VCMPPDZ128rrik:
2263   case X86::VCMPPSZ128rrik:
2264   case X86::VCMPPDZ256rrik:
2265   case X86::VCMPPSZ256rrik: {
2266     unsigned Imm =
2267                 MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f;
2268     Imm = X86::getSwappedVCMPImm(Imm);
2269     auto &WorkingMI = cloneIfNew(MI);
2270     WorkingMI.getOperand(MI.getNumExplicitOperands() - 1).setImm(Imm);
2271     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2272                                                    OpIdx1, OpIdx2);
2273   }
2274   case X86::VPERM2F128rr:
2275   case X86::VPERM2I128rr: {
2276     // Flip permute source immediate.
2277     // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
2278     // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
2279     int8_t Imm = MI.getOperand(3).getImm() & 0xFF;
2280     auto &WorkingMI = cloneIfNew(MI);
2281     WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
2282     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2283                                                    OpIdx1, OpIdx2);
2284   }
2285   case X86::MOVHLPSrr:
2286   case X86::UNPCKHPDrr:
2287   case X86::VMOVHLPSrr:
2288   case X86::VUNPCKHPDrr:
2289   case X86::VMOVHLPSZrr:
2290   case X86::VUNPCKHPDZ128rr: {
2291     assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
2292 
2293     unsigned Opc = MI.getOpcode();
2294     switch (Opc) {
2295     default: llvm_unreachable("Unreachable!");
2296     case X86::MOVHLPSrr:       Opc = X86::UNPCKHPDrr;      break;
2297     case X86::UNPCKHPDrr:      Opc = X86::MOVHLPSrr;       break;
2298     case X86::VMOVHLPSrr:      Opc = X86::VUNPCKHPDrr;     break;
2299     case X86::VUNPCKHPDrr:     Opc = X86::VMOVHLPSrr;      break;
2300     case X86::VMOVHLPSZrr:     Opc = X86::VUNPCKHPDZ128rr; break;
2301     case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr;     break;
2302     }
2303     auto &WorkingMI = cloneIfNew(MI);
2304     WorkingMI.setDesc(get(Opc));
2305     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2306                                                    OpIdx1, OpIdx2);
2307   }
2308   case X86::CMOV16rr:  case X86::CMOV32rr:  case X86::CMOV64rr: {
2309     auto &WorkingMI = cloneIfNew(MI);
2310     unsigned OpNo = MI.getDesc().getNumOperands() - 1;
2311     X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
2312     WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
2313     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2314                                                    OpIdx1, OpIdx2);
2315   }
2316   case X86::VPTERNLOGDZrri:      case X86::VPTERNLOGDZrmi:
2317   case X86::VPTERNLOGDZ128rri:   case X86::VPTERNLOGDZ128rmi:
2318   case X86::VPTERNLOGDZ256rri:   case X86::VPTERNLOGDZ256rmi:
2319   case X86::VPTERNLOGQZrri:      case X86::VPTERNLOGQZrmi:
2320   case X86::VPTERNLOGQZ128rri:   case X86::VPTERNLOGQZ128rmi:
2321   case X86::VPTERNLOGQZ256rri:   case X86::VPTERNLOGQZ256rmi:
2322   case X86::VPTERNLOGDZrrik:
2323   case X86::VPTERNLOGDZ128rrik:
2324   case X86::VPTERNLOGDZ256rrik:
2325   case X86::VPTERNLOGQZrrik:
2326   case X86::VPTERNLOGQZ128rrik:
2327   case X86::VPTERNLOGQZ256rrik:
2328   case X86::VPTERNLOGDZrrikz:    case X86::VPTERNLOGDZrmikz:
2329   case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
2330   case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
2331   case X86::VPTERNLOGQZrrikz:    case X86::VPTERNLOGQZrmikz:
2332   case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
2333   case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
2334   case X86::VPTERNLOGDZ128rmbi:
2335   case X86::VPTERNLOGDZ256rmbi:
2336   case X86::VPTERNLOGDZrmbi:
2337   case X86::VPTERNLOGQZ128rmbi:
2338   case X86::VPTERNLOGQZ256rmbi:
2339   case X86::VPTERNLOGQZrmbi:
2340   case X86::VPTERNLOGDZ128rmbikz:
2341   case X86::VPTERNLOGDZ256rmbikz:
2342   case X86::VPTERNLOGDZrmbikz:
2343   case X86::VPTERNLOGQZ128rmbikz:
2344   case X86::VPTERNLOGQZ256rmbikz:
2345   case X86::VPTERNLOGQZrmbikz: {
2346     auto &WorkingMI = cloneIfNew(MI);
2347     commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2);
2348     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2349                                                    OpIdx1, OpIdx2);
2350   }
2351   default: {
2352     if (isCommutableVPERMV3Instruction(MI.getOpcode())) {
2353       unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode());
2354       auto &WorkingMI = cloneIfNew(MI);
2355       WorkingMI.setDesc(get(Opc));
2356       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2357                                                      OpIdx1, OpIdx2);
2358     }
2359 
2360     const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
2361                                                       MI.getDesc().TSFlags);
2362     if (FMA3Group) {
2363       unsigned Opc =
2364         getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group);
2365       auto &WorkingMI = cloneIfNew(MI);
2366       WorkingMI.setDesc(get(Opc));
2367       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2368                                                      OpIdx1, OpIdx2);
2369     }
2370 
2371     return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2372   }
2373   }
2374 }
2375 
2376 bool
2377 X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
2378                                             unsigned &SrcOpIdx1,
2379                                             unsigned &SrcOpIdx2,
2380                                             bool IsIntrinsic) const {
2381   uint64_t TSFlags = MI.getDesc().TSFlags;
2382 
2383   unsigned FirstCommutableVecOp = 1;
2384   unsigned LastCommutableVecOp = 3;
2385   unsigned KMaskOp = -1U;
2386   if (X86II::isKMasked(TSFlags)) {
2387     // For k-zero-masked operations it is Ok to commute the first vector
2388     // operand. Unless this is an intrinsic instruction.
2389     // For regular k-masked operations a conservative choice is done as the
2390     // elements of the first vector operand, for which the corresponding bit
2391     // in the k-mask operand is set to 0, are copied to the result of the
2392     // instruction.
2393     // TODO/FIXME: The commute still may be legal if it is known that the
2394     // k-mask operand is set to either all ones or all zeroes.
2395     // It is also Ok to commute the 1st operand if all users of MI use only
2396     // the elements enabled by the k-mask operand. For example,
2397     //   v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
2398     //                                                     : v1[i];
2399     //   VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 ->
2400     //                                  // Ok, to commute v1 in FMADD213PSZrk.
2401 
2402     // The k-mask operand has index = 2 for masked and zero-masked operations.
2403     KMaskOp = 2;
2404 
2405     // The operand with index = 1 is used as a source for those elements for
2406     // which the corresponding bit in the k-mask is set to 0.
2407     if (X86II::isKMergeMasked(TSFlags) || IsIntrinsic)
2408       FirstCommutableVecOp = 3;
2409 
2410     LastCommutableVecOp++;
2411   } else if (IsIntrinsic) {
2412     // Commuting the first operand of an intrinsic instruction isn't possible
2413     // unless we can prove that only the lowest element of the result is used.
2414     FirstCommutableVecOp = 2;
2415   }
2416 
2417   if (isMem(MI, LastCommutableVecOp))
2418     LastCommutableVecOp--;
2419 
2420   // Only the first RegOpsNum operands are commutable.
2421   // Also, the value 'CommuteAnyOperandIndex' is valid here as it means
2422   // that the operand is not specified/fixed.
2423   if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2424       (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
2425        SrcOpIdx1 == KMaskOp))
2426     return false;
2427   if (SrcOpIdx2 != CommuteAnyOperandIndex &&
2428       (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
2429        SrcOpIdx2 == KMaskOp))
2430     return false;
2431 
2432   // Look for two different register operands assumed to be commutable
2433   // regardless of the FMA opcode. The FMA opcode is adjusted later.
2434   if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2435       SrcOpIdx2 == CommuteAnyOperandIndex) {
2436     unsigned CommutableOpIdx2 = SrcOpIdx2;
2437 
2438     // At least one of operands to be commuted is not specified and
2439     // this method is free to choose appropriate commutable operands.
2440     if (SrcOpIdx1 == SrcOpIdx2)
2441       // Both of operands are not fixed. By default set one of commutable
2442       // operands to the last register operand of the instruction.
2443       CommutableOpIdx2 = LastCommutableVecOp;
2444     else if (SrcOpIdx2 == CommuteAnyOperandIndex)
2445       // Only one of operands is not fixed.
2446       CommutableOpIdx2 = SrcOpIdx1;
2447 
2448     // CommutableOpIdx2 is well defined now. Let's choose another commutable
2449     // operand and assign its index to CommutableOpIdx1.
2450     Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
2451 
2452     unsigned CommutableOpIdx1;
2453     for (CommutableOpIdx1 = LastCommutableVecOp;
2454          CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
2455       // Just ignore and skip the k-mask operand.
2456       if (CommutableOpIdx1 == KMaskOp)
2457         continue;
2458 
2459       // The commuted operands must have different registers.
2460       // Otherwise, the commute transformation does not change anything and
2461       // is useless then.
2462       if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
2463         break;
2464     }
2465 
2466     // No appropriate commutable operands were found.
2467     if (CommutableOpIdx1 < FirstCommutableVecOp)
2468       return false;
2469 
2470     // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpidx2
2471     // to return those values.
2472     if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2473                               CommutableOpIdx1, CommutableOpIdx2))
2474       return false;
2475   }
2476 
2477   return true;
2478 }
2479 
2480 bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2481                                          unsigned &SrcOpIdx1,
2482                                          unsigned &SrcOpIdx2) const {
2483   const MCInstrDesc &Desc = MI.getDesc();
2484   if (!Desc.isCommutable())
2485     return false;
2486 
2487   switch (MI.getOpcode()) {
2488   case X86::CMPSDrr:
2489   case X86::CMPSSrr:
2490   case X86::CMPPDrri:
2491   case X86::CMPPSrri:
2492   case X86::VCMPSDrr:
2493   case X86::VCMPSSrr:
2494   case X86::VCMPPDrri:
2495   case X86::VCMPPSrri:
2496   case X86::VCMPPDYrri:
2497   case X86::VCMPPSYrri:
2498   case X86::VCMPSDZrr:
2499   case X86::VCMPSSZrr:
2500   case X86::VCMPPDZrri:
2501   case X86::VCMPPSZrri:
2502   case X86::VCMPSHZrr:
2503   case X86::VCMPPHZrri:
2504   case X86::VCMPPHZ128rri:
2505   case X86::VCMPPHZ256rri:
2506   case X86::VCMPPDZ128rri:
2507   case X86::VCMPPSZ128rri:
2508   case X86::VCMPPDZ256rri:
2509   case X86::VCMPPSZ256rri:
2510   case X86::VCMPPDZrrik:
2511   case X86::VCMPPSZrrik:
2512   case X86::VCMPPDZ128rrik:
2513   case X86::VCMPPSZ128rrik:
2514   case X86::VCMPPDZ256rrik:
2515   case X86::VCMPPSZ256rrik: {
2516     unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;
2517 
2518     // Float comparison can be safely commuted for
2519     // Ordered/Unordered/Equal/NotEqual tests
2520     unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;
2521     switch (Imm) {
2522     default:
2523       // EVEX versions can be commuted.
2524       if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX)
2525         break;
2526       return false;
2527     case 0x00: // EQUAL
2528     case 0x03: // UNORDERED
2529     case 0x04: // NOT EQUAL
2530     case 0x07: // ORDERED
2531       break;
2532     }
2533 
2534     // The indices of the commutable operands are 1 and 2 (or 2 and 3
2535     // when masked).
2536     // Assign them to the returned operand indices here.
2537     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2538                                 2 + OpOffset);
2539   }
2540   case X86::MOVSSrr:
2541     // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
2542     // form sse4.1 blend. We assume VMOVSSrr/VMOVSDrr is always commutable since
2543     // AVX implies sse4.1.
2544     if (Subtarget.hasSSE41())
2545       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2546     return false;
2547   case X86::SHUFPDrri:
2548     // We can commute this to MOVSD.
2549     if (MI.getOperand(3).getImm() == 0x02)
2550       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2551     return false;
2552   case X86::MOVHLPSrr:
2553   case X86::UNPCKHPDrr:
2554   case X86::VMOVHLPSrr:
2555   case X86::VUNPCKHPDrr:
2556   case X86::VMOVHLPSZrr:
2557   case X86::VUNPCKHPDZ128rr:
2558     if (Subtarget.hasSSE2())
2559       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2560     return false;
2561   case X86::VPTERNLOGDZrri:      case X86::VPTERNLOGDZrmi:
2562   case X86::VPTERNLOGDZ128rri:   case X86::VPTERNLOGDZ128rmi:
2563   case X86::VPTERNLOGDZ256rri:   case X86::VPTERNLOGDZ256rmi:
2564   case X86::VPTERNLOGQZrri:      case X86::VPTERNLOGQZrmi:
2565   case X86::VPTERNLOGQZ128rri:   case X86::VPTERNLOGQZ128rmi:
2566   case X86::VPTERNLOGQZ256rri:   case X86::VPTERNLOGQZ256rmi:
2567   case X86::VPTERNLOGDZrrik:
2568   case X86::VPTERNLOGDZ128rrik:
2569   case X86::VPTERNLOGDZ256rrik:
2570   case X86::VPTERNLOGQZrrik:
2571   case X86::VPTERNLOGQZ128rrik:
2572   case X86::VPTERNLOGQZ256rrik:
2573   case X86::VPTERNLOGDZrrikz:    case X86::VPTERNLOGDZrmikz:
2574   case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
2575   case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
2576   case X86::VPTERNLOGQZrrikz:    case X86::VPTERNLOGQZrmikz:
2577   case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
2578   case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
2579   case X86::VPTERNLOGDZ128rmbi:
2580   case X86::VPTERNLOGDZ256rmbi:
2581   case X86::VPTERNLOGDZrmbi:
2582   case X86::VPTERNLOGQZ128rmbi:
2583   case X86::VPTERNLOGQZ256rmbi:
2584   case X86::VPTERNLOGQZrmbi:
2585   case X86::VPTERNLOGDZ128rmbikz:
2586   case X86::VPTERNLOGDZ256rmbikz:
2587   case X86::VPTERNLOGDZrmbikz:
2588   case X86::VPTERNLOGQZ128rmbikz:
2589   case X86::VPTERNLOGQZ256rmbikz:
2590   case X86::VPTERNLOGQZrmbikz:
2591     return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2592   case X86::VPDPWSSDYrr:
2593   case X86::VPDPWSSDrr:
2594   case X86::VPDPWSSDSYrr:
2595   case X86::VPDPWSSDSrr:
2596   case X86::VPDPWSSDZ128r:
2597   case X86::VPDPWSSDZ128rk:
2598   case X86::VPDPWSSDZ128rkz:
2599   case X86::VPDPWSSDZ256r:
2600   case X86::VPDPWSSDZ256rk:
2601   case X86::VPDPWSSDZ256rkz:
2602   case X86::VPDPWSSDZr:
2603   case X86::VPDPWSSDZrk:
2604   case X86::VPDPWSSDZrkz:
2605   case X86::VPDPWSSDSZ128r:
2606   case X86::VPDPWSSDSZ128rk:
2607   case X86::VPDPWSSDSZ128rkz:
2608   case X86::VPDPWSSDSZ256r:
2609   case X86::VPDPWSSDSZ256rk:
2610   case X86::VPDPWSSDSZ256rkz:
2611   case X86::VPDPWSSDSZr:
2612   case X86::VPDPWSSDSZrk:
2613   case X86::VPDPWSSDSZrkz:
2614   case X86::VPMADD52HUQZ128r:
2615   case X86::VPMADD52HUQZ128rk:
2616   case X86::VPMADD52HUQZ128rkz:
2617   case X86::VPMADD52HUQZ256r:
2618   case X86::VPMADD52HUQZ256rk:
2619   case X86::VPMADD52HUQZ256rkz:
2620   case X86::VPMADD52HUQZr:
2621   case X86::VPMADD52HUQZrk:
2622   case X86::VPMADD52HUQZrkz:
2623   case X86::VPMADD52LUQZ128r:
2624   case X86::VPMADD52LUQZ128rk:
2625   case X86::VPMADD52LUQZ128rkz:
2626   case X86::VPMADD52LUQZ256r:
2627   case X86::VPMADD52LUQZ256rk:
2628   case X86::VPMADD52LUQZ256rkz:
2629   case X86::VPMADD52LUQZr:
2630   case X86::VPMADD52LUQZrk:
2631   case X86::VPMADD52LUQZrkz:
2632   case X86::VFMADDCPHZr:
2633   case X86::VFMADDCPHZrk:
2634   case X86::VFMADDCPHZrkz:
2635   case X86::VFMADDCPHZ128r:
2636   case X86::VFMADDCPHZ128rk:
2637   case X86::VFMADDCPHZ128rkz:
2638   case X86::VFMADDCPHZ256r:
2639   case X86::VFMADDCPHZ256rk:
2640   case X86::VFMADDCPHZ256rkz:
2641   case X86::VFMADDCSHZr:
2642   case X86::VFMADDCSHZrk:
2643   case X86::VFMADDCSHZrkz: {
2644     unsigned CommutableOpIdx1 = 2;
2645     unsigned CommutableOpIdx2 = 3;
2646     if (X86II::isKMasked(Desc.TSFlags)) {
2647       // Skip the mask register.
2648       ++CommutableOpIdx1;
2649       ++CommutableOpIdx2;
2650     }
2651     if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2652                               CommutableOpIdx1, CommutableOpIdx2))
2653       return false;
2654     if (!MI.getOperand(SrcOpIdx1).isReg() ||
2655         !MI.getOperand(SrcOpIdx2).isReg())
2656       // No idea.
2657       return false;
2658     return true;
2659   }
2660 
2661   default:
2662     const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
2663                                                       MI.getDesc().TSFlags);
2664     if (FMA3Group)
2665       return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
2666                                            FMA3Group->isIntrinsic());
2667 
2668     // Handled masked instructions since we need to skip over the mask input
2669     // and the preserved input.
2670     if (X86II::isKMasked(Desc.TSFlags)) {
2671       // First assume that the first input is the mask operand and skip past it.
2672       unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
2673       unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
2674       // Check if the first input is tied. If there isn't one then we only
2675       // need to skip the mask operand which we did above.
2676       if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
2677                                              MCOI::TIED_TO) != -1)) {
2678         // If this is zero masking instruction with a tied operand, we need to
2679         // move the first index back to the first input since this must
2680         // be a 3 input instruction and we want the first two non-mask inputs.
2681         // Otherwise this is a 2 input instruction with a preserved input and
2682         // mask, so we need to move the indices to skip one more input.
2683         if (X86II::isKMergeMasked(Desc.TSFlags)) {
2684           ++CommutableOpIdx1;
2685           ++CommutableOpIdx2;
2686         } else {
2687           --CommutableOpIdx1;
2688         }
2689       }
2690 
2691       if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2692                                 CommutableOpIdx1, CommutableOpIdx2))
2693         return false;
2694 
2695       if (!MI.getOperand(SrcOpIdx1).isReg() ||
2696           !MI.getOperand(SrcOpIdx2).isReg())
2697         // No idea.
2698         return false;
2699       return true;
2700     }
2701 
2702     return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2703   }
2704   return false;
2705 }
2706 
2707 static bool isConvertibleLEA(MachineInstr *MI) {
2708   unsigned Opcode = MI->getOpcode();
2709   if (Opcode != X86::LEA32r && Opcode != X86::LEA64r &&
2710       Opcode != X86::LEA64_32r)
2711     return false;
2712 
2713   const MachineOperand &Scale = MI->getOperand(1 + X86::AddrScaleAmt);
2714   const MachineOperand &Disp = MI->getOperand(1 + X86::AddrDisp);
2715   const MachineOperand &Segment = MI->getOperand(1 + X86::AddrSegmentReg);
2716 
2717   if (Segment.getReg() != 0 || !Disp.isImm() || Disp.getImm() != 0 ||
2718       Scale.getImm() > 1)
2719     return false;
2720 
2721   return true;
2722 }
2723 
2724 bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const {
2725   // Currently we're interested in following sequence only.
2726   //   r3 = lea r1, r2
2727   //   r5 = add r3, r4
2728   // Both r3 and r4 are killed in add, we hope the add instruction has the
2729   // operand order
2730   //   r5 = add r4, r3
2731   // So later in X86FixupLEAs the lea instruction can be rewritten as add.
2732   unsigned Opcode = MI.getOpcode();
2733   if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr)
2734     return false;
2735 
2736   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2737   Register Reg1 = MI.getOperand(1).getReg();
2738   Register Reg2 = MI.getOperand(2).getReg();
2739 
2740   // Check if Reg1 comes from LEA in the same MBB.
2741   if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg1)) {
2742     if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
2743       Commute = true;
2744       return true;
2745     }
2746   }
2747 
2748   // Check if Reg2 comes from LEA in the same MBB.
2749   if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg2)) {
2750     if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
2751       Commute = false;
2752       return true;
2753     }
2754   }
2755 
2756   return false;
2757 }
2758 
2759 X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
2760   switch (MI.getOpcode()) {
2761   default: return X86::COND_INVALID;
2762   case X86::JCC_1:
2763     return static_cast<X86::CondCode>(
2764         MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2765   }
2766 }
2767 
2768 /// Return condition code of a SETCC opcode.
2769 X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
2770   switch (MI.getOpcode()) {
2771   default: return X86::COND_INVALID;
2772   case X86::SETCCr: case X86::SETCCm:
2773     return static_cast<X86::CondCode>(
2774         MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2775   }
2776 }
2777 
2778 /// Return condition code of a CMov opcode.
2779 X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
2780   switch (MI.getOpcode()) {
2781   default: return X86::COND_INVALID;
2782   case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr:
2783   case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm:
2784     return static_cast<X86::CondCode>(
2785         MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2786   }
2787 }
2788 
2789 /// Return the inverse of the specified condition,
2790 /// e.g. turning COND_E to COND_NE.
2791 X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
2792   switch (CC) {
2793   default: llvm_unreachable("Illegal condition code!");
2794   case X86::COND_E:  return X86::COND_NE;
2795   case X86::COND_NE: return X86::COND_E;
2796   case X86::COND_L:  return X86::COND_GE;
2797   case X86::COND_LE: return X86::COND_G;
2798   case X86::COND_G:  return X86::COND_LE;
2799   case X86::COND_GE: return X86::COND_L;
2800   case X86::COND_B:  return X86::COND_AE;
2801   case X86::COND_BE: return X86::COND_A;
2802   case X86::COND_A:  return X86::COND_BE;
2803   case X86::COND_AE: return X86::COND_B;
2804   case X86::COND_S:  return X86::COND_NS;
2805   case X86::COND_NS: return X86::COND_S;
2806   case X86::COND_P:  return X86::COND_NP;
2807   case X86::COND_NP: return X86::COND_P;
2808   case X86::COND_O:  return X86::COND_NO;
2809   case X86::COND_NO: return X86::COND_O;
2810   case X86::COND_NE_OR_P:  return X86::COND_E_AND_NP;
2811   case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
2812   }
2813 }
2814 
2815 /// Assuming the flags are set by MI(a,b), return the condition code if we
2816 /// modify the instructions such that flags are set by MI(b,a).
2817 static X86::CondCode getSwappedCondition(X86::CondCode CC) {
2818   switch (CC) {
2819   default: return X86::COND_INVALID;
2820   case X86::COND_E:  return X86::COND_E;
2821   case X86::COND_NE: return X86::COND_NE;
2822   case X86::COND_L:  return X86::COND_G;
2823   case X86::COND_LE: return X86::COND_GE;
2824   case X86::COND_G:  return X86::COND_L;
2825   case X86::COND_GE: return X86::COND_LE;
2826   case X86::COND_B:  return X86::COND_A;
2827   case X86::COND_BE: return X86::COND_AE;
2828   case X86::COND_A:  return X86::COND_B;
2829   case X86::COND_AE: return X86::COND_BE;
2830   }
2831 }
2832 
2833 std::pair<X86::CondCode, bool>
2834 X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
2835   X86::CondCode CC = X86::COND_INVALID;
2836   bool NeedSwap = false;
2837   switch (Predicate) {
2838   default: break;
2839   // Floating-point Predicates
2840   case CmpInst::FCMP_UEQ: CC = X86::COND_E;       break;
2841   case CmpInst::FCMP_OLT: NeedSwap = true;        LLVM_FALLTHROUGH;
2842   case CmpInst::FCMP_OGT: CC = X86::COND_A;       break;
2843   case CmpInst::FCMP_OLE: NeedSwap = true;        LLVM_FALLTHROUGH;
2844   case CmpInst::FCMP_OGE: CC = X86::COND_AE;      break;
2845   case CmpInst::FCMP_UGT: NeedSwap = true;        LLVM_FALLTHROUGH;
2846   case CmpInst::FCMP_ULT: CC = X86::COND_B;       break;
2847   case CmpInst::FCMP_UGE: NeedSwap = true;        LLVM_FALLTHROUGH;
2848   case CmpInst::FCMP_ULE: CC = X86::COND_BE;      break;
2849   case CmpInst::FCMP_ONE: CC = X86::COND_NE;      break;
2850   case CmpInst::FCMP_UNO: CC = X86::COND_P;       break;
2851   case CmpInst::FCMP_ORD: CC = X86::COND_NP;      break;
2852   case CmpInst::FCMP_OEQ:                         LLVM_FALLTHROUGH;
2853   case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
2854 
2855   // Integer Predicates
2856   case CmpInst::ICMP_EQ:  CC = X86::COND_E;       break;
2857   case CmpInst::ICMP_NE:  CC = X86::COND_NE;      break;
2858   case CmpInst::ICMP_UGT: CC = X86::COND_A;       break;
2859   case CmpInst::ICMP_UGE: CC = X86::COND_AE;      break;
2860   case CmpInst::ICMP_ULT: CC = X86::COND_B;       break;
2861   case CmpInst::ICMP_ULE: CC = X86::COND_BE;      break;
2862   case CmpInst::ICMP_SGT: CC = X86::COND_G;       break;
2863   case CmpInst::ICMP_SGE: CC = X86::COND_GE;      break;
2864   case CmpInst::ICMP_SLT: CC = X86::COND_L;       break;
2865   case CmpInst::ICMP_SLE: CC = X86::COND_LE;      break;
2866   }
2867 
2868   return std::make_pair(CC, NeedSwap);
2869 }
2870 
2871 /// Return a cmov opcode for the given register size in bytes, and operand type.
2872 unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) {
2873   switch(RegBytes) {
2874   default: llvm_unreachable("Illegal register size!");
2875   case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr;
2876   case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr;
2877   case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr;
2878   }
2879 }
2880 
2881 /// Get the VPCMP immediate for the given condition.
2882 unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
2883   switch (CC) {
2884   default: llvm_unreachable("Unexpected SETCC condition");
2885   case ISD::SETNE:  return 4;
2886   case ISD::SETEQ:  return 0;
2887   case ISD::SETULT:
2888   case ISD::SETLT: return 1;
2889   case ISD::SETUGT:
2890   case ISD::SETGT: return 6;
2891   case ISD::SETUGE:
2892   case ISD::SETGE: return 5;
2893   case ISD::SETULE:
2894   case ISD::SETLE: return 2;
2895   }
2896 }
2897 
2898 /// Get the VPCMP immediate if the operands are swapped.
2899 unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
2900   switch (Imm) {
2901   default: llvm_unreachable("Unreachable!");
2902   case 0x01: Imm = 0x06; break; // LT  -> NLE
2903   case 0x02: Imm = 0x05; break; // LE  -> NLT
2904   case 0x05: Imm = 0x02; break; // NLT -> LE
2905   case 0x06: Imm = 0x01; break; // NLE -> LT
2906   case 0x00: // EQ
2907   case 0x03: // FALSE
2908   case 0x04: // NE
2909   case 0x07: // TRUE
2910     break;
2911   }
2912 
2913   return Imm;
2914 }
2915 
2916 /// Get the VPCOM immediate if the operands are swapped.
2917 unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
2918   switch (Imm) {
2919   default: llvm_unreachable("Unreachable!");
2920   case 0x00: Imm = 0x02; break; // LT -> GT
2921   case 0x01: Imm = 0x03; break; // LE -> GE
2922   case 0x02: Imm = 0x00; break; // GT -> LT
2923   case 0x03: Imm = 0x01; break; // GE -> LE
2924   case 0x04: // EQ
2925   case 0x05: // NE
2926   case 0x06: // FALSE
2927   case 0x07: // TRUE
2928     break;
2929   }
2930 
2931   return Imm;
2932 }
2933 
2934 /// Get the VCMP immediate if the operands are swapped.
2935 unsigned X86::getSwappedVCMPImm(unsigned Imm) {
2936   // Only need the lower 2 bits to distinquish.
2937   switch (Imm & 0x3) {
2938   default: llvm_unreachable("Unreachable!");
2939   case 0x00: case 0x03:
2940     // EQ/NE/TRUE/FALSE/ORD/UNORD don't change immediate when commuted.
2941     break;
2942   case 0x01: case 0x02:
2943     // Need to toggle bits 3:0. Bit 4 stays the same.
2944     Imm ^= 0xf;
2945     break;
2946   }
2947 
2948   return Imm;
2949 }
2950 
2951 /// Return true if the Reg is X87 register.
2952 static bool isX87Reg(unsigned Reg) {
2953   return (Reg == X86::FPCW || Reg == X86::FPSW ||
2954           (Reg >= X86::ST0 && Reg <= X86::ST7));
2955 }
2956 
2957 /// check if the instruction is X87 instruction
2958 bool X86::isX87Instruction(MachineInstr &MI) {
2959   for (const MachineOperand &MO : MI.operands()) {
2960     if (!MO.isReg())
2961       continue;
2962     if (isX87Reg(MO.getReg()))
2963       return true;
2964   }
2965   return false;
2966 }
2967 
2968 bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
2969   switch (MI.getOpcode()) {
2970   case X86::TCRETURNdi:
2971   case X86::TCRETURNri:
2972   case X86::TCRETURNmi:
2973   case X86::TCRETURNdi64:
2974   case X86::TCRETURNri64:
2975   case X86::TCRETURNmi64:
2976     return true;
2977   default:
2978     return false;
2979   }
2980 }
2981 
2982 bool X86InstrInfo::canMakeTailCallConditional(
2983     SmallVectorImpl<MachineOperand> &BranchCond,
2984     const MachineInstr &TailCall) const {
2985   if (TailCall.getOpcode() != X86::TCRETURNdi &&
2986       TailCall.getOpcode() != X86::TCRETURNdi64) {
2987     // Only direct calls can be done with a conditional branch.
2988     return false;
2989   }
2990 
2991   const MachineFunction *MF = TailCall.getParent()->getParent();
2992   if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
2993     // Conditional tail calls confuse the Win64 unwinder.
2994     return false;
2995   }
2996 
2997   assert(BranchCond.size() == 1);
2998   if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
2999     // Can't make a conditional tail call with this condition.
3000     return false;
3001   }
3002 
3003   const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
3004   if (X86FI->getTCReturnAddrDelta() != 0 ||
3005       TailCall.getOperand(1).getImm() != 0) {
3006     // A conditional tail call cannot do any stack adjustment.
3007     return false;
3008   }
3009 
3010   return true;
3011 }
3012 
3013 void X86InstrInfo::replaceBranchWithTailCall(
3014     MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
3015     const MachineInstr &TailCall) const {
3016   assert(canMakeTailCallConditional(BranchCond, TailCall));
3017 
3018   MachineBasicBlock::iterator I = MBB.end();
3019   while (I != MBB.begin()) {
3020     --I;
3021     if (I->isDebugInstr())
3022       continue;
3023     if (!I->isBranch())
3024       assert(0 && "Can't find the branch to replace!");
3025 
3026     X86::CondCode CC = X86::getCondFromBranch(*I);
3027     assert(BranchCond.size() == 1);
3028     if (CC != BranchCond[0].getImm())
3029       continue;
3030 
3031     break;
3032   }
3033 
3034   unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
3035                                                          : X86::TCRETURNdi64cc;
3036 
3037   auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
3038   MIB->addOperand(TailCall.getOperand(0)); // Destination.
3039   MIB.addImm(0); // Stack offset (not used).
3040   MIB->addOperand(BranchCond[0]); // Condition.
3041   MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
3042 
3043   // Add implicit uses and defs of all live regs potentially clobbered by the
3044   // call. This way they still appear live across the call.
3045   LivePhysRegs LiveRegs(getRegisterInfo());
3046   LiveRegs.addLiveOuts(MBB);
3047   SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
3048   LiveRegs.stepForward(*MIB, Clobbers);
3049   for (const auto &C : Clobbers) {
3050     MIB.addReg(C.first, RegState::Implicit);
3051     MIB.addReg(C.first, RegState::Implicit | RegState::Define);
3052   }
3053 
3054   I->eraseFromParent();
3055 }
3056 
3057 // Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
3058 // not be a fallthrough MBB now due to layout changes). Return nullptr if the
3059 // fallthrough MBB cannot be identified.
3060 static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
3061                                             MachineBasicBlock *TBB) {
3062   // Look for non-EHPad successors other than TBB. If we find exactly one, it
3063   // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
3064   // and fallthrough MBB. If we find more than one, we cannot identify the
3065   // fallthrough MBB and should return nullptr.
3066   MachineBasicBlock *FallthroughBB = nullptr;
3067   for (MachineBasicBlock *Succ : MBB->successors()) {
3068     if (Succ->isEHPad() || (Succ == TBB && FallthroughBB))
3069       continue;
3070     // Return a nullptr if we found more than one fallthrough successor.
3071     if (FallthroughBB && FallthroughBB != TBB)
3072       return nullptr;
3073     FallthroughBB = Succ;
3074   }
3075   return FallthroughBB;
3076 }
3077 
3078 bool X86InstrInfo::AnalyzeBranchImpl(
3079     MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
3080     SmallVectorImpl<MachineOperand> &Cond,
3081     SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
3082 
3083   // Start from the bottom of the block and work up, examining the
3084   // terminator instructions.
3085   MachineBasicBlock::iterator I = MBB.end();
3086   MachineBasicBlock::iterator UnCondBrIter = MBB.end();
3087   while (I != MBB.begin()) {
3088     --I;
3089     if (I->isDebugInstr())
3090       continue;
3091 
3092     // Working from the bottom, when we see a non-terminator instruction, we're
3093     // done.
3094     if (!isUnpredicatedTerminator(*I))
3095       break;
3096 
3097     // A terminator that isn't a branch can't easily be handled by this
3098     // analysis.
3099     if (!I->isBranch())
3100       return true;
3101 
3102     // Handle unconditional branches.
3103     if (I->getOpcode() == X86::JMP_1) {
3104       UnCondBrIter = I;
3105 
3106       if (!AllowModify) {
3107         TBB = I->getOperand(0).getMBB();
3108         continue;
3109       }
3110 
3111       // If the block has any instructions after a JMP, delete them.
3112       while (std::next(I) != MBB.end())
3113         std::next(I)->eraseFromParent();
3114 
3115       Cond.clear();
3116       FBB = nullptr;
3117 
3118       // Delete the JMP if it's equivalent to a fall-through.
3119       if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
3120         TBB = nullptr;
3121         I->eraseFromParent();
3122         I = MBB.end();
3123         UnCondBrIter = MBB.end();
3124         continue;
3125       }
3126 
3127       // TBB is used to indicate the unconditional destination.
3128       TBB = I->getOperand(0).getMBB();
3129       continue;
3130     }
3131 
3132     // Handle conditional branches.
3133     X86::CondCode BranchCode = X86::getCondFromBranch(*I);
3134     if (BranchCode == X86::COND_INVALID)
3135       return true;  // Can't handle indirect branch.
3136 
3137     // In practice we should never have an undef eflags operand, if we do
3138     // abort here as we are not prepared to preserve the flag.
3139     if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef())
3140       return true;
3141 
3142     // Working from the bottom, handle the first conditional branch.
3143     if (Cond.empty()) {
3144       MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
3145       if (AllowModify && UnCondBrIter != MBB.end() &&
3146           MBB.isLayoutSuccessor(TargetBB)) {
3147         // If we can modify the code and it ends in something like:
3148         //
3149         //     jCC L1
3150         //     jmp L2
3151         //   L1:
3152         //     ...
3153         //   L2:
3154         //
3155         // Then we can change this to:
3156         //
3157         //     jnCC L2
3158         //   L1:
3159         //     ...
3160         //   L2:
3161         //
3162         // Which is a bit more efficient.
3163         // We conditionally jump to the fall-through block.
3164         BranchCode = GetOppositeBranchCondition(BranchCode);
3165         MachineBasicBlock::iterator OldInst = I;
3166 
3167         BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1))
3168           .addMBB(UnCondBrIter->getOperand(0).getMBB())
3169           .addImm(BranchCode);
3170         BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
3171           .addMBB(TargetBB);
3172 
3173         OldInst->eraseFromParent();
3174         UnCondBrIter->eraseFromParent();
3175 
3176         // Restart the analysis.
3177         UnCondBrIter = MBB.end();
3178         I = MBB.end();
3179         continue;
3180       }
3181 
3182       FBB = TBB;
3183       TBB = I->getOperand(0).getMBB();
3184       Cond.push_back(MachineOperand::CreateImm(BranchCode));
3185       CondBranches.push_back(&*I);
3186       continue;
3187     }
3188 
3189     // Handle subsequent conditional branches. Only handle the case where all
3190     // conditional branches branch to the same destination and their condition
3191     // opcodes fit one of the special multi-branch idioms.
3192     assert(Cond.size() == 1);
3193     assert(TBB);
3194 
3195     // If the conditions are the same, we can leave them alone.
3196     X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
3197     auto NewTBB = I->getOperand(0).getMBB();
3198     if (OldBranchCode == BranchCode && TBB == NewTBB)
3199       continue;
3200 
3201     // If they differ, see if they fit one of the known patterns. Theoretically,
3202     // we could handle more patterns here, but we shouldn't expect to see them
3203     // if instruction selection has done a reasonable job.
3204     if (TBB == NewTBB &&
3205                ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
3206                 (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
3207       BranchCode = X86::COND_NE_OR_P;
3208     } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
3209                (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
3210       if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
3211         return true;
3212 
3213       // X86::COND_E_AND_NP usually has two different branch destinations.
3214       //
3215       // JP B1
3216       // JE B2
3217       // JMP B1
3218       // B1:
3219       // B2:
3220       //
3221       // Here this condition branches to B2 only if NP && E. It has another
3222       // equivalent form:
3223       //
3224       // JNE B1
3225       // JNP B2
3226       // JMP B1
3227       // B1:
3228       // B2:
3229       //
3230       // Similarly it branches to B2 only if E && NP. That is why this condition
3231       // is named with COND_E_AND_NP.
3232       BranchCode = X86::COND_E_AND_NP;
3233     } else
3234       return true;
3235 
3236     // Update the MachineOperand.
3237     Cond[0].setImm(BranchCode);
3238     CondBranches.push_back(&*I);
3239   }
3240 
3241   return false;
3242 }
3243 
3244 bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
3245                                  MachineBasicBlock *&TBB,
3246                                  MachineBasicBlock *&FBB,
3247                                  SmallVectorImpl<MachineOperand> &Cond,
3248                                  bool AllowModify) const {
3249   SmallVector<MachineInstr *, 4> CondBranches;
3250   return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
3251 }
3252 
3253 bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
3254                                           MachineBranchPredicate &MBP,
3255                                           bool AllowModify) const {
3256   using namespace std::placeholders;
3257 
3258   SmallVector<MachineOperand, 4> Cond;
3259   SmallVector<MachineInstr *, 4> CondBranches;
3260   if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
3261                         AllowModify))
3262     return true;
3263 
3264   if (Cond.size() != 1)
3265     return true;
3266 
3267   assert(MBP.TrueDest && "expected!");
3268 
3269   if (!MBP.FalseDest)
3270     MBP.FalseDest = MBB.getNextNode();
3271 
3272   const TargetRegisterInfo *TRI = &getRegisterInfo();
3273 
3274   MachineInstr *ConditionDef = nullptr;
3275   bool SingleUseCondition = true;
3276 
3277   for (MachineInstr &MI : llvm::drop_begin(llvm::reverse(MBB))) {
3278     if (MI.modifiesRegister(X86::EFLAGS, TRI)) {
3279       ConditionDef = &MI;
3280       break;
3281     }
3282 
3283     if (MI.readsRegister(X86::EFLAGS, TRI))
3284       SingleUseCondition = false;
3285   }
3286 
3287   if (!ConditionDef)
3288     return true;
3289 
3290   if (SingleUseCondition) {
3291     for (auto *Succ : MBB.successors())
3292       if (Succ->isLiveIn(X86::EFLAGS))
3293         SingleUseCondition = false;
3294   }
3295 
3296   MBP.ConditionDef = ConditionDef;
3297   MBP.SingleUseCondition = SingleUseCondition;
3298 
3299   // Currently we only recognize the simple pattern:
3300   //
3301   //   test %reg, %reg
3302   //   je %label
3303   //
3304   const unsigned TestOpcode =
3305       Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
3306 
3307   if (ConditionDef->getOpcode() == TestOpcode &&
3308       ConditionDef->getNumOperands() == 3 &&
3309       ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
3310       (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
3311     MBP.LHS = ConditionDef->getOperand(0);
3312     MBP.RHS = MachineOperand::CreateImm(0);
3313     MBP.Predicate = Cond[0].getImm() == X86::COND_NE
3314                         ? MachineBranchPredicate::PRED_NE
3315                         : MachineBranchPredicate::PRED_EQ;
3316     return false;
3317   }
3318 
3319   return true;
3320 }
3321 
3322 unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
3323                                     int *BytesRemoved) const {
3324   assert(!BytesRemoved && "code size not handled");
3325 
3326   MachineBasicBlock::iterator I = MBB.end();
3327   unsigned Count = 0;
3328 
3329   while (I != MBB.begin()) {
3330     --I;
3331     if (I->isDebugInstr())
3332       continue;
3333     if (I->getOpcode() != X86::JMP_1 &&
3334         X86::getCondFromBranch(*I) == X86::COND_INVALID)
3335       break;
3336     // Remove the branch.
3337     I->eraseFromParent();
3338     I = MBB.end();
3339     ++Count;
3340   }
3341 
3342   return Count;
3343 }
3344 
3345 unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
3346                                     MachineBasicBlock *TBB,
3347                                     MachineBasicBlock *FBB,
3348                                     ArrayRef<MachineOperand> Cond,
3349                                     const DebugLoc &DL,
3350                                     int *BytesAdded) const {
3351   // Shouldn't be a fall through.
3352   assert(TBB && "insertBranch must not be told to insert a fallthrough");
3353   assert((Cond.size() == 1 || Cond.size() == 0) &&
3354          "X86 branch conditions have one component!");
3355   assert(!BytesAdded && "code size not handled");
3356 
3357   if (Cond.empty()) {
3358     // Unconditional branch?
3359     assert(!FBB && "Unconditional branch with multiple successors!");
3360     BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
3361     return 1;
3362   }
3363 
3364   // If FBB is null, it is implied to be a fall-through block.
3365   bool FallThru = FBB == nullptr;
3366 
3367   // Conditional branch.
3368   unsigned Count = 0;
3369   X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
3370   switch (CC) {
3371   case X86::COND_NE_OR_P:
3372     // Synthesize NE_OR_P with two branches.
3373     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE);
3374     ++Count;
3375     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P);
3376     ++Count;
3377     break;
3378   case X86::COND_E_AND_NP:
3379     // Use the next block of MBB as FBB if it is null.
3380     if (FBB == nullptr) {
3381       FBB = getFallThroughMBB(&MBB, TBB);
3382       assert(FBB && "MBB cannot be the last block in function when the false "
3383                     "body is a fall-through.");
3384     }
3385     // Synthesize COND_E_AND_NP with two branches.
3386     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE);
3387     ++Count;
3388     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP);
3389     ++Count;
3390     break;
3391   default: {
3392     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC);
3393     ++Count;
3394   }
3395   }
3396   if (!FallThru) {
3397     // Two-way Conditional branch. Insert the second branch.
3398     BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
3399     ++Count;
3400   }
3401   return Count;
3402 }
3403 
3404 bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
3405                                    ArrayRef<MachineOperand> Cond,
3406                                    Register DstReg, Register TrueReg,
3407                                    Register FalseReg, int &CondCycles,
3408                                    int &TrueCycles, int &FalseCycles) const {
3409   // Not all subtargets have cmov instructions.
3410   if (!Subtarget.hasCMov())
3411     return false;
3412   if (Cond.size() != 1)
3413     return false;
3414   // We cannot do the composite conditions, at least not in SSA form.
3415   if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND)
3416     return false;
3417 
3418   // Check register classes.
3419   const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3420   const TargetRegisterClass *RC =
3421     RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
3422   if (!RC)
3423     return false;
3424 
3425   // We have cmov instructions for 16, 32, and 64 bit general purpose registers.
3426   if (X86::GR16RegClass.hasSubClassEq(RC) ||
3427       X86::GR32RegClass.hasSubClassEq(RC) ||
3428       X86::GR64RegClass.hasSubClassEq(RC)) {
3429     // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
3430     // Bridge. Probably Ivy Bridge as well.
3431     CondCycles = 2;
3432     TrueCycles = 2;
3433     FalseCycles = 2;
3434     return true;
3435   }
3436 
3437   // Can't do vectors.
3438   return false;
3439 }
3440 
3441 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
3442                                 MachineBasicBlock::iterator I,
3443                                 const DebugLoc &DL, Register DstReg,
3444                                 ArrayRef<MachineOperand> Cond, Register TrueReg,
3445                                 Register FalseReg) const {
3446   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3447   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
3448   const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
3449   assert(Cond.size() == 1 && "Invalid Cond array");
3450   unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
3451                                     false /*HasMemoryOperand*/);
3452   BuildMI(MBB, I, DL, get(Opc), DstReg)
3453       .addReg(FalseReg)
3454       .addReg(TrueReg)
3455       .addImm(Cond[0].getImm());
3456 }
3457 
3458 /// Test if the given register is a physical h register.
3459 static bool isHReg(unsigned Reg) {
3460   return X86::GR8_ABCD_HRegClass.contains(Reg);
3461 }
3462 
3463 // Try and copy between VR128/VR64 and GR64 registers.
3464 static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
3465                                         const X86Subtarget &Subtarget) {
3466   bool HasAVX = Subtarget.hasAVX();
3467   bool HasAVX512 = Subtarget.hasAVX512();
3468 
3469   // SrcReg(MaskReg) -> DestReg(GR64)
3470   // SrcReg(MaskReg) -> DestReg(GR32)
3471 
3472   // All KMASK RegClasses hold the same k registers, can be tested against anyone.
3473   if (X86::VK16RegClass.contains(SrcReg)) {
3474     if (X86::GR64RegClass.contains(DestReg)) {
3475       assert(Subtarget.hasBWI());
3476       return X86::KMOVQrk;
3477     }
3478     if (X86::GR32RegClass.contains(DestReg))
3479       return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk;
3480   }
3481 
3482   // SrcReg(GR64) -> DestReg(MaskReg)
3483   // SrcReg(GR32) -> DestReg(MaskReg)
3484 
3485   // All KMASK RegClasses hold the same k registers, can be tested against anyone.
3486   if (X86::VK16RegClass.contains(DestReg)) {
3487     if (X86::GR64RegClass.contains(SrcReg)) {
3488       assert(Subtarget.hasBWI());
3489       return X86::KMOVQkr;
3490     }
3491     if (X86::GR32RegClass.contains(SrcReg))
3492       return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr;
3493   }
3494 
3495 
3496   // SrcReg(VR128) -> DestReg(GR64)
3497   // SrcReg(VR64)  -> DestReg(GR64)
3498   // SrcReg(GR64)  -> DestReg(VR128)
3499   // SrcReg(GR64)  -> DestReg(VR64)
3500 
3501   if (X86::GR64RegClass.contains(DestReg)) {
3502     if (X86::VR128XRegClass.contains(SrcReg))
3503       // Copy from a VR128 register to a GR64 register.
3504       return HasAVX512 ? X86::VMOVPQIto64Zrr :
3505              HasAVX    ? X86::VMOVPQIto64rr  :
3506                          X86::MOVPQIto64rr;
3507     if (X86::VR64RegClass.contains(SrcReg))
3508       // Copy from a VR64 register to a GR64 register.
3509       return X86::MMX_MOVD64from64rr;
3510   } else if (X86::GR64RegClass.contains(SrcReg)) {
3511     // Copy from a GR64 register to a VR128 register.
3512     if (X86::VR128XRegClass.contains(DestReg))
3513       return HasAVX512 ? X86::VMOV64toPQIZrr :
3514              HasAVX    ? X86::VMOV64toPQIrr  :
3515                          X86::MOV64toPQIrr;
3516     // Copy from a GR64 register to a VR64 register.
3517     if (X86::VR64RegClass.contains(DestReg))
3518       return X86::MMX_MOVD64to64rr;
3519   }
3520 
3521   // SrcReg(VR128) -> DestReg(GR32)
3522   // SrcReg(GR32)  -> DestReg(VR128)
3523 
3524   if (X86::GR32RegClass.contains(DestReg) &&
3525       X86::VR128XRegClass.contains(SrcReg))
3526     // Copy from a VR128 register to a GR32 register.
3527     return HasAVX512 ? X86::VMOVPDI2DIZrr :
3528            HasAVX    ? X86::VMOVPDI2DIrr  :
3529                        X86::MOVPDI2DIrr;
3530 
3531   if (X86::VR128XRegClass.contains(DestReg) &&
3532       X86::GR32RegClass.contains(SrcReg))
3533     // Copy from a VR128 register to a VR128 register.
3534     return HasAVX512 ? X86::VMOVDI2PDIZrr :
3535            HasAVX    ? X86::VMOVDI2PDIrr  :
3536                        X86::MOVDI2PDIrr;
3537   return 0;
3538 }
3539 
3540 void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
3541                                MachineBasicBlock::iterator MI,
3542                                const DebugLoc &DL, MCRegister DestReg,
3543                                MCRegister SrcReg, bool KillSrc) const {
3544   // First deal with the normal symmetric copies.
3545   bool HasAVX = Subtarget.hasAVX();
3546   bool HasVLX = Subtarget.hasVLX();
3547   unsigned Opc = 0;
3548   if (X86::GR64RegClass.contains(DestReg, SrcReg))
3549     Opc = X86::MOV64rr;
3550   else if (X86::GR32RegClass.contains(DestReg, SrcReg))
3551     Opc = X86::MOV32rr;
3552   else if (X86::GR16RegClass.contains(DestReg, SrcReg))
3553     Opc = X86::MOV16rr;
3554   else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
3555     // Copying to or from a physical H register on x86-64 requires a NOREX
3556     // move.  Otherwise use a normal move.
3557     if ((isHReg(DestReg) || isHReg(SrcReg)) &&
3558         Subtarget.is64Bit()) {
3559       Opc = X86::MOV8rr_NOREX;
3560       // Both operands must be encodable without an REX prefix.
3561       assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
3562              "8-bit H register can not be copied outside GR8_NOREX");
3563     } else
3564       Opc = X86::MOV8rr;
3565   }
3566   else if (X86::VR64RegClass.contains(DestReg, SrcReg))
3567     Opc = X86::MMX_MOVQ64rr;
3568   else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
3569     if (HasVLX)
3570       Opc = X86::VMOVAPSZ128rr;
3571     else if (X86::VR128RegClass.contains(DestReg, SrcReg))
3572       Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
3573     else {
3574       // If this an extended register and we don't have VLX we need to use a
3575       // 512-bit move.
3576       Opc = X86::VMOVAPSZrr;
3577       const TargetRegisterInfo *TRI = &getRegisterInfo();
3578       DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
3579                                          &X86::VR512RegClass);
3580       SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
3581                                         &X86::VR512RegClass);
3582     }
3583   } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
3584     if (HasVLX)
3585       Opc = X86::VMOVAPSZ256rr;
3586     else if (X86::VR256RegClass.contains(DestReg, SrcReg))
3587       Opc = X86::VMOVAPSYrr;
3588     else {
3589       // If this an extended register and we don't have VLX we need to use a
3590       // 512-bit move.
3591       Opc = X86::VMOVAPSZrr;
3592       const TargetRegisterInfo *TRI = &getRegisterInfo();
3593       DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
3594                                          &X86::VR512RegClass);
3595       SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
3596                                         &X86::VR512RegClass);
3597     }
3598   } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
3599     Opc = X86::VMOVAPSZrr;
3600   // All KMASK RegClasses hold the same k registers, can be tested against anyone.
3601   else if (X86::VK16RegClass.contains(DestReg, SrcReg))
3602     Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
3603   if (!Opc)
3604     Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
3605 
3606   if (Opc) {
3607     BuildMI(MBB, MI, DL, get(Opc), DestReg)
3608       .addReg(SrcReg, getKillRegState(KillSrc));
3609     return;
3610   }
3611 
3612   if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
3613     // FIXME: We use a fatal error here because historically LLVM has tried
3614     // lower some of these physreg copies and we want to ensure we get
3615     // reasonable bug reports if someone encounters a case no other testing
3616     // found. This path should be removed after the LLVM 7 release.
3617     report_fatal_error("Unable to copy EFLAGS physical register!");
3618   }
3619 
3620   LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
3621                     << RI.getName(DestReg) << '\n');
3622   report_fatal_error("Cannot emit physreg copy instruction");
3623 }
3624 
3625 Optional<DestSourcePair>
3626 X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
3627   if (MI.isMoveReg())
3628     return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
3629   return None;
3630 }
3631 
3632 static unsigned getLoadStoreRegOpcode(Register Reg,
3633                                       const TargetRegisterClass *RC,
3634                                       bool IsStackAligned,
3635                                       const X86Subtarget &STI, bool load) {
3636   bool HasAVX = STI.hasAVX();
3637   bool HasAVX512 = STI.hasAVX512();
3638   bool HasVLX = STI.hasVLX();
3639 
3640   switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
3641   default:
3642     llvm_unreachable("Unknown spill size");
3643   case 1:
3644     assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
3645     if (STI.is64Bit())
3646       // Copying to or from a physical H register on x86-64 requires a NOREX
3647       // move.  Otherwise use a normal move.
3648       if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
3649         return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
3650     return load ? X86::MOV8rm : X86::MOV8mr;
3651   case 2:
3652     if (X86::VK16RegClass.hasSubClassEq(RC))
3653       return load ? X86::KMOVWkm : X86::KMOVWmk;
3654     if (X86::FR16XRegClass.hasSubClassEq(RC)) {
3655       assert(STI.hasFP16());
3656       return load ? X86::VMOVSHZrm_alt : X86::VMOVSHZmr;
3657     }
3658     assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
3659     return load ? X86::MOV16rm : X86::MOV16mr;
3660   case 4:
3661     if (X86::GR32RegClass.hasSubClassEq(RC))
3662       return load ? X86::MOV32rm : X86::MOV32mr;
3663     if (X86::FR32XRegClass.hasSubClassEq(RC))
3664       return load ?
3665         (HasAVX512 ? X86::VMOVSSZrm_alt :
3666          HasAVX    ? X86::VMOVSSrm_alt :
3667                      X86::MOVSSrm_alt) :
3668         (HasAVX512 ? X86::VMOVSSZmr :
3669          HasAVX    ? X86::VMOVSSmr :
3670                      X86::MOVSSmr);
3671     if (X86::RFP32RegClass.hasSubClassEq(RC))
3672       return load ? X86::LD_Fp32m : X86::ST_Fp32m;
3673     if (X86::VK32RegClass.hasSubClassEq(RC)) {
3674       assert(STI.hasBWI() && "KMOVD requires BWI");
3675       return load ? X86::KMOVDkm : X86::KMOVDmk;
3676     }
3677     // All of these mask pair classes have the same spill size, the same kind
3678     // of kmov instructions can be used with all of them.
3679     if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
3680         X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
3681         X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
3682         X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
3683         X86::VK16PAIRRegClass.hasSubClassEq(RC))
3684       return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
3685     llvm_unreachable("Unknown 4-byte regclass");
3686   case 8:
3687     if (X86::GR64RegClass.hasSubClassEq(RC))
3688       return load ? X86::MOV64rm : X86::MOV64mr;
3689     if (X86::FR64XRegClass.hasSubClassEq(RC))
3690       return load ?
3691         (HasAVX512 ? X86::VMOVSDZrm_alt :
3692          HasAVX    ? X86::VMOVSDrm_alt :
3693                      X86::MOVSDrm_alt) :
3694         (HasAVX512 ? X86::VMOVSDZmr :
3695          HasAVX    ? X86::VMOVSDmr :
3696                      X86::MOVSDmr);
3697     if (X86::VR64RegClass.hasSubClassEq(RC))
3698       return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
3699     if (X86::RFP64RegClass.hasSubClassEq(RC))
3700       return load ? X86::LD_Fp64m : X86::ST_Fp64m;
3701     if (X86::VK64RegClass.hasSubClassEq(RC)) {
3702       assert(STI.hasBWI() && "KMOVQ requires BWI");
3703       return load ? X86::KMOVQkm : X86::KMOVQmk;
3704     }
3705     llvm_unreachable("Unknown 8-byte regclass");
3706   case 10:
3707     assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
3708     return load ? X86::LD_Fp80m : X86::ST_FpP80m;
3709   case 16: {
3710     if (X86::VR128XRegClass.hasSubClassEq(RC)) {
3711       // If stack is realigned we can use aligned stores.
3712       if (IsStackAligned)
3713         return load ?
3714           (HasVLX    ? X86::VMOVAPSZ128rm :
3715            HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX :
3716            HasAVX    ? X86::VMOVAPSrm :
3717                        X86::MOVAPSrm):
3718           (HasVLX    ? X86::VMOVAPSZ128mr :
3719            HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX :
3720            HasAVX    ? X86::VMOVAPSmr :
3721                        X86::MOVAPSmr);
3722       else
3723         return load ?
3724           (HasVLX    ? X86::VMOVUPSZ128rm :
3725            HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX :
3726            HasAVX    ? X86::VMOVUPSrm :
3727                        X86::MOVUPSrm):
3728           (HasVLX    ? X86::VMOVUPSZ128mr :
3729            HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX :
3730            HasAVX    ? X86::VMOVUPSmr :
3731                        X86::MOVUPSmr);
3732     }
3733     llvm_unreachable("Unknown 16-byte regclass");
3734   }
3735   case 32:
3736     assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
3737     // If stack is realigned we can use aligned stores.
3738     if (IsStackAligned)
3739       return load ?
3740         (HasVLX    ? X86::VMOVAPSZ256rm :
3741          HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX :
3742                      X86::VMOVAPSYrm) :
3743         (HasVLX    ? X86::VMOVAPSZ256mr :
3744          HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX :
3745                      X86::VMOVAPSYmr);
3746     else
3747       return load ?
3748         (HasVLX    ? X86::VMOVUPSZ256rm :
3749          HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX :
3750                      X86::VMOVUPSYrm) :
3751         (HasVLX    ? X86::VMOVUPSZ256mr :
3752          HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX :
3753                      X86::VMOVUPSYmr);
3754   case 64:
3755     assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
3756     assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
3757     if (IsStackAligned)
3758       return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
3759     else
3760       return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
3761   }
3762 }
3763 
3764 Optional<ExtAddrMode>
3765 X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
3766                                       const TargetRegisterInfo *TRI) const {
3767   const MCInstrDesc &Desc = MemI.getDesc();
3768   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3769   if (MemRefBegin < 0)
3770     return None;
3771 
3772   MemRefBegin += X86II::getOperandBias(Desc);
3773 
3774   auto &BaseOp = MemI.getOperand(MemRefBegin + X86::AddrBaseReg);
3775   if (!BaseOp.isReg()) // Can be an MO_FrameIndex
3776     return None;
3777 
3778   const MachineOperand &DispMO = MemI.getOperand(MemRefBegin + X86::AddrDisp);
3779   // Displacement can be symbolic
3780   if (!DispMO.isImm())
3781     return None;
3782 
3783   ExtAddrMode AM;
3784   AM.BaseReg = BaseOp.getReg();
3785   AM.ScaledReg = MemI.getOperand(MemRefBegin + X86::AddrIndexReg).getReg();
3786   AM.Scale = MemI.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm();
3787   AM.Displacement = DispMO.getImm();
3788   return AM;
3789 }
3790 
3791 bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
3792                                            const Register Reg,
3793                                            int64_t &ImmVal) const {
3794   if (MI.getOpcode() != X86::MOV32ri && MI.getOpcode() != X86::MOV64ri)
3795     return false;
3796   // Mov Src can be a global address.
3797   if (!MI.getOperand(1).isImm() || MI.getOperand(0).getReg() != Reg)
3798     return false;
3799   ImmVal = MI.getOperand(1).getImm();
3800   return true;
3801 }
3802 
3803 bool X86InstrInfo::preservesZeroValueInReg(
3804     const MachineInstr *MI, const Register NullValueReg,
3805     const TargetRegisterInfo *TRI) const {
3806   if (!MI->modifiesRegister(NullValueReg, TRI))
3807     return true;
3808   switch (MI->getOpcode()) {
3809   // Shift right/left of a null unto itself is still a null, i.e. rax = shl rax
3810   // X.
3811   case X86::SHR64ri:
3812   case X86::SHR32ri:
3813   case X86::SHL64ri:
3814   case X86::SHL32ri:
3815     assert(MI->getOperand(0).isDef() && MI->getOperand(1).isUse() &&
3816            "expected for shift opcode!");
3817     return MI->getOperand(0).getReg() == NullValueReg &&
3818            MI->getOperand(1).getReg() == NullValueReg;
3819   // Zero extend of a sub-reg of NullValueReg into itself does not change the
3820   // null value.
3821   case X86::MOV32rr:
3822     return llvm::all_of(MI->operands(), [&](const MachineOperand &MO) {
3823       return TRI->isSubRegisterEq(NullValueReg, MO.getReg());
3824     });
3825   default:
3826     return false;
3827   }
3828   llvm_unreachable("Should be handled above!");
3829 }
3830 
3831 bool X86InstrInfo::getMemOperandsWithOffsetWidth(
3832     const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
3833     int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
3834     const TargetRegisterInfo *TRI) const {
3835   const MCInstrDesc &Desc = MemOp.getDesc();
3836   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3837   if (MemRefBegin < 0)
3838     return false;
3839 
3840   MemRefBegin += X86II::getOperandBias(Desc);
3841 
3842   const MachineOperand *BaseOp =
3843       &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
3844   if (!BaseOp->isReg()) // Can be an MO_FrameIndex
3845     return false;
3846 
3847   if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
3848     return false;
3849 
3850   if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
3851       X86::NoRegister)
3852     return false;
3853 
3854   const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
3855 
3856   // Displacement can be symbolic
3857   if (!DispMO.isImm())
3858     return false;
3859 
3860   Offset = DispMO.getImm();
3861 
3862   if (!BaseOp->isReg())
3863     return false;
3864 
3865   OffsetIsScalable = false;
3866   // FIXME: Relying on memoperands() may not be right thing to do here. Check
3867   // with X86 maintainers, and fix it accordingly. For now, it is ok, since
3868   // there is no use of `Width` for X86 back-end at the moment.
3869   Width =
3870       !MemOp.memoperands_empty() ? MemOp.memoperands().front()->getSize() : 0;
3871   BaseOps.push_back(BaseOp);
3872   return true;
3873 }
3874 
3875 static unsigned getStoreRegOpcode(Register SrcReg,
3876                                   const TargetRegisterClass *RC,
3877                                   bool IsStackAligned,
3878                                   const X86Subtarget &STI) {
3879   return getLoadStoreRegOpcode(SrcReg, RC, IsStackAligned, STI, false);
3880 }
3881 
3882 static unsigned getLoadRegOpcode(Register DestReg,
3883                                  const TargetRegisterClass *RC,
3884                                  bool IsStackAligned, const X86Subtarget &STI) {
3885   return getLoadStoreRegOpcode(DestReg, RC, IsStackAligned, STI, true);
3886 }
3887 
3888 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
3889                                        MachineBasicBlock::iterator MI,
3890                                        Register SrcReg, bool isKill, int FrameIdx,
3891                                        const TargetRegisterClass *RC,
3892                                        const TargetRegisterInfo *TRI) const {
3893   const MachineFunction &MF = *MBB.getParent();
3894   const MachineFrameInfo &MFI = MF.getFrameInfo();
3895   assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
3896          "Stack slot too small for store");
3897   if (RC->getID() == X86::TILERegClassID) {
3898     unsigned Opc = X86::TILESTORED;
3899     // tilestored %tmm, (%sp, %idx)
3900     MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
3901     Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
3902     BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
3903     MachineInstr *NewMI =
3904         addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3905             .addReg(SrcReg, getKillRegState(isKill));
3906     MachineOperand &MO = NewMI->getOperand(2);
3907     MO.setReg(VirtReg);
3908     MO.setIsKill(true);
3909   } else {
3910     unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3911     bool isAligned =
3912         (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
3913         (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
3914     unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
3915     addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3916         .addReg(SrcReg, getKillRegState(isKill));
3917   }
3918 }
3919 
3920 void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
3921                                         MachineBasicBlock::iterator MI,
3922                                         Register DestReg, int FrameIdx,
3923                                         const TargetRegisterClass *RC,
3924                                         const TargetRegisterInfo *TRI) const {
3925   if (RC->getID() == X86::TILERegClassID) {
3926     unsigned Opc = X86::TILELOADD;
3927     // tileloadd (%sp, %idx), %tmm
3928     MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
3929     Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
3930     MachineInstr *NewMI =
3931         BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
3932     NewMI = addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg),
3933                               FrameIdx);
3934     MachineOperand &MO = NewMI->getOperand(3);
3935     MO.setReg(VirtReg);
3936     MO.setIsKill(true);
3937   } else {
3938     const MachineFunction &MF = *MBB.getParent();
3939     const MachineFrameInfo &MFI = MF.getFrameInfo();
3940     unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3941     bool isAligned =
3942         (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
3943         (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
3944     unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
3945     addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg),
3946                       FrameIdx);
3947   }
3948 }
3949 
3950 bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
3951                                   Register &SrcReg2, int64_t &CmpMask,
3952                                   int64_t &CmpValue) const {
3953   switch (MI.getOpcode()) {
3954   default: break;
3955   case X86::CMP64ri32:
3956   case X86::CMP64ri8:
3957   case X86::CMP32ri:
3958   case X86::CMP32ri8:
3959   case X86::CMP16ri:
3960   case X86::CMP16ri8:
3961   case X86::CMP8ri:
3962     SrcReg = MI.getOperand(0).getReg();
3963     SrcReg2 = 0;
3964     if (MI.getOperand(1).isImm()) {
3965       CmpMask = ~0;
3966       CmpValue = MI.getOperand(1).getImm();
3967     } else {
3968       CmpMask = CmpValue = 0;
3969     }
3970     return true;
3971   // A SUB can be used to perform comparison.
3972   case X86::SUB64rm:
3973   case X86::SUB32rm:
3974   case X86::SUB16rm:
3975   case X86::SUB8rm:
3976     SrcReg = MI.getOperand(1).getReg();
3977     SrcReg2 = 0;
3978     CmpMask = 0;
3979     CmpValue = 0;
3980     return true;
3981   case X86::SUB64rr:
3982   case X86::SUB32rr:
3983   case X86::SUB16rr:
3984   case X86::SUB8rr:
3985     SrcReg = MI.getOperand(1).getReg();
3986     SrcReg2 = MI.getOperand(2).getReg();
3987     CmpMask = 0;
3988     CmpValue = 0;
3989     return true;
3990   case X86::SUB64ri32:
3991   case X86::SUB64ri8:
3992   case X86::SUB32ri:
3993   case X86::SUB32ri8:
3994   case X86::SUB16ri:
3995   case X86::SUB16ri8:
3996   case X86::SUB8ri:
3997     SrcReg = MI.getOperand(1).getReg();
3998     SrcReg2 = 0;
3999     if (MI.getOperand(2).isImm()) {
4000       CmpMask = ~0;
4001       CmpValue = MI.getOperand(2).getImm();
4002     } else {
4003       CmpMask = CmpValue = 0;
4004     }
4005     return true;
4006   case X86::CMP64rr:
4007   case X86::CMP32rr:
4008   case X86::CMP16rr:
4009   case X86::CMP8rr:
4010     SrcReg = MI.getOperand(0).getReg();
4011     SrcReg2 = MI.getOperand(1).getReg();
4012     CmpMask = 0;
4013     CmpValue = 0;
4014     return true;
4015   case X86::TEST8rr:
4016   case X86::TEST16rr:
4017   case X86::TEST32rr:
4018   case X86::TEST64rr:
4019     SrcReg = MI.getOperand(0).getReg();
4020     if (MI.getOperand(1).getReg() != SrcReg)
4021       return false;
4022     // Compare against zero.
4023     SrcReg2 = 0;
4024     CmpMask = ~0;
4025     CmpValue = 0;
4026     return true;
4027   }
4028   return false;
4029 }
4030 
4031 bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
4032                                         Register SrcReg, Register SrcReg2,
4033                                         int64_t ImmMask, int64_t ImmValue,
4034                                         const MachineInstr &OI, bool *IsSwapped,
4035                                         int64_t *ImmDelta) const {
4036   switch (OI.getOpcode()) {
4037   case X86::CMP64rr:
4038   case X86::CMP32rr:
4039   case X86::CMP16rr:
4040   case X86::CMP8rr:
4041   case X86::SUB64rr:
4042   case X86::SUB32rr:
4043   case X86::SUB16rr:
4044   case X86::SUB8rr: {
4045     Register OISrcReg;
4046     Register OISrcReg2;
4047     int64_t OIMask;
4048     int64_t OIValue;
4049     if (!analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) ||
4050         OIMask != ImmMask || OIValue != ImmValue)
4051       return false;
4052     if (SrcReg == OISrcReg && SrcReg2 == OISrcReg2) {
4053       *IsSwapped = false;
4054       return true;
4055     }
4056     if (SrcReg == OISrcReg2 && SrcReg2 == OISrcReg) {
4057       *IsSwapped = true;
4058       return true;
4059     }
4060     return false;
4061   }
4062   case X86::CMP64ri32:
4063   case X86::CMP64ri8:
4064   case X86::CMP32ri:
4065   case X86::CMP32ri8:
4066   case X86::CMP16ri:
4067   case X86::CMP16ri8:
4068   case X86::CMP8ri:
4069   case X86::SUB64ri32:
4070   case X86::SUB64ri8:
4071   case X86::SUB32ri:
4072   case X86::SUB32ri8:
4073   case X86::SUB16ri:
4074   case X86::SUB16ri8:
4075   case X86::SUB8ri:
4076   case X86::TEST64rr:
4077   case X86::TEST32rr:
4078   case X86::TEST16rr:
4079   case X86::TEST8rr: {
4080     if (ImmMask != 0) {
4081       Register OISrcReg;
4082       Register OISrcReg2;
4083       int64_t OIMask;
4084       int64_t OIValue;
4085       if (analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) &&
4086           SrcReg == OISrcReg && ImmMask == OIMask) {
4087         if (OIValue == ImmValue) {
4088           *ImmDelta = 0;
4089           return true;
4090         } else if (static_cast<uint64_t>(ImmValue) ==
4091                    static_cast<uint64_t>(OIValue) - 1) {
4092           *ImmDelta = -1;
4093           return true;
4094         } else if (static_cast<uint64_t>(ImmValue) ==
4095                    static_cast<uint64_t>(OIValue) + 1) {
4096           *ImmDelta = 1;
4097           return true;
4098         } else {
4099           return false;
4100         }
4101       }
4102     }
4103     return FlagI.isIdenticalTo(OI);
4104   }
4105   default:
4106     return false;
4107   }
4108 }
4109 
4110 /// Check whether the definition can be converted
4111 /// to remove a comparison against zero.
4112 inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
4113                                     bool &ClearsOverflowFlag) {
4114   NoSignFlag = false;
4115   ClearsOverflowFlag = false;
4116 
4117   switch (MI.getOpcode()) {
4118   default: return false;
4119 
4120   // The shift instructions only modify ZF if their shift count is non-zero.
4121   // N.B.: The processor truncates the shift count depending on the encoding.
4122   case X86::SAR8ri:    case X86::SAR16ri:  case X86::SAR32ri:case X86::SAR64ri:
4123   case X86::SHR8ri:    case X86::SHR16ri:  case X86::SHR32ri:case X86::SHR64ri:
4124      return getTruncatedShiftCount(MI, 2) != 0;
4125 
4126   // Some left shift instructions can be turned into LEA instructions but only
4127   // if their flags aren't used. Avoid transforming such instructions.
4128   case X86::SHL8ri:    case X86::SHL16ri:  case X86::SHL32ri:case X86::SHL64ri:{
4129     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
4130     if (isTruncatedShiftCountForLEA(ShAmt)) return false;
4131     return ShAmt != 0;
4132   }
4133 
4134   case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
4135   case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
4136      return getTruncatedShiftCount(MI, 3) != 0;
4137 
4138   case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
4139   case X86::SUB32ri8:  case X86::SUB16ri:  case X86::SUB16ri8:
4140   case X86::SUB8ri:    case X86::SUB64rr:  case X86::SUB32rr:
4141   case X86::SUB16rr:   case X86::SUB8rr:   case X86::SUB64rm:
4142   case X86::SUB32rm:   case X86::SUB16rm:  case X86::SUB8rm:
4143   case X86::DEC64r:    case X86::DEC32r:   case X86::DEC16r: case X86::DEC8r:
4144   case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
4145   case X86::ADD32ri8:  case X86::ADD16ri:  case X86::ADD16ri8:
4146   case X86::ADD8ri:    case X86::ADD64rr:  case X86::ADD32rr:
4147   case X86::ADD16rr:   case X86::ADD8rr:   case X86::ADD64rm:
4148   case X86::ADD32rm:   case X86::ADD16rm:  case X86::ADD8rm:
4149   case X86::INC64r:    case X86::INC32r:   case X86::INC16r: case X86::INC8r:
4150   case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri:
4151   case X86::ADC32ri8:  case X86::ADC16ri:  case X86::ADC16ri8:
4152   case X86::ADC8ri:    case X86::ADC64rr:  case X86::ADC32rr:
4153   case X86::ADC16rr:   case X86::ADC8rr:   case X86::ADC64rm:
4154   case X86::ADC32rm:   case X86::ADC16rm:  case X86::ADC8rm:
4155   case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri:
4156   case X86::SBB32ri8:  case X86::SBB16ri:  case X86::SBB16ri8:
4157   case X86::SBB8ri:    case X86::SBB64rr:  case X86::SBB32rr:
4158   case X86::SBB16rr:   case X86::SBB8rr:   case X86::SBB64rm:
4159   case X86::SBB32rm:   case X86::SBB16rm:  case X86::SBB8rm:
4160   case X86::NEG8r:     case X86::NEG16r:   case X86::NEG32r: case X86::NEG64r:
4161   case X86::SAR8r1:    case X86::SAR16r1:  case X86::SAR32r1:case X86::SAR64r1:
4162   case X86::SHR8r1:    case X86::SHR16r1:  case X86::SHR32r1:case X86::SHR64r1:
4163   case X86::SHL8r1:    case X86::SHL16r1:  case X86::SHL32r1:case X86::SHL64r1:
4164   case X86::LZCNT16rr: case X86::LZCNT16rm:
4165   case X86::LZCNT32rr: case X86::LZCNT32rm:
4166   case X86::LZCNT64rr: case X86::LZCNT64rm:
4167   case X86::POPCNT16rr:case X86::POPCNT16rm:
4168   case X86::POPCNT32rr:case X86::POPCNT32rm:
4169   case X86::POPCNT64rr:case X86::POPCNT64rm:
4170   case X86::TZCNT16rr: case X86::TZCNT16rm:
4171   case X86::TZCNT32rr: case X86::TZCNT32rm:
4172   case X86::TZCNT64rr: case X86::TZCNT64rm:
4173     return true;
4174   case X86::AND64ri32:   case X86::AND64ri8:  case X86::AND32ri:
4175   case X86::AND32ri8:    case X86::AND16ri:   case X86::AND16ri8:
4176   case X86::AND8ri:      case X86::AND64rr:   case X86::AND32rr:
4177   case X86::AND16rr:     case X86::AND8rr:    case X86::AND64rm:
4178   case X86::AND32rm:     case X86::AND16rm:   case X86::AND8rm:
4179   case X86::XOR64ri32:   case X86::XOR64ri8:  case X86::XOR32ri:
4180   case X86::XOR32ri8:    case X86::XOR16ri:   case X86::XOR16ri8:
4181   case X86::XOR8ri:      case X86::XOR64rr:   case X86::XOR32rr:
4182   case X86::XOR16rr:     case X86::XOR8rr:    case X86::XOR64rm:
4183   case X86::XOR32rm:     case X86::XOR16rm:   case X86::XOR8rm:
4184   case X86::OR64ri32:    case X86::OR64ri8:   case X86::OR32ri:
4185   case X86::OR32ri8:     case X86::OR16ri:    case X86::OR16ri8:
4186   case X86::OR8ri:       case X86::OR64rr:    case X86::OR32rr:
4187   case X86::OR16rr:      case X86::OR8rr:     case X86::OR64rm:
4188   case X86::OR32rm:      case X86::OR16rm:    case X86::OR8rm:
4189   case X86::ANDN32rr:    case X86::ANDN32rm:
4190   case X86::ANDN64rr:    case X86::ANDN64rm:
4191   case X86::BLSI32rr:    case X86::BLSI32rm:
4192   case X86::BLSI64rr:    case X86::BLSI64rm:
4193   case X86::BLSMSK32rr:  case X86::BLSMSK32rm:
4194   case X86::BLSMSK64rr:  case X86::BLSMSK64rm:
4195   case X86::BLSR32rr:    case X86::BLSR32rm:
4196   case X86::BLSR64rr:    case X86::BLSR64rm:
4197   case X86::BLCFILL32rr: case X86::BLCFILL32rm:
4198   case X86::BLCFILL64rr: case X86::BLCFILL64rm:
4199   case X86::BLCI32rr:    case X86::BLCI32rm:
4200   case X86::BLCI64rr:    case X86::BLCI64rm:
4201   case X86::BLCIC32rr:   case X86::BLCIC32rm:
4202   case X86::BLCIC64rr:   case X86::BLCIC64rm:
4203   case X86::BLCMSK32rr:  case X86::BLCMSK32rm:
4204   case X86::BLCMSK64rr:  case X86::BLCMSK64rm:
4205   case X86::BLCS32rr:    case X86::BLCS32rm:
4206   case X86::BLCS64rr:    case X86::BLCS64rm:
4207   case X86::BLSFILL32rr: case X86::BLSFILL32rm:
4208   case X86::BLSFILL64rr: case X86::BLSFILL64rm:
4209   case X86::BLSIC32rr:   case X86::BLSIC32rm:
4210   case X86::BLSIC64rr:   case X86::BLSIC64rm:
4211   case X86::BZHI32rr:    case X86::BZHI32rm:
4212   case X86::BZHI64rr:    case X86::BZHI64rm:
4213   case X86::T1MSKC32rr:  case X86::T1MSKC32rm:
4214   case X86::T1MSKC64rr:  case X86::T1MSKC64rm:
4215   case X86::TZMSK32rr:   case X86::TZMSK32rm:
4216   case X86::TZMSK64rr:   case X86::TZMSK64rm:
4217     // These instructions clear the overflow flag just like TEST.
4218     // FIXME: These are not the only instructions in this switch that clear the
4219     // overflow flag.
4220     ClearsOverflowFlag = true;
4221     return true;
4222   case X86::BEXTR32rr:   case X86::BEXTR64rr:
4223   case X86::BEXTR32rm:   case X86::BEXTR64rm:
4224   case X86::BEXTRI32ri:  case X86::BEXTRI32mi:
4225   case X86::BEXTRI64ri:  case X86::BEXTRI64mi:
4226     // BEXTR doesn't update the sign flag so we can't use it. It does clear
4227     // the overflow flag, but that's not useful without the sign flag.
4228     NoSignFlag = true;
4229     return true;
4230   }
4231 }
4232 
4233 /// Check whether the use can be converted to remove a comparison against zero.
4234 static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
4235   switch (MI.getOpcode()) {
4236   default: return X86::COND_INVALID;
4237   case X86::NEG8r:
4238   case X86::NEG16r:
4239   case X86::NEG32r:
4240   case X86::NEG64r:
4241     return X86::COND_AE;
4242   case X86::LZCNT16rr:
4243   case X86::LZCNT32rr:
4244   case X86::LZCNT64rr:
4245     return X86::COND_B;
4246   case X86::POPCNT16rr:
4247   case X86::POPCNT32rr:
4248   case X86::POPCNT64rr:
4249     return X86::COND_E;
4250   case X86::TZCNT16rr:
4251   case X86::TZCNT32rr:
4252   case X86::TZCNT64rr:
4253     return X86::COND_B;
4254   case X86::BSF16rr:
4255   case X86::BSF32rr:
4256   case X86::BSF64rr:
4257   case X86::BSR16rr:
4258   case X86::BSR32rr:
4259   case X86::BSR64rr:
4260     return X86::COND_E;
4261   case X86::BLSI32rr:
4262   case X86::BLSI64rr:
4263     return X86::COND_AE;
4264   case X86::BLSR32rr:
4265   case X86::BLSR64rr:
4266   case X86::BLSMSK32rr:
4267   case X86::BLSMSK64rr:
4268     return X86::COND_B;
4269   // TODO: TBM instructions.
4270   }
4271 }
4272 
4273 /// Check if there exists an earlier instruction that
4274 /// operates on the same source operands and sets flags in the same way as
4275 /// Compare; remove Compare if possible.
4276 bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
4277                                         Register SrcReg2, int64_t CmpMask,
4278                                         int64_t CmpValue,
4279                                         const MachineRegisterInfo *MRI) const {
4280   // Check whether we can replace SUB with CMP.
4281   switch (CmpInstr.getOpcode()) {
4282   default: break;
4283   case X86::SUB64ri32:
4284   case X86::SUB64ri8:
4285   case X86::SUB32ri:
4286   case X86::SUB32ri8:
4287   case X86::SUB16ri:
4288   case X86::SUB16ri8:
4289   case X86::SUB8ri:
4290   case X86::SUB64rm:
4291   case X86::SUB32rm:
4292   case X86::SUB16rm:
4293   case X86::SUB8rm:
4294   case X86::SUB64rr:
4295   case X86::SUB32rr:
4296   case X86::SUB16rr:
4297   case X86::SUB8rr: {
4298     if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
4299       return false;
4300     // There is no use of the destination register, we can replace SUB with CMP.
4301     unsigned NewOpcode = 0;
4302     switch (CmpInstr.getOpcode()) {
4303     default: llvm_unreachable("Unreachable!");
4304     case X86::SUB64rm:   NewOpcode = X86::CMP64rm;   break;
4305     case X86::SUB32rm:   NewOpcode = X86::CMP32rm;   break;
4306     case X86::SUB16rm:   NewOpcode = X86::CMP16rm;   break;
4307     case X86::SUB8rm:    NewOpcode = X86::CMP8rm;    break;
4308     case X86::SUB64rr:   NewOpcode = X86::CMP64rr;   break;
4309     case X86::SUB32rr:   NewOpcode = X86::CMP32rr;   break;
4310     case X86::SUB16rr:   NewOpcode = X86::CMP16rr;   break;
4311     case X86::SUB8rr:    NewOpcode = X86::CMP8rr;    break;
4312     case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
4313     case X86::SUB64ri8:  NewOpcode = X86::CMP64ri8;  break;
4314     case X86::SUB32ri:   NewOpcode = X86::CMP32ri;   break;
4315     case X86::SUB32ri8:  NewOpcode = X86::CMP32ri8;  break;
4316     case X86::SUB16ri:   NewOpcode = X86::CMP16ri;   break;
4317     case X86::SUB16ri8:  NewOpcode = X86::CMP16ri8;  break;
4318     case X86::SUB8ri:    NewOpcode = X86::CMP8ri;    break;
4319     }
4320     CmpInstr.setDesc(get(NewOpcode));
4321     CmpInstr.RemoveOperand(0);
4322     // Mutating this instruction invalidates any debug data associated with it.
4323     CmpInstr.dropDebugNumber();
4324     // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
4325     if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
4326         NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
4327       return false;
4328   }
4329   }
4330 
4331   // The following code tries to remove the comparison by re-using EFLAGS
4332   // from earlier instructions.
4333 
4334   bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
4335 
4336   // Transformation currently requires SSA values.
4337   if (SrcReg2.isPhysical())
4338     return false;
4339   MachineInstr *SrcRegDef = MRI->getVRegDef(SrcReg);
4340   assert(SrcRegDef && "Must have a definition (SSA)");
4341 
4342   MachineInstr *MI = nullptr;
4343   MachineInstr *Sub = nullptr;
4344   MachineInstr *Movr0Inst = nullptr;
4345   bool NoSignFlag = false;
4346   bool ClearsOverflowFlag = false;
4347   bool ShouldUpdateCC = false;
4348   bool IsSwapped = false;
4349   X86::CondCode NewCC = X86::COND_INVALID;
4350   int64_t ImmDelta = 0;
4351 
4352   // Search backward from CmpInstr for the next instruction defining EFLAGS.
4353   const TargetRegisterInfo *TRI = &getRegisterInfo();
4354   MachineBasicBlock &CmpMBB = *CmpInstr.getParent();
4355   MachineBasicBlock::reverse_iterator From =
4356       std::next(MachineBasicBlock::reverse_iterator(CmpInstr));
4357   for (MachineBasicBlock *MBB = &CmpMBB;;) {
4358     for (MachineInstr &Inst : make_range(From, MBB->rend())) {
4359       // Try to use EFLAGS from the instruction defining %SrcReg. Example:
4360       //     %eax = addl ...
4361       //     ...                // EFLAGS not changed
4362       //     testl %eax, %eax   // <-- can be removed
4363       if (&Inst == SrcRegDef) {
4364         if (IsCmpZero &&
4365             isDefConvertible(Inst, NoSignFlag, ClearsOverflowFlag)) {
4366           MI = &Inst;
4367           break;
4368         }
4369         // Cannot find other candidates before definition of SrcReg.
4370         return false;
4371       }
4372 
4373       if (Inst.modifiesRegister(X86::EFLAGS, TRI)) {
4374         // Try to use EFLAGS produced by an instruction reading %SrcReg.
4375         // Example:
4376         //      %eax = ...
4377         //      ...
4378         //      popcntl %eax
4379         //      ...                 // EFLAGS not changed
4380         //      testl %eax, %eax    // <-- can be removed
4381         if (IsCmpZero) {
4382           NewCC = isUseDefConvertible(Inst);
4383           if (NewCC != X86::COND_INVALID && Inst.getOperand(1).isReg() &&
4384               Inst.getOperand(1).getReg() == SrcReg) {
4385             ShouldUpdateCC = true;
4386             MI = &Inst;
4387             break;
4388           }
4389         }
4390 
4391         // Try to use EFLAGS from an instruction with similar flag results.
4392         // Example:
4393         //     sub x, y  or  cmp x, y
4394         //     ...           // EFLAGS not changed
4395         //     cmp x, y      // <-- can be removed
4396         if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue,
4397                                  Inst, &IsSwapped, &ImmDelta)) {
4398           Sub = &Inst;
4399           break;
4400         }
4401 
4402         // MOV32r0 is implemented with xor which clobbers condition code. It is
4403         // safe to move up, if the definition to EFLAGS is dead and earlier
4404         // instructions do not read or write EFLAGS.
4405         if (!Movr0Inst && Inst.getOpcode() == X86::MOV32r0 &&
4406             Inst.registerDefIsDead(X86::EFLAGS, TRI)) {
4407           Movr0Inst = &Inst;
4408           continue;
4409         }
4410 
4411         // Cannot do anything for any other EFLAG changes.
4412         return false;
4413       }
4414     }
4415 
4416     if (MI || Sub)
4417       break;
4418 
4419     // Reached begin of basic block. Continue in predecessor if there is
4420     // exactly one.
4421     if (MBB->pred_size() != 1)
4422       return false;
4423     MBB = *MBB->pred_begin();
4424     From = MBB->rbegin();
4425   }
4426 
4427   // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
4428   // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
4429   // If we are done with the basic block, we need to check whether EFLAGS is
4430   // live-out.
4431   bool FlagsMayLiveOut = true;
4432   SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate;
4433   MachineBasicBlock::iterator AfterCmpInstr =
4434       std::next(MachineBasicBlock::iterator(CmpInstr));
4435   for (MachineInstr &Instr : make_range(AfterCmpInstr, CmpMBB.end())) {
4436     bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
4437     bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
4438     // We should check the usage if this instruction uses and updates EFLAGS.
4439     if (!UseEFLAGS && ModifyEFLAGS) {
4440       // It is safe to remove CmpInstr if EFLAGS is updated again.
4441       FlagsMayLiveOut = false;
4442       break;
4443     }
4444     if (!UseEFLAGS && !ModifyEFLAGS)
4445       continue;
4446 
4447     // EFLAGS is used by this instruction.
4448     X86::CondCode OldCC = X86::COND_INVALID;
4449     if (MI || IsSwapped || ImmDelta != 0) {
4450       // We decode the condition code from opcode.
4451       if (Instr.isBranch())
4452         OldCC = X86::getCondFromBranch(Instr);
4453       else {
4454         OldCC = X86::getCondFromSETCC(Instr);
4455         if (OldCC == X86::COND_INVALID)
4456           OldCC = X86::getCondFromCMov(Instr);
4457       }
4458       if (OldCC == X86::COND_INVALID) return false;
4459     }
4460     X86::CondCode ReplacementCC = X86::COND_INVALID;
4461     if (MI) {
4462       switch (OldCC) {
4463       default: break;
4464       case X86::COND_A: case X86::COND_AE:
4465       case X86::COND_B: case X86::COND_BE:
4466         // CF is used, we can't perform this optimization.
4467         return false;
4468       case X86::COND_G: case X86::COND_GE:
4469       case X86::COND_L: case X86::COND_LE:
4470       case X86::COND_O: case X86::COND_NO:
4471         // If OF is used, the instruction needs to clear it like CmpZero does.
4472         if (!ClearsOverflowFlag)
4473           return false;
4474         break;
4475       case X86::COND_S: case X86::COND_NS:
4476         // If SF is used, but the instruction doesn't update the SF, then we
4477         // can't do the optimization.
4478         if (NoSignFlag)
4479           return false;
4480         break;
4481       }
4482 
4483       // If we're updating the condition code check if we have to reverse the
4484       // condition.
4485       if (ShouldUpdateCC)
4486         switch (OldCC) {
4487         default:
4488           return false;
4489         case X86::COND_E:
4490           ReplacementCC = NewCC;
4491           break;
4492         case X86::COND_NE:
4493           ReplacementCC = GetOppositeBranchCondition(NewCC);
4494           break;
4495         }
4496     } else if (IsSwapped) {
4497       // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
4498       // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
4499       // We swap the condition code and synthesize the new opcode.
4500       ReplacementCC = getSwappedCondition(OldCC);
4501       if (ReplacementCC == X86::COND_INVALID) return false;
4502       ShouldUpdateCC = true;
4503     } else if (ImmDelta != 0) {
4504       unsigned BitWidth = TRI->getRegSizeInBits(*MRI->getRegClass(SrcReg));
4505       // Shift amount for min/max constants to adjust for 8/16/32 instruction
4506       // sizes.
4507       switch (OldCC) {
4508       case X86::COND_L: // x <s (C + 1)  -->  x <=s C
4509         if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue)
4510           return false;
4511         ReplacementCC = X86::COND_LE;
4512         break;
4513       case X86::COND_B: // x <u (C + 1)  -->  x <=u C
4514         if (ImmDelta != 1 || CmpValue == 0)
4515           return false;
4516         ReplacementCC = X86::COND_BE;
4517         break;
4518       case X86::COND_GE: // x >=s (C + 1)  -->  x >s C
4519         if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue)
4520           return false;
4521         ReplacementCC = X86::COND_G;
4522         break;
4523       case X86::COND_AE: // x >=u (C + 1)  -->  x >u C
4524         if (ImmDelta != 1 || CmpValue == 0)
4525           return false;
4526         ReplacementCC = X86::COND_A;
4527         break;
4528       case X86::COND_G: // x >s (C - 1)  -->  x >=s C
4529         if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue)
4530           return false;
4531         ReplacementCC = X86::COND_GE;
4532         break;
4533       case X86::COND_A: // x >u (C - 1)  -->  x >=u C
4534         if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue)
4535           return false;
4536         ReplacementCC = X86::COND_AE;
4537         break;
4538       case X86::COND_LE: // x <=s (C - 1)  -->  x <s C
4539         if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue)
4540           return false;
4541         ReplacementCC = X86::COND_L;
4542         break;
4543       case X86::COND_BE: // x <=u (C - 1)  -->  x <u C
4544         if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue)
4545           return false;
4546         ReplacementCC = X86::COND_B;
4547         break;
4548       default:
4549         return false;
4550       }
4551       ShouldUpdateCC = true;
4552     }
4553 
4554     if (ShouldUpdateCC && ReplacementCC != OldCC) {
4555       // Push the MachineInstr to OpsToUpdate.
4556       // If it is safe to remove CmpInstr, the condition code of these
4557       // instructions will be modified.
4558       OpsToUpdate.push_back(std::make_pair(&Instr, ReplacementCC));
4559     }
4560     if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
4561       // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
4562       FlagsMayLiveOut = false;
4563       break;
4564     }
4565   }
4566 
4567   // If we have to update users but EFLAGS is live-out abort, since we cannot
4568   // easily find all of the users.
4569   if (ShouldUpdateCC && FlagsMayLiveOut) {
4570     for (MachineBasicBlock *Successor : CmpMBB.successors())
4571       if (Successor->isLiveIn(X86::EFLAGS))
4572         return false;
4573   }
4574 
4575   // The instruction to be updated is either Sub or MI.
4576   assert((MI == nullptr || Sub == nullptr) && "Should not have Sub and MI set");
4577   Sub = MI != nullptr ? MI : Sub;
4578   MachineBasicBlock *SubBB = Sub->getParent();
4579   // Move Movr0Inst to the appropriate place before Sub.
4580   if (Movr0Inst) {
4581     // Only move within the same block so we don't accidentally move to a
4582     // block with higher execution frequency.
4583     if (&CmpMBB != SubBB)
4584       return false;
4585     // Look backwards until we find a def that doesn't use the current EFLAGS.
4586     MachineBasicBlock::reverse_iterator InsertI = Sub,
4587                                         InsertE = Sub->getParent()->rend();
4588     for (; InsertI != InsertE; ++InsertI) {
4589       MachineInstr *Instr = &*InsertI;
4590       if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
4591           Instr->modifiesRegister(X86::EFLAGS, TRI)) {
4592         Movr0Inst->getParent()->remove(Movr0Inst);
4593         Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
4594                                    Movr0Inst);
4595         break;
4596       }
4597     }
4598     if (InsertI == InsertE)
4599       return false;
4600   }
4601 
4602   // Make sure Sub instruction defines EFLAGS and mark the def live.
4603   MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS);
4604   assert(FlagDef && "Unable to locate a def EFLAGS operand");
4605   FlagDef->setIsDead(false);
4606 
4607   CmpInstr.eraseFromParent();
4608 
4609   // Modify the condition code of instructions in OpsToUpdate.
4610   for (auto &Op : OpsToUpdate) {
4611     Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
4612         .setImm(Op.second);
4613   }
4614   // Add EFLAGS to block live-ins between CmpBB and block of flags producer.
4615   for (MachineBasicBlock *MBB = &CmpMBB; MBB != SubBB;
4616        MBB = *MBB->pred_begin()) {
4617     assert(MBB->pred_size() == 1 && "Expected exactly one predecessor");
4618     if (!MBB->isLiveIn(X86::EFLAGS))
4619       MBB->addLiveIn(X86::EFLAGS);
4620   }
4621   return true;
4622 }
4623 
4624 /// Try to remove the load by folding it to a register
4625 /// operand at the use. We fold the load instructions if load defines a virtual
4626 /// register, the virtual register is used once in the same BB, and the
4627 /// instructions in-between do not load or store, and have no side effects.
4628 MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
4629                                               const MachineRegisterInfo *MRI,
4630                                               Register &FoldAsLoadDefReg,
4631                                               MachineInstr *&DefMI) const {
4632   // Check whether we can move DefMI here.
4633   DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
4634   assert(DefMI);
4635   bool SawStore = false;
4636   if (!DefMI->isSafeToMove(nullptr, SawStore))
4637     return nullptr;
4638 
4639   // Collect information about virtual register operands of MI.
4640   SmallVector<unsigned, 1> SrcOperandIds;
4641   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4642     MachineOperand &MO = MI.getOperand(i);
4643     if (!MO.isReg())
4644       continue;
4645     Register Reg = MO.getReg();
4646     if (Reg != FoldAsLoadDefReg)
4647       continue;
4648     // Do not fold if we have a subreg use or a def.
4649     if (MO.getSubReg() || MO.isDef())
4650       return nullptr;
4651     SrcOperandIds.push_back(i);
4652   }
4653   if (SrcOperandIds.empty())
4654     return nullptr;
4655 
4656   // Check whether we can fold the def into SrcOperandId.
4657   if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
4658     FoldAsLoadDefReg = 0;
4659     return FoldMI;
4660   }
4661 
4662   return nullptr;
4663 }
4664 
4665 /// Expand a single-def pseudo instruction to a two-addr
4666 /// instruction with two undef reads of the register being defined.
4667 /// This is used for mapping:
4668 ///   %xmm4 = V_SET0
4669 /// to:
4670 ///   %xmm4 = PXORrr undef %xmm4, undef %xmm4
4671 ///
4672 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
4673                              const MCInstrDesc &Desc) {
4674   assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4675   Register Reg = MIB.getReg(0);
4676   MIB->setDesc(Desc);
4677 
4678   // MachineInstr::addOperand() will insert explicit operands before any
4679   // implicit operands.
4680   MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
4681   // But we don't trust that.
4682   assert(MIB.getReg(1) == Reg &&
4683          MIB.getReg(2) == Reg && "Misplaced operand");
4684   return true;
4685 }
4686 
4687 /// Expand a single-def pseudo instruction to a two-addr
4688 /// instruction with two %k0 reads.
4689 /// This is used for mapping:
4690 ///   %k4 = K_SET1
4691 /// to:
4692 ///   %k4 = KXNORrr %k0, %k0
4693 static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
4694                             Register Reg) {
4695   assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4696   MIB->setDesc(Desc);
4697   MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
4698   return true;
4699 }
4700 
4701 static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
4702                           bool MinusOne) {
4703   MachineBasicBlock &MBB = *MIB->getParent();
4704   const DebugLoc &DL = MIB->getDebugLoc();
4705   Register Reg = MIB.getReg(0);
4706 
4707   // Insert the XOR.
4708   BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
4709       .addReg(Reg, RegState::Undef)
4710       .addReg(Reg, RegState::Undef);
4711 
4712   // Turn the pseudo into an INC or DEC.
4713   MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
4714   MIB.addReg(Reg);
4715 
4716   return true;
4717 }
4718 
4719 static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
4720                                const TargetInstrInfo &TII,
4721                                const X86Subtarget &Subtarget) {
4722   MachineBasicBlock &MBB = *MIB->getParent();
4723   const DebugLoc &DL = MIB->getDebugLoc();
4724   int64_t Imm = MIB->getOperand(1).getImm();
4725   assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
4726   MachineBasicBlock::iterator I = MIB.getInstr();
4727 
4728   int StackAdjustment;
4729 
4730   if (Subtarget.is64Bit()) {
4731     assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
4732            MIB->getOpcode() == X86::MOV32ImmSExti8);
4733 
4734     // Can't use push/pop lowering if the function might write to the red zone.
4735     X86MachineFunctionInfo *X86FI =
4736         MBB.getParent()->getInfo<X86MachineFunctionInfo>();
4737     if (X86FI->getUsesRedZone()) {
4738       MIB->setDesc(TII.get(MIB->getOpcode() ==
4739                            X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri));
4740       return true;
4741     }
4742 
4743     // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
4744     // widen the register if necessary.
4745     StackAdjustment = 8;
4746     BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm);
4747     MIB->setDesc(TII.get(X86::POP64r));
4748     MIB->getOperand(0)
4749         .setReg(getX86SubSuperRegister(MIB.getReg(0), 64));
4750   } else {
4751     assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
4752     StackAdjustment = 4;
4753     BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
4754     MIB->setDesc(TII.get(X86::POP32r));
4755   }
4756   MIB->RemoveOperand(1);
4757   MIB->addImplicitDefUseOperands(*MBB.getParent());
4758 
4759   // Build CFI if necessary.
4760   MachineFunction &MF = *MBB.getParent();
4761   const X86FrameLowering *TFL = Subtarget.getFrameLowering();
4762   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
4763   bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves();
4764   bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
4765   if (EmitCFI) {
4766     TFL->BuildCFI(MBB, I, DL,
4767         MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
4768     TFL->BuildCFI(MBB, std::next(I), DL,
4769         MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
4770   }
4771 
4772   return true;
4773 }
4774 
4775 // LoadStackGuard has so far only been implemented for 64-bit MachO. Different
4776 // code sequence is needed for other targets.
4777 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
4778                                  const TargetInstrInfo &TII) {
4779   MachineBasicBlock &MBB = *MIB->getParent();
4780   const DebugLoc &DL = MIB->getDebugLoc();
4781   Register Reg = MIB.getReg(0);
4782   const GlobalValue *GV =
4783       cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
4784   auto Flags = MachineMemOperand::MOLoad |
4785                MachineMemOperand::MODereferenceable |
4786                MachineMemOperand::MOInvariant;
4787   MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
4788       MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, Align(8));
4789   MachineBasicBlock::iterator I = MIB.getInstr();
4790 
4791   BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
4792       .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
4793       .addMemOperand(MMO);
4794   MIB->setDebugLoc(DL);
4795   MIB->setDesc(TII.get(X86::MOV64rm));
4796   MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
4797 }
4798 
4799 static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
4800   MachineBasicBlock &MBB = *MIB->getParent();
4801   MachineFunction &MF = *MBB.getParent();
4802   const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
4803   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4804   unsigned XorOp =
4805       MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
4806   MIB->setDesc(TII.get(XorOp));
4807   MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
4808   return true;
4809 }
4810 
4811 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4812 // but not VLX. If it uses an extended register we need to use an instruction
4813 // that loads the lower 128/256-bit, but is available with only AVX512F.
4814 static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
4815                             const TargetRegisterInfo *TRI,
4816                             const MCInstrDesc &LoadDesc,
4817                             const MCInstrDesc &BroadcastDesc,
4818                             unsigned SubIdx) {
4819   Register DestReg = MIB.getReg(0);
4820   // Check if DestReg is XMM16-31 or YMM16-31.
4821   if (TRI->getEncodingValue(DestReg) < 16) {
4822     // We can use a normal VEX encoded load.
4823     MIB->setDesc(LoadDesc);
4824   } else {
4825     // Use a 128/256-bit VBROADCAST instruction.
4826     MIB->setDesc(BroadcastDesc);
4827     // Change the destination to a 512-bit register.
4828     DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
4829     MIB->getOperand(0).setReg(DestReg);
4830   }
4831   return true;
4832 }
4833 
4834 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4835 // but not VLX. If it uses an extended register we need to use an instruction
4836 // that stores the lower 128/256-bit, but is available with only AVX512F.
4837 static bool expandNOVLXStore(MachineInstrBuilder &MIB,
4838                              const TargetRegisterInfo *TRI,
4839                              const MCInstrDesc &StoreDesc,
4840                              const MCInstrDesc &ExtractDesc,
4841                              unsigned SubIdx) {
4842   Register SrcReg = MIB.getReg(X86::AddrNumOperands);
4843   // Check if DestReg is XMM16-31 or YMM16-31.
4844   if (TRI->getEncodingValue(SrcReg) < 16) {
4845     // We can use a normal VEX encoded store.
4846     MIB->setDesc(StoreDesc);
4847   } else {
4848     // Use a VEXTRACTF instruction.
4849     MIB->setDesc(ExtractDesc);
4850     // Change the destination to a 512-bit register.
4851     SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
4852     MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
4853     MIB.addImm(0x0); // Append immediate to extract from the lower bits.
4854   }
4855 
4856   return true;
4857 }
4858 
4859 static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {
4860   MIB->setDesc(Desc);
4861   int64_t ShiftAmt = MIB->getOperand(2).getImm();
4862   // Temporarily remove the immediate so we can add another source register.
4863   MIB->RemoveOperand(2);
4864   // Add the register. Don't copy the kill flag if there is one.
4865   MIB.addReg(MIB.getReg(1),
4866              getUndefRegState(MIB->getOperand(1).isUndef()));
4867   // Add back the immediate.
4868   MIB.addImm(ShiftAmt);
4869   return true;
4870 }
4871 
4872 bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
4873   bool HasAVX = Subtarget.hasAVX();
4874   MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
4875   switch (MI.getOpcode()) {
4876   case X86::MOV32r0:
4877     return Expand2AddrUndef(MIB, get(X86::XOR32rr));
4878   case X86::MOV32r1:
4879     return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
4880   case X86::MOV32r_1:
4881     return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
4882   case X86::MOV32ImmSExti8:
4883   case X86::MOV64ImmSExti8:
4884     return ExpandMOVImmSExti8(MIB, *this, Subtarget);
4885   case X86::SETB_C32r:
4886     return Expand2AddrUndef(MIB, get(X86::SBB32rr));
4887   case X86::SETB_C64r:
4888     return Expand2AddrUndef(MIB, get(X86::SBB64rr));
4889   case X86::MMX_SET0:
4890     return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr));
4891   case X86::V_SET0:
4892   case X86::FsFLD0SS:
4893   case X86::FsFLD0SD:
4894   case X86::FsFLD0F128:
4895     return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
4896   case X86::AVX_SET0: {
4897     assert(HasAVX && "AVX not supported");
4898     const TargetRegisterInfo *TRI = &getRegisterInfo();
4899     Register SrcReg = MIB.getReg(0);
4900     Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4901     MIB->getOperand(0).setReg(XReg);
4902     Expand2AddrUndef(MIB, get(X86::VXORPSrr));
4903     MIB.addReg(SrcReg, RegState::ImplicitDefine);
4904     return true;
4905   }
4906   case X86::AVX512_128_SET0:
4907   case X86::AVX512_FsFLD0SH:
4908   case X86::AVX512_FsFLD0SS:
4909   case X86::AVX512_FsFLD0SD:
4910   case X86::AVX512_FsFLD0F128: {
4911     bool HasVLX = Subtarget.hasVLX();
4912     Register SrcReg = MIB.getReg(0);
4913     const TargetRegisterInfo *TRI = &getRegisterInfo();
4914     if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
4915       return Expand2AddrUndef(MIB,
4916                               get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4917     // Extended register without VLX. Use a larger XOR.
4918     SrcReg =
4919         TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4920     MIB->getOperand(0).setReg(SrcReg);
4921     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4922   }
4923   case X86::AVX512_256_SET0:
4924   case X86::AVX512_512_SET0: {
4925     bool HasVLX = Subtarget.hasVLX();
4926     Register SrcReg = MIB.getReg(0);
4927     const TargetRegisterInfo *TRI = &getRegisterInfo();
4928     if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
4929       Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4930       MIB->getOperand(0).setReg(XReg);
4931       Expand2AddrUndef(MIB,
4932                        get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4933       MIB.addReg(SrcReg, RegState::ImplicitDefine);
4934       return true;
4935     }
4936     if (MI.getOpcode() == X86::AVX512_256_SET0) {
4937       // No VLX so we must reference a zmm.
4938       unsigned ZReg =
4939         TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4940       MIB->getOperand(0).setReg(ZReg);
4941     }
4942     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4943   }
4944   case X86::V_SETALLONES:
4945     return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
4946   case X86::AVX2_SETALLONES:
4947     return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
4948   case X86::AVX1_SETALLONES: {
4949     Register Reg = MIB.getReg(0);
4950     // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
4951     MIB->setDesc(get(X86::VCMPPSYrri));
4952     MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
4953     return true;
4954   }
4955   case X86::AVX512_512_SETALLONES: {
4956     Register Reg = MIB.getReg(0);
4957     MIB->setDesc(get(X86::VPTERNLOGDZrri));
4958     // VPTERNLOGD needs 3 register inputs and an immediate.
4959     // 0xff will return 1s for any input.
4960     MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
4961        .addReg(Reg, RegState::Undef).addImm(0xff);
4962     return true;
4963   }
4964   case X86::AVX512_512_SEXT_MASK_32:
4965   case X86::AVX512_512_SEXT_MASK_64: {
4966     Register Reg = MIB.getReg(0);
4967     Register MaskReg = MIB.getReg(1);
4968     unsigned MaskState = getRegState(MIB->getOperand(1));
4969     unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ?
4970                    X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz;
4971     MI.RemoveOperand(1);
4972     MIB->setDesc(get(Opc));
4973     // VPTERNLOG needs 3 register inputs and an immediate.
4974     // 0xff will return 1s for any input.
4975     MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState)
4976        .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff);
4977     return true;
4978   }
4979   case X86::VMOVAPSZ128rm_NOVLX:
4980     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
4981                            get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4982   case X86::VMOVUPSZ128rm_NOVLX:
4983     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
4984                            get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4985   case X86::VMOVAPSZ256rm_NOVLX:
4986     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
4987                            get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4988   case X86::VMOVUPSZ256rm_NOVLX:
4989     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
4990                            get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4991   case X86::VMOVAPSZ128mr_NOVLX:
4992     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
4993                             get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4994   case X86::VMOVUPSZ128mr_NOVLX:
4995     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
4996                             get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4997   case X86::VMOVAPSZ256mr_NOVLX:
4998     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
4999                             get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
5000   case X86::VMOVUPSZ256mr_NOVLX:
5001     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
5002                             get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
5003   case X86::MOV32ri64: {
5004     Register Reg = MIB.getReg(0);
5005     Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
5006     MI.setDesc(get(X86::MOV32ri));
5007     MIB->getOperand(0).setReg(Reg32);
5008     MIB.addReg(Reg, RegState::ImplicitDefine);
5009     return true;
5010   }
5011 
5012   // KNL does not recognize dependency-breaking idioms for mask registers,
5013   // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
5014   // Using %k0 as the undef input register is a performance heuristic based
5015   // on the assumption that %k0 is used less frequently than the other mask
5016   // registers, since it is not usable as a write mask.
5017   // FIXME: A more advanced approach would be to choose the best input mask
5018   // register based on context.
5019   case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0);
5020   case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0);
5021   case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0);
5022   case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0);
5023   case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0);
5024   case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0);
5025   case TargetOpcode::LOAD_STACK_GUARD:
5026     expandLoadStackGuard(MIB, *this);
5027     return true;
5028   case X86::XOR64_FP:
5029   case X86::XOR32_FP:
5030     return expandXorFP(MIB, *this);
5031   case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8));
5032   case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8));
5033   case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8));
5034   case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8));
5035   case X86::ADD8rr_DB:    MIB->setDesc(get(X86::OR8rr));    break;
5036   case X86::ADD16rr_DB:   MIB->setDesc(get(X86::OR16rr));   break;
5037   case X86::ADD32rr_DB:   MIB->setDesc(get(X86::OR32rr));   break;
5038   case X86::ADD64rr_DB:   MIB->setDesc(get(X86::OR64rr));   break;
5039   case X86::ADD8ri_DB:    MIB->setDesc(get(X86::OR8ri));    break;
5040   case X86::ADD16ri_DB:   MIB->setDesc(get(X86::OR16ri));   break;
5041   case X86::ADD32ri_DB:   MIB->setDesc(get(X86::OR32ri));   break;
5042   case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break;
5043   case X86::ADD16ri8_DB:  MIB->setDesc(get(X86::OR16ri8));  break;
5044   case X86::ADD32ri8_DB:  MIB->setDesc(get(X86::OR32ri8));  break;
5045   case X86::ADD64ri8_DB:  MIB->setDesc(get(X86::OR64ri8));  break;
5046   }
5047   return false;
5048 }
5049 
5050 /// Return true for all instructions that only update
5051 /// the first 32 or 64-bits of the destination register and leave the rest
5052 /// unmodified. This can be used to avoid folding loads if the instructions
5053 /// only update part of the destination register, and the non-updated part is
5054 /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
5055 /// instructions breaks the partial register dependency and it can improve
5056 /// performance. e.g.:
5057 ///
5058 ///   movss (%rdi), %xmm0
5059 ///   cvtss2sd %xmm0, %xmm0
5060 ///
5061 /// Instead of
5062 ///   cvtss2sd (%rdi), %xmm0
5063 ///
5064 /// FIXME: This should be turned into a TSFlags.
5065 ///
5066 static bool hasPartialRegUpdate(unsigned Opcode,
5067                                 const X86Subtarget &Subtarget,
5068                                 bool ForLoadFold = false) {
5069   switch (Opcode) {
5070   case X86::CVTSI2SSrr:
5071   case X86::CVTSI2SSrm:
5072   case X86::CVTSI642SSrr:
5073   case X86::CVTSI642SSrm:
5074   case X86::CVTSI2SDrr:
5075   case X86::CVTSI2SDrm:
5076   case X86::CVTSI642SDrr:
5077   case X86::CVTSI642SDrm:
5078     // Load folding won't effect the undef register update since the input is
5079     // a GPR.
5080     return !ForLoadFold;
5081   case X86::CVTSD2SSrr:
5082   case X86::CVTSD2SSrm:
5083   case X86::CVTSS2SDrr:
5084   case X86::CVTSS2SDrm:
5085   case X86::MOVHPDrm:
5086   case X86::MOVHPSrm:
5087   case X86::MOVLPDrm:
5088   case X86::MOVLPSrm:
5089   case X86::RCPSSr:
5090   case X86::RCPSSm:
5091   case X86::RCPSSr_Int:
5092   case X86::RCPSSm_Int:
5093   case X86::ROUNDSDr:
5094   case X86::ROUNDSDm:
5095   case X86::ROUNDSSr:
5096   case X86::ROUNDSSm:
5097   case X86::RSQRTSSr:
5098   case X86::RSQRTSSm:
5099   case X86::RSQRTSSr_Int:
5100   case X86::RSQRTSSm_Int:
5101   case X86::SQRTSSr:
5102   case X86::SQRTSSm:
5103   case X86::SQRTSSr_Int:
5104   case X86::SQRTSSm_Int:
5105   case X86::SQRTSDr:
5106   case X86::SQRTSDm:
5107   case X86::SQRTSDr_Int:
5108   case X86::SQRTSDm_Int:
5109     return true;
5110   // GPR
5111   case X86::POPCNT32rm:
5112   case X86::POPCNT32rr:
5113   case X86::POPCNT64rm:
5114   case X86::POPCNT64rr:
5115     return Subtarget.hasPOPCNTFalseDeps();
5116   case X86::LZCNT32rm:
5117   case X86::LZCNT32rr:
5118   case X86::LZCNT64rm:
5119   case X86::LZCNT64rr:
5120   case X86::TZCNT32rm:
5121   case X86::TZCNT32rr:
5122   case X86::TZCNT64rm:
5123   case X86::TZCNT64rr:
5124     return Subtarget.hasLZCNTFalseDeps();
5125   }
5126 
5127   return false;
5128 }
5129 
5130 /// Inform the BreakFalseDeps pass how many idle
5131 /// instructions we would like before a partial register update.
5132 unsigned X86InstrInfo::getPartialRegUpdateClearance(
5133     const MachineInstr &MI, unsigned OpNum,
5134     const TargetRegisterInfo *TRI) const {
5135   if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
5136     return 0;
5137 
5138   // If MI is marked as reading Reg, the partial register update is wanted.
5139   const MachineOperand &MO = MI.getOperand(0);
5140   Register Reg = MO.getReg();
5141   if (Reg.isVirtual()) {
5142     if (MO.readsReg() || MI.readsVirtualRegister(Reg))
5143       return 0;
5144   } else {
5145     if (MI.readsRegister(Reg, TRI))
5146       return 0;
5147   }
5148 
5149   // If any instructions in the clearance range are reading Reg, insert a
5150   // dependency breaking instruction, which is inexpensive and is likely to
5151   // be hidden in other instruction's cycles.
5152   return PartialRegUpdateClearance;
5153 }
5154 
5155 // Return true for any instruction the copies the high bits of the first source
5156 // operand into the unused high bits of the destination operand.
5157 // Also returns true for instructions that have two inputs where one may
5158 // be undef and we want it to use the same register as the other input.
5159 static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
5160                               bool ForLoadFold = false) {
5161   // Set the OpNum parameter to the first source operand.
5162   switch (Opcode) {
5163   case X86::MMX_PUNPCKHBWirr:
5164   case X86::MMX_PUNPCKHWDirr:
5165   case X86::MMX_PUNPCKHDQirr:
5166   case X86::MMX_PUNPCKLBWirr:
5167   case X86::MMX_PUNPCKLWDirr:
5168   case X86::MMX_PUNPCKLDQirr:
5169   case X86::MOVHLPSrr:
5170   case X86::PACKSSWBrr:
5171   case X86::PACKUSWBrr:
5172   case X86::PACKSSDWrr:
5173   case X86::PACKUSDWrr:
5174   case X86::PUNPCKHBWrr:
5175   case X86::PUNPCKLBWrr:
5176   case X86::PUNPCKHWDrr:
5177   case X86::PUNPCKLWDrr:
5178   case X86::PUNPCKHDQrr:
5179   case X86::PUNPCKLDQrr:
5180   case X86::PUNPCKHQDQrr:
5181   case X86::PUNPCKLQDQrr:
5182   case X86::SHUFPDrri:
5183   case X86::SHUFPSrri:
5184     // These instructions are sometimes used with an undef first or second
5185     // source. Return true here so BreakFalseDeps will assign this source to the
5186     // same register as the first source to avoid a false dependency.
5187     // Operand 1 of these instructions is tied so they're separate from their
5188     // VEX counterparts.
5189     return OpNum == 2 && !ForLoadFold;
5190 
5191   case X86::VMOVLHPSrr:
5192   case X86::VMOVLHPSZrr:
5193   case X86::VPACKSSWBrr:
5194   case X86::VPACKUSWBrr:
5195   case X86::VPACKSSDWrr:
5196   case X86::VPACKUSDWrr:
5197   case X86::VPACKSSWBZ128rr:
5198   case X86::VPACKUSWBZ128rr:
5199   case X86::VPACKSSDWZ128rr:
5200   case X86::VPACKUSDWZ128rr:
5201   case X86::VPERM2F128rr:
5202   case X86::VPERM2I128rr:
5203   case X86::VSHUFF32X4Z256rri:
5204   case X86::VSHUFF32X4Zrri:
5205   case X86::VSHUFF64X2Z256rri:
5206   case X86::VSHUFF64X2Zrri:
5207   case X86::VSHUFI32X4Z256rri:
5208   case X86::VSHUFI32X4Zrri:
5209   case X86::VSHUFI64X2Z256rri:
5210   case X86::VSHUFI64X2Zrri:
5211   case X86::VPUNPCKHBWrr:
5212   case X86::VPUNPCKLBWrr:
5213   case X86::VPUNPCKHBWYrr:
5214   case X86::VPUNPCKLBWYrr:
5215   case X86::VPUNPCKHBWZ128rr:
5216   case X86::VPUNPCKLBWZ128rr:
5217   case X86::VPUNPCKHBWZ256rr:
5218   case X86::VPUNPCKLBWZ256rr:
5219   case X86::VPUNPCKHBWZrr:
5220   case X86::VPUNPCKLBWZrr:
5221   case X86::VPUNPCKHWDrr:
5222   case X86::VPUNPCKLWDrr:
5223   case X86::VPUNPCKHWDYrr:
5224   case X86::VPUNPCKLWDYrr:
5225   case X86::VPUNPCKHWDZ128rr:
5226   case X86::VPUNPCKLWDZ128rr:
5227   case X86::VPUNPCKHWDZ256rr:
5228   case X86::VPUNPCKLWDZ256rr:
5229   case X86::VPUNPCKHWDZrr:
5230   case X86::VPUNPCKLWDZrr:
5231   case X86::VPUNPCKHDQrr:
5232   case X86::VPUNPCKLDQrr:
5233   case X86::VPUNPCKHDQYrr:
5234   case X86::VPUNPCKLDQYrr:
5235   case X86::VPUNPCKHDQZ128rr:
5236   case X86::VPUNPCKLDQZ128rr:
5237   case X86::VPUNPCKHDQZ256rr:
5238   case X86::VPUNPCKLDQZ256rr:
5239   case X86::VPUNPCKHDQZrr:
5240   case X86::VPUNPCKLDQZrr:
5241   case X86::VPUNPCKHQDQrr:
5242   case X86::VPUNPCKLQDQrr:
5243   case X86::VPUNPCKHQDQYrr:
5244   case X86::VPUNPCKLQDQYrr:
5245   case X86::VPUNPCKHQDQZ128rr:
5246   case X86::VPUNPCKLQDQZ128rr:
5247   case X86::VPUNPCKHQDQZ256rr:
5248   case X86::VPUNPCKLQDQZ256rr:
5249   case X86::VPUNPCKHQDQZrr:
5250   case X86::VPUNPCKLQDQZrr:
5251     // These instructions are sometimes used with an undef first or second
5252     // source. Return true here so BreakFalseDeps will assign this source to the
5253     // same register as the first source to avoid a false dependency.
5254     return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
5255 
5256   case X86::VCVTSI2SSrr:
5257   case X86::VCVTSI2SSrm:
5258   case X86::VCVTSI2SSrr_Int:
5259   case X86::VCVTSI2SSrm_Int:
5260   case X86::VCVTSI642SSrr:
5261   case X86::VCVTSI642SSrm:
5262   case X86::VCVTSI642SSrr_Int:
5263   case X86::VCVTSI642SSrm_Int:
5264   case X86::VCVTSI2SDrr:
5265   case X86::VCVTSI2SDrm:
5266   case X86::VCVTSI2SDrr_Int:
5267   case X86::VCVTSI2SDrm_Int:
5268   case X86::VCVTSI642SDrr:
5269   case X86::VCVTSI642SDrm:
5270   case X86::VCVTSI642SDrr_Int:
5271   case X86::VCVTSI642SDrm_Int:
5272   // AVX-512
5273   case X86::VCVTSI2SSZrr:
5274   case X86::VCVTSI2SSZrm:
5275   case X86::VCVTSI2SSZrr_Int:
5276   case X86::VCVTSI2SSZrrb_Int:
5277   case X86::VCVTSI2SSZrm_Int:
5278   case X86::VCVTSI642SSZrr:
5279   case X86::VCVTSI642SSZrm:
5280   case X86::VCVTSI642SSZrr_Int:
5281   case X86::VCVTSI642SSZrrb_Int:
5282   case X86::VCVTSI642SSZrm_Int:
5283   case X86::VCVTSI2SDZrr:
5284   case X86::VCVTSI2SDZrm:
5285   case X86::VCVTSI2SDZrr_Int:
5286   case X86::VCVTSI2SDZrm_Int:
5287   case X86::VCVTSI642SDZrr:
5288   case X86::VCVTSI642SDZrm:
5289   case X86::VCVTSI642SDZrr_Int:
5290   case X86::VCVTSI642SDZrrb_Int:
5291   case X86::VCVTSI642SDZrm_Int:
5292   case X86::VCVTUSI2SSZrr:
5293   case X86::VCVTUSI2SSZrm:
5294   case X86::VCVTUSI2SSZrr_Int:
5295   case X86::VCVTUSI2SSZrrb_Int:
5296   case X86::VCVTUSI2SSZrm_Int:
5297   case X86::VCVTUSI642SSZrr:
5298   case X86::VCVTUSI642SSZrm:
5299   case X86::VCVTUSI642SSZrr_Int:
5300   case X86::VCVTUSI642SSZrrb_Int:
5301   case X86::VCVTUSI642SSZrm_Int:
5302   case X86::VCVTUSI2SDZrr:
5303   case X86::VCVTUSI2SDZrm:
5304   case X86::VCVTUSI2SDZrr_Int:
5305   case X86::VCVTUSI2SDZrm_Int:
5306   case X86::VCVTUSI642SDZrr:
5307   case X86::VCVTUSI642SDZrm:
5308   case X86::VCVTUSI642SDZrr_Int:
5309   case X86::VCVTUSI642SDZrrb_Int:
5310   case X86::VCVTUSI642SDZrm_Int:
5311   case X86::VCVTSI2SHZrr:
5312   case X86::VCVTSI2SHZrm:
5313   case X86::VCVTSI2SHZrr_Int:
5314   case X86::VCVTSI2SHZrrb_Int:
5315   case X86::VCVTSI2SHZrm_Int:
5316   case X86::VCVTSI642SHZrr:
5317   case X86::VCVTSI642SHZrm:
5318   case X86::VCVTSI642SHZrr_Int:
5319   case X86::VCVTSI642SHZrrb_Int:
5320   case X86::VCVTSI642SHZrm_Int:
5321   case X86::VCVTUSI2SHZrr:
5322   case X86::VCVTUSI2SHZrm:
5323   case X86::VCVTUSI2SHZrr_Int:
5324   case X86::VCVTUSI2SHZrrb_Int:
5325   case X86::VCVTUSI2SHZrm_Int:
5326   case X86::VCVTUSI642SHZrr:
5327   case X86::VCVTUSI642SHZrm:
5328   case X86::VCVTUSI642SHZrr_Int:
5329   case X86::VCVTUSI642SHZrrb_Int:
5330   case X86::VCVTUSI642SHZrm_Int:
5331     // Load folding won't effect the undef register update since the input is
5332     // a GPR.
5333     return OpNum == 1 && !ForLoadFold;
5334   case X86::VCVTSD2SSrr:
5335   case X86::VCVTSD2SSrm:
5336   case X86::VCVTSD2SSrr_Int:
5337   case X86::VCVTSD2SSrm_Int:
5338   case X86::VCVTSS2SDrr:
5339   case X86::VCVTSS2SDrm:
5340   case X86::VCVTSS2SDrr_Int:
5341   case X86::VCVTSS2SDrm_Int:
5342   case X86::VRCPSSr:
5343   case X86::VRCPSSr_Int:
5344   case X86::VRCPSSm:
5345   case X86::VRCPSSm_Int:
5346   case X86::VROUNDSDr:
5347   case X86::VROUNDSDm:
5348   case X86::VROUNDSDr_Int:
5349   case X86::VROUNDSDm_Int:
5350   case X86::VROUNDSSr:
5351   case X86::VROUNDSSm:
5352   case X86::VROUNDSSr_Int:
5353   case X86::VROUNDSSm_Int:
5354   case X86::VRSQRTSSr:
5355   case X86::VRSQRTSSr_Int:
5356   case X86::VRSQRTSSm:
5357   case X86::VRSQRTSSm_Int:
5358   case X86::VSQRTSSr:
5359   case X86::VSQRTSSr_Int:
5360   case X86::VSQRTSSm:
5361   case X86::VSQRTSSm_Int:
5362   case X86::VSQRTSDr:
5363   case X86::VSQRTSDr_Int:
5364   case X86::VSQRTSDm:
5365   case X86::VSQRTSDm_Int:
5366   // AVX-512
5367   case X86::VCVTSD2SSZrr:
5368   case X86::VCVTSD2SSZrr_Int:
5369   case X86::VCVTSD2SSZrrb_Int:
5370   case X86::VCVTSD2SSZrm:
5371   case X86::VCVTSD2SSZrm_Int:
5372   case X86::VCVTSS2SDZrr:
5373   case X86::VCVTSS2SDZrr_Int:
5374   case X86::VCVTSS2SDZrrb_Int:
5375   case X86::VCVTSS2SDZrm:
5376   case X86::VCVTSS2SDZrm_Int:
5377   case X86::VGETEXPSDZr:
5378   case X86::VGETEXPSDZrb:
5379   case X86::VGETEXPSDZm:
5380   case X86::VGETEXPSSZr:
5381   case X86::VGETEXPSSZrb:
5382   case X86::VGETEXPSSZm:
5383   case X86::VGETMANTSDZrri:
5384   case X86::VGETMANTSDZrrib:
5385   case X86::VGETMANTSDZrmi:
5386   case X86::VGETMANTSSZrri:
5387   case X86::VGETMANTSSZrrib:
5388   case X86::VGETMANTSSZrmi:
5389   case X86::VRNDSCALESDZr:
5390   case X86::VRNDSCALESDZr_Int:
5391   case X86::VRNDSCALESDZrb_Int:
5392   case X86::VRNDSCALESDZm:
5393   case X86::VRNDSCALESDZm_Int:
5394   case X86::VRNDSCALESSZr:
5395   case X86::VRNDSCALESSZr_Int:
5396   case X86::VRNDSCALESSZrb_Int:
5397   case X86::VRNDSCALESSZm:
5398   case X86::VRNDSCALESSZm_Int:
5399   case X86::VRCP14SDZrr:
5400   case X86::VRCP14SDZrm:
5401   case X86::VRCP14SSZrr:
5402   case X86::VRCP14SSZrm:
5403   case X86::VRCPSHZrr:
5404   case X86::VRCPSHZrm:
5405   case X86::VRSQRTSHZrr:
5406   case X86::VRSQRTSHZrm:
5407   case X86::VREDUCESHZrmi:
5408   case X86::VREDUCESHZrri:
5409   case X86::VREDUCESHZrrib:
5410   case X86::VGETEXPSHZr:
5411   case X86::VGETEXPSHZrb:
5412   case X86::VGETEXPSHZm:
5413   case X86::VGETMANTSHZrri:
5414   case X86::VGETMANTSHZrrib:
5415   case X86::VGETMANTSHZrmi:
5416   case X86::VRNDSCALESHZr:
5417   case X86::VRNDSCALESHZr_Int:
5418   case X86::VRNDSCALESHZrb_Int:
5419   case X86::VRNDSCALESHZm:
5420   case X86::VRNDSCALESHZm_Int:
5421   case X86::VSQRTSHZr:
5422   case X86::VSQRTSHZr_Int:
5423   case X86::VSQRTSHZrb_Int:
5424   case X86::VSQRTSHZm:
5425   case X86::VSQRTSHZm_Int:
5426   case X86::VRCP28SDZr:
5427   case X86::VRCP28SDZrb:
5428   case X86::VRCP28SDZm:
5429   case X86::VRCP28SSZr:
5430   case X86::VRCP28SSZrb:
5431   case X86::VRCP28SSZm:
5432   case X86::VREDUCESSZrmi:
5433   case X86::VREDUCESSZrri:
5434   case X86::VREDUCESSZrrib:
5435   case X86::VRSQRT14SDZrr:
5436   case X86::VRSQRT14SDZrm:
5437   case X86::VRSQRT14SSZrr:
5438   case X86::VRSQRT14SSZrm:
5439   case X86::VRSQRT28SDZr:
5440   case X86::VRSQRT28SDZrb:
5441   case X86::VRSQRT28SDZm:
5442   case X86::VRSQRT28SSZr:
5443   case X86::VRSQRT28SSZrb:
5444   case X86::VRSQRT28SSZm:
5445   case X86::VSQRTSSZr:
5446   case X86::VSQRTSSZr_Int:
5447   case X86::VSQRTSSZrb_Int:
5448   case X86::VSQRTSSZm:
5449   case X86::VSQRTSSZm_Int:
5450   case X86::VSQRTSDZr:
5451   case X86::VSQRTSDZr_Int:
5452   case X86::VSQRTSDZrb_Int:
5453   case X86::VSQRTSDZm:
5454   case X86::VSQRTSDZm_Int:
5455   case X86::VCVTSD2SHZrr:
5456   case X86::VCVTSD2SHZrr_Int:
5457   case X86::VCVTSD2SHZrrb_Int:
5458   case X86::VCVTSD2SHZrm:
5459   case X86::VCVTSD2SHZrm_Int:
5460   case X86::VCVTSS2SHZrr:
5461   case X86::VCVTSS2SHZrr_Int:
5462   case X86::VCVTSS2SHZrrb_Int:
5463   case X86::VCVTSS2SHZrm:
5464   case X86::VCVTSS2SHZrm_Int:
5465   case X86::VCVTSH2SDZrr:
5466   case X86::VCVTSH2SDZrr_Int:
5467   case X86::VCVTSH2SDZrrb_Int:
5468   case X86::VCVTSH2SDZrm:
5469   case X86::VCVTSH2SDZrm_Int:
5470   case X86::VCVTSH2SSZrr:
5471   case X86::VCVTSH2SSZrr_Int:
5472   case X86::VCVTSH2SSZrrb_Int:
5473   case X86::VCVTSH2SSZrm:
5474   case X86::VCVTSH2SSZrm_Int:
5475     return OpNum == 1;
5476   case X86::VMOVSSZrrk:
5477   case X86::VMOVSDZrrk:
5478     return OpNum == 3 && !ForLoadFold;
5479   case X86::VMOVSSZrrkz:
5480   case X86::VMOVSDZrrkz:
5481     return OpNum == 2 && !ForLoadFold;
5482   }
5483 
5484   return false;
5485 }
5486 
5487 /// Inform the BreakFalseDeps pass how many idle instructions we would like
5488 /// before certain undef register reads.
5489 ///
5490 /// This catches the VCVTSI2SD family of instructions:
5491 ///
5492 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
5493 ///
5494 /// We should to be careful *not* to catch VXOR idioms which are presumably
5495 /// handled specially in the pipeline:
5496 ///
5497 /// vxorps undef %xmm1, undef %xmm1, %xmm1
5498 ///
5499 /// Like getPartialRegUpdateClearance, this makes a strong assumption that the
5500 /// high bits that are passed-through are not live.
5501 unsigned
5502 X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
5503                                    const TargetRegisterInfo *TRI) const {
5504   const MachineOperand &MO = MI.getOperand(OpNum);
5505   if (Register::isPhysicalRegister(MO.getReg()) &&
5506       hasUndefRegUpdate(MI.getOpcode(), OpNum))
5507     return UndefRegClearance;
5508 
5509   return 0;
5510 }
5511 
5512 void X86InstrInfo::breakPartialRegDependency(
5513     MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
5514   Register Reg = MI.getOperand(OpNum).getReg();
5515   // If MI kills this register, the false dependence is already broken.
5516   if (MI.killsRegister(Reg, TRI))
5517     return;
5518 
5519   if (X86::VR128RegClass.contains(Reg)) {
5520     // These instructions are all floating point domain, so xorps is the best
5521     // choice.
5522     unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
5523     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
5524         .addReg(Reg, RegState::Undef)
5525         .addReg(Reg, RegState::Undef);
5526     MI.addRegisterKilled(Reg, TRI, true);
5527   } else if (X86::VR256RegClass.contains(Reg)) {
5528     // Use vxorps to clear the full ymm register.
5529     // It wants to read and write the xmm sub-register.
5530     Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
5531     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
5532         .addReg(XReg, RegState::Undef)
5533         .addReg(XReg, RegState::Undef)
5534         .addReg(Reg, RegState::ImplicitDefine);
5535     MI.addRegisterKilled(Reg, TRI, true);
5536   } else if (X86::GR64RegClass.contains(Reg)) {
5537     // Using XOR32rr because it has shorter encoding and zeros up the upper bits
5538     // as well.
5539     Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
5540     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
5541         .addReg(XReg, RegState::Undef)
5542         .addReg(XReg, RegState::Undef)
5543         .addReg(Reg, RegState::ImplicitDefine);
5544     MI.addRegisterKilled(Reg, TRI, true);
5545   } else if (X86::GR32RegClass.contains(Reg)) {
5546     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
5547         .addReg(Reg, RegState::Undef)
5548         .addReg(Reg, RegState::Undef);
5549     MI.addRegisterKilled(Reg, TRI, true);
5550   }
5551 }
5552 
5553 static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
5554                         int PtrOffset = 0) {
5555   unsigned NumAddrOps = MOs.size();
5556 
5557   if (NumAddrOps < 4) {
5558     // FrameIndex only - add an immediate offset (whether its zero or not).
5559     for (unsigned i = 0; i != NumAddrOps; ++i)
5560       MIB.add(MOs[i]);
5561     addOffset(MIB, PtrOffset);
5562   } else {
5563     // General Memory Addressing - we need to add any offset to an existing
5564     // offset.
5565     assert(MOs.size() == 5 && "Unexpected memory operand list length");
5566     for (unsigned i = 0; i != NumAddrOps; ++i) {
5567       const MachineOperand &MO = MOs[i];
5568       if (i == 3 && PtrOffset != 0) {
5569         MIB.addDisp(MO, PtrOffset);
5570       } else {
5571         MIB.add(MO);
5572       }
5573     }
5574   }
5575 }
5576 
5577 static void updateOperandRegConstraints(MachineFunction &MF,
5578                                         MachineInstr &NewMI,
5579                                         const TargetInstrInfo &TII) {
5580   MachineRegisterInfo &MRI = MF.getRegInfo();
5581   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
5582 
5583   for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
5584     MachineOperand &MO = NewMI.getOperand(Idx);
5585     // We only need to update constraints on virtual register operands.
5586     if (!MO.isReg())
5587       continue;
5588     Register Reg = MO.getReg();
5589     if (!Reg.isVirtual())
5590       continue;
5591 
5592     auto *NewRC = MRI.constrainRegClass(
5593         Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF));
5594     if (!NewRC) {
5595       LLVM_DEBUG(
5596           dbgs() << "WARNING: Unable to update register constraint for operand "
5597                  << Idx << " of instruction:\n";
5598           NewMI.dump(); dbgs() << "\n");
5599     }
5600   }
5601 }
5602 
5603 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
5604                                      ArrayRef<MachineOperand> MOs,
5605                                      MachineBasicBlock::iterator InsertPt,
5606                                      MachineInstr &MI,
5607                                      const TargetInstrInfo &TII) {
5608   // Create the base instruction with the memory operand as the first part.
5609   // Omit the implicit operands, something BuildMI can't do.
5610   MachineInstr *NewMI =
5611       MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
5612   MachineInstrBuilder MIB(MF, NewMI);
5613   addOperands(MIB, MOs);
5614 
5615   // Loop over the rest of the ri operands, converting them over.
5616   unsigned NumOps = MI.getDesc().getNumOperands() - 2;
5617   for (unsigned i = 0; i != NumOps; ++i) {
5618     MachineOperand &MO = MI.getOperand(i + 2);
5619     MIB.add(MO);
5620   }
5621   for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
5622     MachineOperand &MO = MI.getOperand(i);
5623     MIB.add(MO);
5624   }
5625 
5626   updateOperandRegConstraints(MF, *NewMI, TII);
5627 
5628   MachineBasicBlock *MBB = InsertPt->getParent();
5629   MBB->insert(InsertPt, NewMI);
5630 
5631   return MIB;
5632 }
5633 
5634 static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
5635                               unsigned OpNo, ArrayRef<MachineOperand> MOs,
5636                               MachineBasicBlock::iterator InsertPt,
5637                               MachineInstr &MI, const TargetInstrInfo &TII,
5638                               int PtrOffset = 0) {
5639   // Omit the implicit operands, something BuildMI can't do.
5640   MachineInstr *NewMI =
5641       MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
5642   MachineInstrBuilder MIB(MF, NewMI);
5643 
5644   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
5645     MachineOperand &MO = MI.getOperand(i);
5646     if (i == OpNo) {
5647       assert(MO.isReg() && "Expected to fold into reg operand!");
5648       addOperands(MIB, MOs, PtrOffset);
5649     } else {
5650       MIB.add(MO);
5651     }
5652   }
5653 
5654   updateOperandRegConstraints(MF, *NewMI, TII);
5655 
5656   // Copy the NoFPExcept flag from the instruction we're fusing.
5657   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
5658     NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept);
5659 
5660   MachineBasicBlock *MBB = InsertPt->getParent();
5661   MBB->insert(InsertPt, NewMI);
5662 
5663   return MIB;
5664 }
5665 
5666 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
5667                                 ArrayRef<MachineOperand> MOs,
5668                                 MachineBasicBlock::iterator InsertPt,
5669                                 MachineInstr &MI) {
5670   MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
5671                                     MI.getDebugLoc(), TII.get(Opcode));
5672   addOperands(MIB, MOs);
5673   return MIB.addImm(0);
5674 }
5675 
5676 MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
5677     MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
5678     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
5679     unsigned Size, Align Alignment) const {
5680   switch (MI.getOpcode()) {
5681   case X86::INSERTPSrr:
5682   case X86::VINSERTPSrr:
5683   case X86::VINSERTPSZrr:
5684     // Attempt to convert the load of inserted vector into a fold load
5685     // of a single float.
5686     if (OpNum == 2) {
5687       unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
5688       unsigned ZMask = Imm & 15;
5689       unsigned DstIdx = (Imm >> 4) & 3;
5690       unsigned SrcIdx = (Imm >> 6) & 3;
5691 
5692       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5693       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5694       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5695       if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(4)) {
5696         int PtrOffset = SrcIdx * 4;
5697         unsigned NewImm = (DstIdx << 4) | ZMask;
5698         unsigned NewOpCode =
5699             (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm :
5700             (MI.getOpcode() == X86::VINSERTPSrr)  ? X86::VINSERTPSrm  :
5701                                                     X86::INSERTPSrm;
5702         MachineInstr *NewMI =
5703             FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
5704         NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
5705         return NewMI;
5706       }
5707     }
5708     break;
5709   case X86::MOVHLPSrr:
5710   case X86::VMOVHLPSrr:
5711   case X86::VMOVHLPSZrr:
5712     // Move the upper 64-bits of the second operand to the lower 64-bits.
5713     // To fold the load, adjust the pointer to the upper and use (V)MOVLPS.
5714     // TODO: In most cases AVX doesn't have a 8-byte alignment requirement.
5715     if (OpNum == 2) {
5716       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5717       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5718       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5719       if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
5720         unsigned NewOpCode =
5721             (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
5722             (MI.getOpcode() == X86::VMOVHLPSrr)  ? X86::VMOVLPSrm     :
5723                                                    X86::MOVLPSrm;
5724         MachineInstr *NewMI =
5725             FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
5726         return NewMI;
5727       }
5728     }
5729     break;
5730   case X86::UNPCKLPDrr:
5731     // If we won't be able to fold this to the memory form of UNPCKL, use
5732     // MOVHPD instead. Done as custom because we can't have this in the load
5733     // table twice.
5734     if (OpNum == 2) {
5735       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5736       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5737       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5738       if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
5739         MachineInstr *NewMI =
5740             FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
5741         return NewMI;
5742       }
5743     }
5744     break;
5745   }
5746 
5747   return nullptr;
5748 }
5749 
5750 static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
5751                                                MachineInstr &MI) {
5752   if (!hasUndefRegUpdate(MI.getOpcode(), 1, /*ForLoadFold*/true) ||
5753       !MI.getOperand(1).isReg())
5754     return false;
5755 
5756   // The are two cases we need to handle depending on where in the pipeline
5757   // the folding attempt is being made.
5758   // -Register has the undef flag set.
5759   // -Register is produced by the IMPLICIT_DEF instruction.
5760 
5761   if (MI.getOperand(1).isUndef())
5762     return true;
5763 
5764   MachineRegisterInfo &RegInfo = MF.getRegInfo();
5765   MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
5766   return VRegDef && VRegDef->isImplicitDef();
5767 }
5768 
5769 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
5770     MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
5771     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
5772     unsigned Size, Align Alignment, bool AllowCommute) const {
5773   bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
5774   bool isTwoAddrFold = false;
5775 
5776   // For CPUs that favor the register form of a call or push,
5777   // do not fold loads into calls or pushes, unless optimizing for size
5778   // aggressively.
5779   if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
5780       (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
5781        MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
5782        MI.getOpcode() == X86::PUSH64r))
5783     return nullptr;
5784 
5785   // Avoid partial and undef register update stalls unless optimizing for size.
5786   if (!MF.getFunction().hasOptSize() &&
5787       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5788        shouldPreventUndefRegUpdateMemFold(MF, MI)))
5789     return nullptr;
5790 
5791   unsigned NumOps = MI.getDesc().getNumOperands();
5792   bool isTwoAddr =
5793       NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
5794 
5795   // FIXME: AsmPrinter doesn't know how to handle
5796   // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
5797   if (MI.getOpcode() == X86::ADD32ri &&
5798       MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
5799     return nullptr;
5800 
5801   // GOTTPOFF relocation loads can only be folded into add instructions.
5802   // FIXME: Need to exclude other relocations that only support specific
5803   // instructions.
5804   if (MOs.size() == X86::AddrNumOperands &&
5805       MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
5806       MI.getOpcode() != X86::ADD64rr)
5807     return nullptr;
5808 
5809   MachineInstr *NewMI = nullptr;
5810 
5811   // Attempt to fold any custom cases we have.
5812   if (MachineInstr *CustomMI = foldMemoryOperandCustom(
5813           MF, MI, OpNum, MOs, InsertPt, Size, Alignment))
5814     return CustomMI;
5815 
5816   const X86MemoryFoldTableEntry *I = nullptr;
5817 
5818   // Folding a memory location into the two-address part of a two-address
5819   // instruction is different than folding it other places.  It requires
5820   // replacing the *two* registers with the memory location.
5821   if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
5822       MI.getOperand(1).isReg() &&
5823       MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
5824     I = lookupTwoAddrFoldTable(MI.getOpcode());
5825     isTwoAddrFold = true;
5826   } else {
5827     if (OpNum == 0) {
5828       if (MI.getOpcode() == X86::MOV32r0) {
5829         NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
5830         if (NewMI)
5831           return NewMI;
5832       }
5833     }
5834 
5835     I = lookupFoldTable(MI.getOpcode(), OpNum);
5836   }
5837 
5838   if (I != nullptr) {
5839     unsigned Opcode = I->DstOp;
5840     bool FoldedLoad =
5841         isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_LOAD) || OpNum > 0;
5842     bool FoldedStore =
5843         isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_STORE);
5844     MaybeAlign MinAlign =
5845         decodeMaybeAlign((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT);
5846     if (MinAlign && Alignment < *MinAlign)
5847       return nullptr;
5848     bool NarrowToMOV32rm = false;
5849     if (Size) {
5850       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5851       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
5852                                                   &RI, MF);
5853       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5854       // Check if it's safe to fold the load. If the size of the object is
5855       // narrower than the load width, then it's not.
5856       // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
5857       if (FoldedLoad && Size < RCSize) {
5858         // If this is a 64-bit load, but the spill slot is 32, then we can do
5859         // a 32-bit load which is implicitly zero-extended. This likely is
5860         // due to live interval analysis remat'ing a load from stack slot.
5861         if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
5862           return nullptr;
5863         if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
5864           return nullptr;
5865         Opcode = X86::MOV32rm;
5866         NarrowToMOV32rm = true;
5867       }
5868       // For stores, make sure the size of the object is equal to the size of
5869       // the store. If the object is larger, the extra bits would be garbage. If
5870       // the object is smaller we might overwrite another object or fault.
5871       if (FoldedStore && Size != RCSize)
5872         return nullptr;
5873     }
5874 
5875     if (isTwoAddrFold)
5876       NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
5877     else
5878       NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
5879 
5880     if (NarrowToMOV32rm) {
5881       // If this is the special case where we use a MOV32rm to load a 32-bit
5882       // value and zero-extend the top bits. Change the destination register
5883       // to a 32-bit one.
5884       Register DstReg = NewMI->getOperand(0).getReg();
5885       if (DstReg.isPhysical())
5886         NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
5887       else
5888         NewMI->getOperand(0).setSubReg(X86::sub_32bit);
5889     }
5890     return NewMI;
5891   }
5892 
5893   // If the instruction and target operand are commutable, commute the
5894   // instruction and try again.
5895   if (AllowCommute) {
5896     unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
5897     if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
5898       bool HasDef = MI.getDesc().getNumDefs();
5899       Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
5900       Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
5901       Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
5902       bool Tied1 =
5903           0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
5904       bool Tied2 =
5905           0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
5906 
5907       // If either of the commutable operands are tied to the destination
5908       // then we can not commute + fold.
5909       if ((HasDef && Reg0 == Reg1 && Tied1) ||
5910           (HasDef && Reg0 == Reg2 && Tied2))
5911         return nullptr;
5912 
5913       MachineInstr *CommutedMI =
5914           commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
5915       if (!CommutedMI) {
5916         // Unable to commute.
5917         return nullptr;
5918       }
5919       if (CommutedMI != &MI) {
5920         // New instruction. We can't fold from this.
5921         CommutedMI->eraseFromParent();
5922         return nullptr;
5923       }
5924 
5925       // Attempt to fold with the commuted version of the instruction.
5926       NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size,
5927                                     Alignment, /*AllowCommute=*/false);
5928       if (NewMI)
5929         return NewMI;
5930 
5931       // Folding failed again - undo the commute before returning.
5932       MachineInstr *UncommutedMI =
5933           commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
5934       if (!UncommutedMI) {
5935         // Unable to commute.
5936         return nullptr;
5937       }
5938       if (UncommutedMI != &MI) {
5939         // New instruction. It doesn't need to be kept.
5940         UncommutedMI->eraseFromParent();
5941         return nullptr;
5942       }
5943 
5944       // Return here to prevent duplicate fuse failure report.
5945       return nullptr;
5946     }
5947   }
5948 
5949   // No fusion
5950   if (PrintFailedFusing && !MI.isCopy())
5951     dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
5952   return nullptr;
5953 }
5954 
5955 MachineInstr *
5956 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
5957                                     ArrayRef<unsigned> Ops,
5958                                     MachineBasicBlock::iterator InsertPt,
5959                                     int FrameIndex, LiveIntervals *LIS,
5960                                     VirtRegMap *VRM) const {
5961   // Check switch flag
5962   if (NoFusing)
5963     return nullptr;
5964 
5965   // Avoid partial and undef register update stalls unless optimizing for size.
5966   if (!MF.getFunction().hasOptSize() &&
5967       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5968        shouldPreventUndefRegUpdateMemFold(MF, MI)))
5969     return nullptr;
5970 
5971   // Don't fold subreg spills, or reloads that use a high subreg.
5972   for (auto Op : Ops) {
5973     MachineOperand &MO = MI.getOperand(Op);
5974     auto SubReg = MO.getSubReg();
5975     if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
5976       return nullptr;
5977   }
5978 
5979   const MachineFrameInfo &MFI = MF.getFrameInfo();
5980   unsigned Size = MFI.getObjectSize(FrameIndex);
5981   Align Alignment = MFI.getObjectAlign(FrameIndex);
5982   // If the function stack isn't realigned we don't want to fold instructions
5983   // that need increased alignment.
5984   if (!RI.hasStackRealignment(MF))
5985     Alignment =
5986         std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
5987   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
5988     unsigned NewOpc = 0;
5989     unsigned RCSize = 0;
5990     switch (MI.getOpcode()) {
5991     default: return nullptr;
5992     case X86::TEST8rr:  NewOpc = X86::CMP8ri; RCSize = 1; break;
5993     case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
5994     case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
5995     case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
5996     }
5997     // Check if it's safe to fold the load. If the size of the object is
5998     // narrower than the load width, then it's not.
5999     if (Size < RCSize)
6000       return nullptr;
6001     // Change to CMPXXri r, 0 first.
6002     MI.setDesc(get(NewOpc));
6003     MI.getOperand(1).ChangeToImmediate(0);
6004   } else if (Ops.size() != 1)
6005     return nullptr;
6006 
6007   return foldMemoryOperandImpl(MF, MI, Ops[0],
6008                                MachineOperand::CreateFI(FrameIndex), InsertPt,
6009                                Size, Alignment, /*AllowCommute=*/true);
6010 }
6011 
6012 /// Check if \p LoadMI is a partial register load that we can't fold into \p MI
6013 /// because the latter uses contents that wouldn't be defined in the folded
6014 /// version.  For instance, this transformation isn't legal:
6015 ///   movss (%rdi), %xmm0
6016 ///   addps %xmm0, %xmm0
6017 /// ->
6018 ///   addps (%rdi), %xmm0
6019 ///
6020 /// But this one is:
6021 ///   movss (%rdi), %xmm0
6022 ///   addss %xmm0, %xmm0
6023 /// ->
6024 ///   addss (%rdi), %xmm0
6025 ///
6026 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
6027                                              const MachineInstr &UserMI,
6028                                              const MachineFunction &MF) {
6029   unsigned Opc = LoadMI.getOpcode();
6030   unsigned UserOpc = UserMI.getOpcode();
6031   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6032   const TargetRegisterClass *RC =
6033       MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
6034   unsigned RegSize = TRI.getRegSizeInBits(*RC);
6035 
6036   if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
6037        Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
6038        Opc == X86::VMOVSSZrm_alt) &&
6039       RegSize > 32) {
6040     // These instructions only load 32 bits, we can't fold them if the
6041     // destination register is wider than 32 bits (4 bytes), and its user
6042     // instruction isn't scalar (SS).
6043     switch (UserOpc) {
6044     case X86::CVTSS2SDrr_Int:
6045     case X86::VCVTSS2SDrr_Int:
6046     case X86::VCVTSS2SDZrr_Int:
6047     case X86::VCVTSS2SDZrr_Intk:
6048     case X86::VCVTSS2SDZrr_Intkz:
6049     case X86::CVTSS2SIrr_Int:     case X86::CVTSS2SI64rr_Int:
6050     case X86::VCVTSS2SIrr_Int:    case X86::VCVTSS2SI64rr_Int:
6051     case X86::VCVTSS2SIZrr_Int:   case X86::VCVTSS2SI64Zrr_Int:
6052     case X86::CVTTSS2SIrr_Int:    case X86::CVTTSS2SI64rr_Int:
6053     case X86::VCVTTSS2SIrr_Int:   case X86::VCVTTSS2SI64rr_Int:
6054     case X86::VCVTTSS2SIZrr_Int:  case X86::VCVTTSS2SI64Zrr_Int:
6055     case X86::VCVTSS2USIZrr_Int:  case X86::VCVTSS2USI64Zrr_Int:
6056     case X86::VCVTTSS2USIZrr_Int: case X86::VCVTTSS2USI64Zrr_Int:
6057     case X86::RCPSSr_Int:   case X86::VRCPSSr_Int:
6058     case X86::RSQRTSSr_Int: case X86::VRSQRTSSr_Int:
6059     case X86::ROUNDSSr_Int: case X86::VROUNDSSr_Int:
6060     case X86::COMISSrr_Int: case X86::VCOMISSrr_Int: case X86::VCOMISSZrr_Int:
6061     case X86::UCOMISSrr_Int:case X86::VUCOMISSrr_Int:case X86::VUCOMISSZrr_Int:
6062     case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
6063     case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int:
6064     case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
6065     case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int:
6066     case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int:
6067     case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
6068     case X86::SQRTSSr_Int: case X86::VSQRTSSr_Int: case X86::VSQRTSSZr_Int:
6069     case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
6070     case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz:
6071     case X86::VCMPSSZrr_Intk:
6072     case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz:
6073     case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz:
6074     case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz:
6075     case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz:
6076     case X86::VSQRTSSZr_Intk: case X86::VSQRTSSZr_Intkz:
6077     case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz:
6078     case X86::VFMADDSS4rr_Int:   case X86::VFNMADDSS4rr_Int:
6079     case X86::VFMSUBSS4rr_Int:   case X86::VFNMSUBSS4rr_Int:
6080     case X86::VFMADD132SSr_Int:  case X86::VFNMADD132SSr_Int:
6081     case X86::VFMADD213SSr_Int:  case X86::VFNMADD213SSr_Int:
6082     case X86::VFMADD231SSr_Int:  case X86::VFNMADD231SSr_Int:
6083     case X86::VFMSUB132SSr_Int:  case X86::VFNMSUB132SSr_Int:
6084     case X86::VFMSUB213SSr_Int:  case X86::VFNMSUB213SSr_Int:
6085     case X86::VFMSUB231SSr_Int:  case X86::VFNMSUB231SSr_Int:
6086     case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int:
6087     case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int:
6088     case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int:
6089     case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int:
6090     case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int:
6091     case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int:
6092     case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk:
6093     case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk:
6094     case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk:
6095     case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk:
6096     case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk:
6097     case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk:
6098     case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz:
6099     case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz:
6100     case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz:
6101     case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz:
6102     case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz:
6103     case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz:
6104     case X86::VFIXUPIMMSSZrri:
6105     case X86::VFIXUPIMMSSZrrik:
6106     case X86::VFIXUPIMMSSZrrikz:
6107     case X86::VFPCLASSSSZrr:
6108     case X86::VFPCLASSSSZrrk:
6109     case X86::VGETEXPSSZr:
6110     case X86::VGETEXPSSZrk:
6111     case X86::VGETEXPSSZrkz:
6112     case X86::VGETMANTSSZrri:
6113     case X86::VGETMANTSSZrrik:
6114     case X86::VGETMANTSSZrrikz:
6115     case X86::VRANGESSZrri:
6116     case X86::VRANGESSZrrik:
6117     case X86::VRANGESSZrrikz:
6118     case X86::VRCP14SSZrr:
6119     case X86::VRCP14SSZrrk:
6120     case X86::VRCP14SSZrrkz:
6121     case X86::VRCP28SSZr:
6122     case X86::VRCP28SSZrk:
6123     case X86::VRCP28SSZrkz:
6124     case X86::VREDUCESSZrri:
6125     case X86::VREDUCESSZrrik:
6126     case X86::VREDUCESSZrrikz:
6127     case X86::VRNDSCALESSZr_Int:
6128     case X86::VRNDSCALESSZr_Intk:
6129     case X86::VRNDSCALESSZr_Intkz:
6130     case X86::VRSQRT14SSZrr:
6131     case X86::VRSQRT14SSZrrk:
6132     case X86::VRSQRT14SSZrrkz:
6133     case X86::VRSQRT28SSZr:
6134     case X86::VRSQRT28SSZrk:
6135     case X86::VRSQRT28SSZrkz:
6136     case X86::VSCALEFSSZrr:
6137     case X86::VSCALEFSSZrrk:
6138     case X86::VSCALEFSSZrrkz:
6139       return false;
6140     default:
6141       return true;
6142     }
6143   }
6144 
6145   if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm ||
6146        Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt ||
6147        Opc == X86::VMOVSDZrm_alt) &&
6148       RegSize > 64) {
6149     // These instructions only load 64 bits, we can't fold them if the
6150     // destination register is wider than 64 bits (8 bytes), and its user
6151     // instruction isn't scalar (SD).
6152     switch (UserOpc) {
6153     case X86::CVTSD2SSrr_Int:
6154     case X86::VCVTSD2SSrr_Int:
6155     case X86::VCVTSD2SSZrr_Int:
6156     case X86::VCVTSD2SSZrr_Intk:
6157     case X86::VCVTSD2SSZrr_Intkz:
6158     case X86::CVTSD2SIrr_Int:     case X86::CVTSD2SI64rr_Int:
6159     case X86::VCVTSD2SIrr_Int:    case X86::VCVTSD2SI64rr_Int:
6160     case X86::VCVTSD2SIZrr_Int:   case X86::VCVTSD2SI64Zrr_Int:
6161     case X86::CVTTSD2SIrr_Int:    case X86::CVTTSD2SI64rr_Int:
6162     case X86::VCVTTSD2SIrr_Int:   case X86::VCVTTSD2SI64rr_Int:
6163     case X86::VCVTTSD2SIZrr_Int:  case X86::VCVTTSD2SI64Zrr_Int:
6164     case X86::VCVTSD2USIZrr_Int:  case X86::VCVTSD2USI64Zrr_Int:
6165     case X86::VCVTTSD2USIZrr_Int: case X86::VCVTTSD2USI64Zrr_Int:
6166     case X86::ROUNDSDr_Int: case X86::VROUNDSDr_Int:
6167     case X86::COMISDrr_Int: case X86::VCOMISDrr_Int: case X86::VCOMISDZrr_Int:
6168     case X86::UCOMISDrr_Int:case X86::VUCOMISDrr_Int:case X86::VUCOMISDZrr_Int:
6169     case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
6170     case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int:
6171     case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
6172     case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int:
6173     case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int:
6174     case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
6175     case X86::SQRTSDr_Int: case X86::VSQRTSDr_Int: case X86::VSQRTSDZr_Int:
6176     case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
6177     case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz:
6178     case X86::VCMPSDZrr_Intk:
6179     case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz:
6180     case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz:
6181     case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz:
6182     case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz:
6183     case X86::VSQRTSDZr_Intk: case X86::VSQRTSDZr_Intkz:
6184     case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz:
6185     case X86::VFMADDSD4rr_Int:   case X86::VFNMADDSD4rr_Int:
6186     case X86::VFMSUBSD4rr_Int:   case X86::VFNMSUBSD4rr_Int:
6187     case X86::VFMADD132SDr_Int:  case X86::VFNMADD132SDr_Int:
6188     case X86::VFMADD213SDr_Int:  case X86::VFNMADD213SDr_Int:
6189     case X86::VFMADD231SDr_Int:  case X86::VFNMADD231SDr_Int:
6190     case X86::VFMSUB132SDr_Int:  case X86::VFNMSUB132SDr_Int:
6191     case X86::VFMSUB213SDr_Int:  case X86::VFNMSUB213SDr_Int:
6192     case X86::VFMSUB231SDr_Int:  case X86::VFNMSUB231SDr_Int:
6193     case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int:
6194     case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int:
6195     case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int:
6196     case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int:
6197     case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int:
6198     case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int:
6199     case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk:
6200     case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk:
6201     case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk:
6202     case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk:
6203     case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk:
6204     case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk:
6205     case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz:
6206     case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz:
6207     case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz:
6208     case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz:
6209     case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz:
6210     case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz:
6211     case X86::VFIXUPIMMSDZrri:
6212     case X86::VFIXUPIMMSDZrrik:
6213     case X86::VFIXUPIMMSDZrrikz:
6214     case X86::VFPCLASSSDZrr:
6215     case X86::VFPCLASSSDZrrk:
6216     case X86::VGETEXPSDZr:
6217     case X86::VGETEXPSDZrk:
6218     case X86::VGETEXPSDZrkz:
6219     case X86::VGETMANTSDZrri:
6220     case X86::VGETMANTSDZrrik:
6221     case X86::VGETMANTSDZrrikz:
6222     case X86::VRANGESDZrri:
6223     case X86::VRANGESDZrrik:
6224     case X86::VRANGESDZrrikz:
6225     case X86::VRCP14SDZrr:
6226     case X86::VRCP14SDZrrk:
6227     case X86::VRCP14SDZrrkz:
6228     case X86::VRCP28SDZr:
6229     case X86::VRCP28SDZrk:
6230     case X86::VRCP28SDZrkz:
6231     case X86::VREDUCESDZrri:
6232     case X86::VREDUCESDZrrik:
6233     case X86::VREDUCESDZrrikz:
6234     case X86::VRNDSCALESDZr_Int:
6235     case X86::VRNDSCALESDZr_Intk:
6236     case X86::VRNDSCALESDZr_Intkz:
6237     case X86::VRSQRT14SDZrr:
6238     case X86::VRSQRT14SDZrrk:
6239     case X86::VRSQRT14SDZrrkz:
6240     case X86::VRSQRT28SDZr:
6241     case X86::VRSQRT28SDZrk:
6242     case X86::VRSQRT28SDZrkz:
6243     case X86::VSCALEFSDZrr:
6244     case X86::VSCALEFSDZrrk:
6245     case X86::VSCALEFSDZrrkz:
6246       return false;
6247     default:
6248       return true;
6249     }
6250   }
6251 
6252   if ((Opc == X86::VMOVSHZrm || Opc == X86::VMOVSHZrm_alt) && RegSize > 16) {
6253     // These instructions only load 16 bits, we can't fold them if the
6254     // destination register is wider than 16 bits (2 bytes), and its user
6255     // instruction isn't scalar (SH).
6256     switch (UserOpc) {
6257     case X86::VADDSHZrr_Int:
6258     case X86::VCMPSHZrr_Int:
6259     case X86::VDIVSHZrr_Int:
6260     case X86::VMAXSHZrr_Int:
6261     case X86::VMINSHZrr_Int:
6262     case X86::VMULSHZrr_Int:
6263     case X86::VSUBSHZrr_Int:
6264     case X86::VADDSHZrr_Intk: case X86::VADDSHZrr_Intkz:
6265     case X86::VCMPSHZrr_Intk:
6266     case X86::VDIVSHZrr_Intk: case X86::VDIVSHZrr_Intkz:
6267     case X86::VMAXSHZrr_Intk: case X86::VMAXSHZrr_Intkz:
6268     case X86::VMINSHZrr_Intk: case X86::VMINSHZrr_Intkz:
6269     case X86::VMULSHZrr_Intk: case X86::VMULSHZrr_Intkz:
6270     case X86::VSUBSHZrr_Intk: case X86::VSUBSHZrr_Intkz:
6271     case X86::VFMADD132SHZr_Int: case X86::VFNMADD132SHZr_Int:
6272     case X86::VFMADD213SHZr_Int: case X86::VFNMADD213SHZr_Int:
6273     case X86::VFMADD231SHZr_Int: case X86::VFNMADD231SHZr_Int:
6274     case X86::VFMSUB132SHZr_Int: case X86::VFNMSUB132SHZr_Int:
6275     case X86::VFMSUB213SHZr_Int: case X86::VFNMSUB213SHZr_Int:
6276     case X86::VFMSUB231SHZr_Int: case X86::VFNMSUB231SHZr_Int:
6277     case X86::VFMADD132SHZr_Intk: case X86::VFNMADD132SHZr_Intk:
6278     case X86::VFMADD213SHZr_Intk: case X86::VFNMADD213SHZr_Intk:
6279     case X86::VFMADD231SHZr_Intk: case X86::VFNMADD231SHZr_Intk:
6280     case X86::VFMSUB132SHZr_Intk: case X86::VFNMSUB132SHZr_Intk:
6281     case X86::VFMSUB213SHZr_Intk: case X86::VFNMSUB213SHZr_Intk:
6282     case X86::VFMSUB231SHZr_Intk: case X86::VFNMSUB231SHZr_Intk:
6283     case X86::VFMADD132SHZr_Intkz: case X86::VFNMADD132SHZr_Intkz:
6284     case X86::VFMADD213SHZr_Intkz: case X86::VFNMADD213SHZr_Intkz:
6285     case X86::VFMADD231SHZr_Intkz: case X86::VFNMADD231SHZr_Intkz:
6286     case X86::VFMSUB132SHZr_Intkz: case X86::VFNMSUB132SHZr_Intkz:
6287     case X86::VFMSUB213SHZr_Intkz: case X86::VFNMSUB213SHZr_Intkz:
6288     case X86::VFMSUB231SHZr_Intkz: case X86::VFNMSUB231SHZr_Intkz:
6289       return false;
6290     default:
6291       return true;
6292     }
6293   }
6294 
6295   return false;
6296 }
6297 
6298 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
6299     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
6300     MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
6301     LiveIntervals *LIS) const {
6302 
6303   // TODO: Support the case where LoadMI loads a wide register, but MI
6304   // only uses a subreg.
6305   for (auto Op : Ops) {
6306     if (MI.getOperand(Op).getSubReg())
6307       return nullptr;
6308   }
6309 
6310   // If loading from a FrameIndex, fold directly from the FrameIndex.
6311   unsigned NumOps = LoadMI.getDesc().getNumOperands();
6312   int FrameIndex;
6313   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
6314     if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
6315       return nullptr;
6316     return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
6317   }
6318 
6319   // Check switch flag
6320   if (NoFusing) return nullptr;
6321 
6322   // Avoid partial and undef register update stalls unless optimizing for size.
6323   if (!MF.getFunction().hasOptSize() &&
6324       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
6325        shouldPreventUndefRegUpdateMemFold(MF, MI)))
6326     return nullptr;
6327 
6328   // Determine the alignment of the load.
6329   Align Alignment;
6330   if (LoadMI.hasOneMemOperand())
6331     Alignment = (*LoadMI.memoperands_begin())->getAlign();
6332   else
6333     switch (LoadMI.getOpcode()) {
6334     case X86::AVX512_512_SET0:
6335     case X86::AVX512_512_SETALLONES:
6336       Alignment = Align(64);
6337       break;
6338     case X86::AVX2_SETALLONES:
6339     case X86::AVX1_SETALLONES:
6340     case X86::AVX_SET0:
6341     case X86::AVX512_256_SET0:
6342       Alignment = Align(32);
6343       break;
6344     case X86::V_SET0:
6345     case X86::V_SETALLONES:
6346     case X86::AVX512_128_SET0:
6347     case X86::FsFLD0F128:
6348     case X86::AVX512_FsFLD0F128:
6349       Alignment = Align(16);
6350       break;
6351     case X86::MMX_SET0:
6352     case X86::FsFLD0SD:
6353     case X86::AVX512_FsFLD0SD:
6354       Alignment = Align(8);
6355       break;
6356     case X86::FsFLD0SS:
6357     case X86::AVX512_FsFLD0SS:
6358       Alignment = Align(4);
6359       break;
6360     case X86::AVX512_FsFLD0SH:
6361       Alignment = Align(2);
6362       break;
6363     default:
6364       return nullptr;
6365     }
6366   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
6367     unsigned NewOpc = 0;
6368     switch (MI.getOpcode()) {
6369     default: return nullptr;
6370     case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
6371     case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
6372     case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
6373     case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
6374     }
6375     // Change to CMPXXri r, 0 first.
6376     MI.setDesc(get(NewOpc));
6377     MI.getOperand(1).ChangeToImmediate(0);
6378   } else if (Ops.size() != 1)
6379     return nullptr;
6380 
6381   // Make sure the subregisters match.
6382   // Otherwise we risk changing the size of the load.
6383   if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
6384     return nullptr;
6385 
6386   SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
6387   switch (LoadMI.getOpcode()) {
6388   case X86::MMX_SET0:
6389   case X86::V_SET0:
6390   case X86::V_SETALLONES:
6391   case X86::AVX2_SETALLONES:
6392   case X86::AVX1_SETALLONES:
6393   case X86::AVX_SET0:
6394   case X86::AVX512_128_SET0:
6395   case X86::AVX512_256_SET0:
6396   case X86::AVX512_512_SET0:
6397   case X86::AVX512_512_SETALLONES:
6398   case X86::AVX512_FsFLD0SH:
6399   case X86::FsFLD0SD:
6400   case X86::AVX512_FsFLD0SD:
6401   case X86::FsFLD0SS:
6402   case X86::AVX512_FsFLD0SS:
6403   case X86::FsFLD0F128:
6404   case X86::AVX512_FsFLD0F128: {
6405     // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
6406     // Create a constant-pool entry and operands to load from it.
6407 
6408     // Medium and large mode can't fold loads this way.
6409     if (MF.getTarget().getCodeModel() != CodeModel::Small &&
6410         MF.getTarget().getCodeModel() != CodeModel::Kernel)
6411       return nullptr;
6412 
6413     // x86-32 PIC requires a PIC base register for constant pools.
6414     unsigned PICBase = 0;
6415     // Since we're using Small or Kernel code model, we can always use
6416     // RIP-relative addressing for a smaller encoding.
6417     if (Subtarget.is64Bit()) {
6418       PICBase = X86::RIP;
6419     } else if (MF.getTarget().isPositionIndependent()) {
6420       // FIXME: PICBase = getGlobalBaseReg(&MF);
6421       // This doesn't work for several reasons.
6422       // 1. GlobalBaseReg may have been spilled.
6423       // 2. It may not be live at MI.
6424       return nullptr;
6425     }
6426 
6427     // Create a constant-pool entry.
6428     MachineConstantPool &MCP = *MF.getConstantPool();
6429     Type *Ty;
6430     unsigned Opc = LoadMI.getOpcode();
6431     if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
6432       Ty = Type::getFloatTy(MF.getFunction().getContext());
6433     else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
6434       Ty = Type::getDoubleTy(MF.getFunction().getContext());
6435     else if (Opc == X86::FsFLD0F128 || Opc == X86::AVX512_FsFLD0F128)
6436       Ty = Type::getFP128Ty(MF.getFunction().getContext());
6437     else if (Opc == X86::AVX512_FsFLD0SH)
6438       Ty = Type::getHalfTy(MF.getFunction().getContext());
6439     else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
6440       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6441                                 16);
6442     else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
6443              Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
6444       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6445                                 8);
6446     else if (Opc == X86::MMX_SET0)
6447       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6448                                 2);
6449     else
6450       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6451                                 4);
6452 
6453     bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
6454                       Opc == X86::AVX512_512_SETALLONES ||
6455                       Opc == X86::AVX1_SETALLONES);
6456     const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
6457                                     Constant::getNullValue(Ty);
6458     unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
6459 
6460     // Create operands to load from the constant pool entry.
6461     MOs.push_back(MachineOperand::CreateReg(PICBase, false));
6462     MOs.push_back(MachineOperand::CreateImm(1));
6463     MOs.push_back(MachineOperand::CreateReg(0, false));
6464     MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
6465     MOs.push_back(MachineOperand::CreateReg(0, false));
6466     break;
6467   }
6468   default: {
6469     if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
6470       return nullptr;
6471 
6472     // Folding a normal load. Just copy the load's address operands.
6473     MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
6474                LoadMI.operands_begin() + NumOps);
6475     break;
6476   }
6477   }
6478   return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
6479                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
6480 }
6481 
6482 static SmallVector<MachineMemOperand *, 2>
6483 extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
6484   SmallVector<MachineMemOperand *, 2> LoadMMOs;
6485 
6486   for (MachineMemOperand *MMO : MMOs) {
6487     if (!MMO->isLoad())
6488       continue;
6489 
6490     if (!MMO->isStore()) {
6491       // Reuse the MMO.
6492       LoadMMOs.push_back(MMO);
6493     } else {
6494       // Clone the MMO and unset the store flag.
6495       LoadMMOs.push_back(MF.getMachineMemOperand(
6496           MMO, MMO->getFlags() & ~MachineMemOperand::MOStore));
6497     }
6498   }
6499 
6500   return LoadMMOs;
6501 }
6502 
6503 static SmallVector<MachineMemOperand *, 2>
6504 extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
6505   SmallVector<MachineMemOperand *, 2> StoreMMOs;
6506 
6507   for (MachineMemOperand *MMO : MMOs) {
6508     if (!MMO->isStore())
6509       continue;
6510 
6511     if (!MMO->isLoad()) {
6512       // Reuse the MMO.
6513       StoreMMOs.push_back(MMO);
6514     } else {
6515       // Clone the MMO and unset the load flag.
6516       StoreMMOs.push_back(MF.getMachineMemOperand(
6517           MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad));
6518     }
6519   }
6520 
6521   return StoreMMOs;
6522 }
6523 
6524 static unsigned getBroadcastOpcode(const X86MemoryFoldTableEntry *I,
6525                                    const TargetRegisterClass *RC,
6526                                    const X86Subtarget &STI) {
6527   assert(STI.hasAVX512() && "Expected at least AVX512!");
6528   unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC);
6529   assert((SpillSize == 64 || STI.hasVLX()) &&
6530          "Can't broadcast less than 64 bytes without AVX512VL!");
6531 
6532   switch (I->Flags & TB_BCAST_MASK) {
6533   default: llvm_unreachable("Unexpected broadcast type!");
6534   case TB_BCAST_D:
6535     switch (SpillSize) {
6536     default: llvm_unreachable("Unknown spill size");
6537     case 16: return X86::VPBROADCASTDZ128rm;
6538     case 32: return X86::VPBROADCASTDZ256rm;
6539     case 64: return X86::VPBROADCASTDZrm;
6540     }
6541     break;
6542   case TB_BCAST_Q:
6543     switch (SpillSize) {
6544     default: llvm_unreachable("Unknown spill size");
6545     case 16: return X86::VPBROADCASTQZ128rm;
6546     case 32: return X86::VPBROADCASTQZ256rm;
6547     case 64: return X86::VPBROADCASTQZrm;
6548     }
6549     break;
6550   case TB_BCAST_SS:
6551     switch (SpillSize) {
6552     default: llvm_unreachable("Unknown spill size");
6553     case 16: return X86::VBROADCASTSSZ128rm;
6554     case 32: return X86::VBROADCASTSSZ256rm;
6555     case 64: return X86::VBROADCASTSSZrm;
6556     }
6557     break;
6558   case TB_BCAST_SD:
6559     switch (SpillSize) {
6560     default: llvm_unreachable("Unknown spill size");
6561     case 16: return X86::VMOVDDUPZ128rm;
6562     case 32: return X86::VBROADCASTSDZ256rm;
6563     case 64: return X86::VBROADCASTSDZrm;
6564     }
6565     break;
6566   }
6567 }
6568 
6569 bool X86InstrInfo::unfoldMemoryOperand(
6570     MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
6571     bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
6572   const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
6573   if (I == nullptr)
6574     return false;
6575   unsigned Opc = I->DstOp;
6576   unsigned Index = I->Flags & TB_INDEX_MASK;
6577   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6578   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6579   bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
6580   if (UnfoldLoad && !FoldedLoad)
6581     return false;
6582   UnfoldLoad &= FoldedLoad;
6583   if (UnfoldStore && !FoldedStore)
6584     return false;
6585   UnfoldStore &= FoldedStore;
6586 
6587   const MCInstrDesc &MCID = get(Opc);
6588 
6589   const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
6590   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6591   // TODO: Check if 32-byte or greater accesses are slow too?
6592   if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
6593       Subtarget.isUnalignedMem16Slow())
6594     // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
6595     // conservatively assume the address is unaligned. That's bad for
6596     // performance.
6597     return false;
6598   SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
6599   SmallVector<MachineOperand,2> BeforeOps;
6600   SmallVector<MachineOperand,2> AfterOps;
6601   SmallVector<MachineOperand,4> ImpOps;
6602   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6603     MachineOperand &Op = MI.getOperand(i);
6604     if (i >= Index && i < Index + X86::AddrNumOperands)
6605       AddrOps.push_back(Op);
6606     else if (Op.isReg() && Op.isImplicit())
6607       ImpOps.push_back(Op);
6608     else if (i < Index)
6609       BeforeOps.push_back(Op);
6610     else if (i > Index)
6611       AfterOps.push_back(Op);
6612   }
6613 
6614   // Emit the load or broadcast instruction.
6615   if (UnfoldLoad) {
6616     auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
6617 
6618     unsigned Opc;
6619     if (FoldedBCast) {
6620       Opc = getBroadcastOpcode(I, RC, Subtarget);
6621     } else {
6622       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6623       bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6624       Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
6625     }
6626 
6627     DebugLoc DL;
6628     MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg);
6629     for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
6630       MIB.add(AddrOps[i]);
6631     MIB.setMemRefs(MMOs);
6632     NewMIs.push_back(MIB);
6633 
6634     if (UnfoldStore) {
6635       // Address operands cannot be marked isKill.
6636       for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
6637         MachineOperand &MO = NewMIs[0]->getOperand(i);
6638         if (MO.isReg())
6639           MO.setIsKill(false);
6640       }
6641     }
6642   }
6643 
6644   // Emit the data processing instruction.
6645   MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
6646   MachineInstrBuilder MIB(MF, DataMI);
6647 
6648   if (FoldedStore)
6649     MIB.addReg(Reg, RegState::Define);
6650   for (MachineOperand &BeforeOp : BeforeOps)
6651     MIB.add(BeforeOp);
6652   if (FoldedLoad)
6653     MIB.addReg(Reg);
6654   for (MachineOperand &AfterOp : AfterOps)
6655     MIB.add(AfterOp);
6656   for (MachineOperand &ImpOp : ImpOps) {
6657     MIB.addReg(ImpOp.getReg(),
6658                getDefRegState(ImpOp.isDef()) |
6659                RegState::Implicit |
6660                getKillRegState(ImpOp.isKill()) |
6661                getDeadRegState(ImpOp.isDead()) |
6662                getUndefRegState(ImpOp.isUndef()));
6663   }
6664   // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
6665   switch (DataMI->getOpcode()) {
6666   default: break;
6667   case X86::CMP64ri32:
6668   case X86::CMP64ri8:
6669   case X86::CMP32ri:
6670   case X86::CMP32ri8:
6671   case X86::CMP16ri:
6672   case X86::CMP16ri8:
6673   case X86::CMP8ri: {
6674     MachineOperand &MO0 = DataMI->getOperand(0);
6675     MachineOperand &MO1 = DataMI->getOperand(1);
6676     if (MO1.isImm() && MO1.getImm() == 0) {
6677       unsigned NewOpc;
6678       switch (DataMI->getOpcode()) {
6679       default: llvm_unreachable("Unreachable!");
6680       case X86::CMP64ri8:
6681       case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
6682       case X86::CMP32ri8:
6683       case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
6684       case X86::CMP16ri8:
6685       case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
6686       case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
6687       }
6688       DataMI->setDesc(get(NewOpc));
6689       MO1.ChangeToRegister(MO0.getReg(), false);
6690     }
6691   }
6692   }
6693   NewMIs.push_back(DataMI);
6694 
6695   // Emit the store instruction.
6696   if (UnfoldStore) {
6697     const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
6698     auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
6699     unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
6700     bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6701     unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
6702     DebugLoc DL;
6703     MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
6704     for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
6705       MIB.add(AddrOps[i]);
6706     MIB.addReg(Reg, RegState::Kill);
6707     MIB.setMemRefs(MMOs);
6708     NewMIs.push_back(MIB);
6709   }
6710 
6711   return true;
6712 }
6713 
6714 bool
6715 X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
6716                                   SmallVectorImpl<SDNode*> &NewNodes) const {
6717   if (!N->isMachineOpcode())
6718     return false;
6719 
6720   const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
6721   if (I == nullptr)
6722     return false;
6723   unsigned Opc = I->DstOp;
6724   unsigned Index = I->Flags & TB_INDEX_MASK;
6725   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6726   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6727   bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
6728   const MCInstrDesc &MCID = get(Opc);
6729   MachineFunction &MF = DAG.getMachineFunction();
6730   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6731   const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
6732   unsigned NumDefs = MCID.NumDefs;
6733   std::vector<SDValue> AddrOps;
6734   std::vector<SDValue> BeforeOps;
6735   std::vector<SDValue> AfterOps;
6736   SDLoc dl(N);
6737   unsigned NumOps = N->getNumOperands();
6738   for (unsigned i = 0; i != NumOps-1; ++i) {
6739     SDValue Op = N->getOperand(i);
6740     if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
6741       AddrOps.push_back(Op);
6742     else if (i < Index-NumDefs)
6743       BeforeOps.push_back(Op);
6744     else if (i > Index-NumDefs)
6745       AfterOps.push_back(Op);
6746   }
6747   SDValue Chain = N->getOperand(NumOps-1);
6748   AddrOps.push_back(Chain);
6749 
6750   // Emit the load instruction.
6751   SDNode *Load = nullptr;
6752   if (FoldedLoad) {
6753     EVT VT = *TRI.legalclasstypes_begin(*RC);
6754     auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
6755     if (MMOs.empty() && RC == &X86::VR128RegClass &&
6756         Subtarget.isUnalignedMem16Slow())
6757       // Do not introduce a slow unaligned load.
6758       return false;
6759     // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
6760     // memory access is slow above.
6761 
6762     unsigned Opc;
6763     if (FoldedBCast) {
6764       Opc = getBroadcastOpcode(I, RC, Subtarget);
6765     } else {
6766       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6767       bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6768       Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
6769     }
6770 
6771     Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps);
6772     NewNodes.push_back(Load);
6773 
6774     // Preserve memory reference information.
6775     DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs);
6776   }
6777 
6778   // Emit the data processing instruction.
6779   std::vector<EVT> VTs;
6780   const TargetRegisterClass *DstRC = nullptr;
6781   if (MCID.getNumDefs() > 0) {
6782     DstRC = getRegClass(MCID, 0, &RI, MF);
6783     VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
6784   }
6785   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
6786     EVT VT = N->getValueType(i);
6787     if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
6788       VTs.push_back(VT);
6789   }
6790   if (Load)
6791     BeforeOps.push_back(SDValue(Load, 0));
6792   llvm::append_range(BeforeOps, AfterOps);
6793   // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
6794   switch (Opc) {
6795     default: break;
6796     case X86::CMP64ri32:
6797     case X86::CMP64ri8:
6798     case X86::CMP32ri:
6799     case X86::CMP32ri8:
6800     case X86::CMP16ri:
6801     case X86::CMP16ri8:
6802     case X86::CMP8ri:
6803       if (isNullConstant(BeforeOps[1])) {
6804         switch (Opc) {
6805           default: llvm_unreachable("Unreachable!");
6806           case X86::CMP64ri8:
6807           case X86::CMP64ri32: Opc = X86::TEST64rr; break;
6808           case X86::CMP32ri8:
6809           case X86::CMP32ri:   Opc = X86::TEST32rr; break;
6810           case X86::CMP16ri8:
6811           case X86::CMP16ri:   Opc = X86::TEST16rr; break;
6812           case X86::CMP8ri:    Opc = X86::TEST8rr; break;
6813         }
6814         BeforeOps[1] = BeforeOps[0];
6815       }
6816   }
6817   SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
6818   NewNodes.push_back(NewNode);
6819 
6820   // Emit the store instruction.
6821   if (FoldedStore) {
6822     AddrOps.pop_back();
6823     AddrOps.push_back(SDValue(NewNode, 0));
6824     AddrOps.push_back(Chain);
6825     auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
6826     if (MMOs.empty() && RC == &X86::VR128RegClass &&
6827         Subtarget.isUnalignedMem16Slow())
6828       // Do not introduce a slow unaligned store.
6829       return false;
6830     // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
6831     // memory access is slow above.
6832     unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6833     bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6834     SDNode *Store =
6835         DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
6836                            dl, MVT::Other, AddrOps);
6837     NewNodes.push_back(Store);
6838 
6839     // Preserve memory reference information.
6840     DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs);
6841   }
6842 
6843   return true;
6844 }
6845 
6846 unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
6847                                       bool UnfoldLoad, bool UnfoldStore,
6848                                       unsigned *LoadRegIndex) const {
6849   const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc);
6850   if (I == nullptr)
6851     return 0;
6852   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6853   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6854   if (UnfoldLoad && !FoldedLoad)
6855     return 0;
6856   if (UnfoldStore && !FoldedStore)
6857     return 0;
6858   if (LoadRegIndex)
6859     *LoadRegIndex = I->Flags & TB_INDEX_MASK;
6860   return I->DstOp;
6861 }
6862 
6863 bool
6864 X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
6865                                      int64_t &Offset1, int64_t &Offset2) const {
6866   if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
6867     return false;
6868   unsigned Opc1 = Load1->getMachineOpcode();
6869   unsigned Opc2 = Load2->getMachineOpcode();
6870   switch (Opc1) {
6871   default: return false;
6872   case X86::MOV8rm:
6873   case X86::MOV16rm:
6874   case X86::MOV32rm:
6875   case X86::MOV64rm:
6876   case X86::LD_Fp32m:
6877   case X86::LD_Fp64m:
6878   case X86::LD_Fp80m:
6879   case X86::MOVSSrm:
6880   case X86::MOVSSrm_alt:
6881   case X86::MOVSDrm:
6882   case X86::MOVSDrm_alt:
6883   case X86::MMX_MOVD64rm:
6884   case X86::MMX_MOVQ64rm:
6885   case X86::MOVAPSrm:
6886   case X86::MOVUPSrm:
6887   case X86::MOVAPDrm:
6888   case X86::MOVUPDrm:
6889   case X86::MOVDQArm:
6890   case X86::MOVDQUrm:
6891   // AVX load instructions
6892   case X86::VMOVSSrm:
6893   case X86::VMOVSSrm_alt:
6894   case X86::VMOVSDrm:
6895   case X86::VMOVSDrm_alt:
6896   case X86::VMOVAPSrm:
6897   case X86::VMOVUPSrm:
6898   case X86::VMOVAPDrm:
6899   case X86::VMOVUPDrm:
6900   case X86::VMOVDQArm:
6901   case X86::VMOVDQUrm:
6902   case X86::VMOVAPSYrm:
6903   case X86::VMOVUPSYrm:
6904   case X86::VMOVAPDYrm:
6905   case X86::VMOVUPDYrm:
6906   case X86::VMOVDQAYrm:
6907   case X86::VMOVDQUYrm:
6908   // AVX512 load instructions
6909   case X86::VMOVSSZrm:
6910   case X86::VMOVSSZrm_alt:
6911   case X86::VMOVSDZrm:
6912   case X86::VMOVSDZrm_alt:
6913   case X86::VMOVAPSZ128rm:
6914   case X86::VMOVUPSZ128rm:
6915   case X86::VMOVAPSZ128rm_NOVLX:
6916   case X86::VMOVUPSZ128rm_NOVLX:
6917   case X86::VMOVAPDZ128rm:
6918   case X86::VMOVUPDZ128rm:
6919   case X86::VMOVDQU8Z128rm:
6920   case X86::VMOVDQU16Z128rm:
6921   case X86::VMOVDQA32Z128rm:
6922   case X86::VMOVDQU32Z128rm:
6923   case X86::VMOVDQA64Z128rm:
6924   case X86::VMOVDQU64Z128rm:
6925   case X86::VMOVAPSZ256rm:
6926   case X86::VMOVUPSZ256rm:
6927   case X86::VMOVAPSZ256rm_NOVLX:
6928   case X86::VMOVUPSZ256rm_NOVLX:
6929   case X86::VMOVAPDZ256rm:
6930   case X86::VMOVUPDZ256rm:
6931   case X86::VMOVDQU8Z256rm:
6932   case X86::VMOVDQU16Z256rm:
6933   case X86::VMOVDQA32Z256rm:
6934   case X86::VMOVDQU32Z256rm:
6935   case X86::VMOVDQA64Z256rm:
6936   case X86::VMOVDQU64Z256rm:
6937   case X86::VMOVAPSZrm:
6938   case X86::VMOVUPSZrm:
6939   case X86::VMOVAPDZrm:
6940   case X86::VMOVUPDZrm:
6941   case X86::VMOVDQU8Zrm:
6942   case X86::VMOVDQU16Zrm:
6943   case X86::VMOVDQA32Zrm:
6944   case X86::VMOVDQU32Zrm:
6945   case X86::VMOVDQA64Zrm:
6946   case X86::VMOVDQU64Zrm:
6947   case X86::KMOVBkm:
6948   case X86::KMOVWkm:
6949   case X86::KMOVDkm:
6950   case X86::KMOVQkm:
6951     break;
6952   }
6953   switch (Opc2) {
6954   default: return false;
6955   case X86::MOV8rm:
6956   case X86::MOV16rm:
6957   case X86::MOV32rm:
6958   case X86::MOV64rm:
6959   case X86::LD_Fp32m:
6960   case X86::LD_Fp64m:
6961   case X86::LD_Fp80m:
6962   case X86::MOVSSrm:
6963   case X86::MOVSSrm_alt:
6964   case X86::MOVSDrm:
6965   case X86::MOVSDrm_alt:
6966   case X86::MMX_MOVD64rm:
6967   case X86::MMX_MOVQ64rm:
6968   case X86::MOVAPSrm:
6969   case X86::MOVUPSrm:
6970   case X86::MOVAPDrm:
6971   case X86::MOVUPDrm:
6972   case X86::MOVDQArm:
6973   case X86::MOVDQUrm:
6974   // AVX load instructions
6975   case X86::VMOVSSrm:
6976   case X86::VMOVSSrm_alt:
6977   case X86::VMOVSDrm:
6978   case X86::VMOVSDrm_alt:
6979   case X86::VMOVAPSrm:
6980   case X86::VMOVUPSrm:
6981   case X86::VMOVAPDrm:
6982   case X86::VMOVUPDrm:
6983   case X86::VMOVDQArm:
6984   case X86::VMOVDQUrm:
6985   case X86::VMOVAPSYrm:
6986   case X86::VMOVUPSYrm:
6987   case X86::VMOVAPDYrm:
6988   case X86::VMOVUPDYrm:
6989   case X86::VMOVDQAYrm:
6990   case X86::VMOVDQUYrm:
6991   // AVX512 load instructions
6992   case X86::VMOVSSZrm:
6993   case X86::VMOVSSZrm_alt:
6994   case X86::VMOVSDZrm:
6995   case X86::VMOVSDZrm_alt:
6996   case X86::VMOVAPSZ128rm:
6997   case X86::VMOVUPSZ128rm:
6998   case X86::VMOVAPSZ128rm_NOVLX:
6999   case X86::VMOVUPSZ128rm_NOVLX:
7000   case X86::VMOVAPDZ128rm:
7001   case X86::VMOVUPDZ128rm:
7002   case X86::VMOVDQU8Z128rm:
7003   case X86::VMOVDQU16Z128rm:
7004   case X86::VMOVDQA32Z128rm:
7005   case X86::VMOVDQU32Z128rm:
7006   case X86::VMOVDQA64Z128rm:
7007   case X86::VMOVDQU64Z128rm:
7008   case X86::VMOVAPSZ256rm:
7009   case X86::VMOVUPSZ256rm:
7010   case X86::VMOVAPSZ256rm_NOVLX:
7011   case X86::VMOVUPSZ256rm_NOVLX:
7012   case X86::VMOVAPDZ256rm:
7013   case X86::VMOVUPDZ256rm:
7014   case X86::VMOVDQU8Z256rm:
7015   case X86::VMOVDQU16Z256rm:
7016   case X86::VMOVDQA32Z256rm:
7017   case X86::VMOVDQU32Z256rm:
7018   case X86::VMOVDQA64Z256rm:
7019   case X86::VMOVDQU64Z256rm:
7020   case X86::VMOVAPSZrm:
7021   case X86::VMOVUPSZrm:
7022   case X86::VMOVAPDZrm:
7023   case X86::VMOVUPDZrm:
7024   case X86::VMOVDQU8Zrm:
7025   case X86::VMOVDQU16Zrm:
7026   case X86::VMOVDQA32Zrm:
7027   case X86::VMOVDQU32Zrm:
7028   case X86::VMOVDQA64Zrm:
7029   case X86::VMOVDQU64Zrm:
7030   case X86::KMOVBkm:
7031   case X86::KMOVWkm:
7032   case X86::KMOVDkm:
7033   case X86::KMOVQkm:
7034     break;
7035   }
7036 
7037   // Lambda to check if both the loads have the same value for an operand index.
7038   auto HasSameOp = [&](int I) {
7039     return Load1->getOperand(I) == Load2->getOperand(I);
7040   };
7041 
7042   // All operands except the displacement should match.
7043   if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
7044       !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
7045     return false;
7046 
7047   // Chain Operand must be the same.
7048   if (!HasSameOp(5))
7049     return false;
7050 
7051   // Now let's examine if the displacements are constants.
7052   auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
7053   auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
7054   if (!Disp1 || !Disp2)
7055     return false;
7056 
7057   Offset1 = Disp1->getSExtValue();
7058   Offset2 = Disp2->getSExtValue();
7059   return true;
7060 }
7061 
7062 bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
7063                                            int64_t Offset1, int64_t Offset2,
7064                                            unsigned NumLoads) const {
7065   assert(Offset2 > Offset1);
7066   if ((Offset2 - Offset1) / 8 > 64)
7067     return false;
7068 
7069   unsigned Opc1 = Load1->getMachineOpcode();
7070   unsigned Opc2 = Load2->getMachineOpcode();
7071   if (Opc1 != Opc2)
7072     return false;  // FIXME: overly conservative?
7073 
7074   switch (Opc1) {
7075   default: break;
7076   case X86::LD_Fp32m:
7077   case X86::LD_Fp64m:
7078   case X86::LD_Fp80m:
7079   case X86::MMX_MOVD64rm:
7080   case X86::MMX_MOVQ64rm:
7081     return false;
7082   }
7083 
7084   EVT VT = Load1->getValueType(0);
7085   switch (VT.getSimpleVT().SimpleTy) {
7086   default:
7087     // XMM registers. In 64-bit mode we can be a bit more aggressive since we
7088     // have 16 of them to play with.
7089     if (Subtarget.is64Bit()) {
7090       if (NumLoads >= 3)
7091         return false;
7092     } else if (NumLoads) {
7093       return false;
7094     }
7095     break;
7096   case MVT::i8:
7097   case MVT::i16:
7098   case MVT::i32:
7099   case MVT::i64:
7100   case MVT::f32:
7101   case MVT::f64:
7102     if (NumLoads)
7103       return false;
7104     break;
7105   }
7106 
7107   return true;
7108 }
7109 
7110 bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
7111                                         const MachineBasicBlock *MBB,
7112                                         const MachineFunction &MF) const {
7113 
7114   // ENDBR instructions should not be scheduled around.
7115   unsigned Opcode = MI.getOpcode();
7116   if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 ||
7117       Opcode == X86::LDTILECFG)
7118     return true;
7119 
7120   return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF);
7121 }
7122 
7123 bool X86InstrInfo::
7124 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
7125   assert(Cond.size() == 1 && "Invalid X86 branch condition!");
7126   X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
7127   Cond[0].setImm(GetOppositeBranchCondition(CC));
7128   return false;
7129 }
7130 
7131 bool X86InstrInfo::
7132 isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
7133   // FIXME: Return false for x87 stack register classes for now. We can't
7134   // allow any loads of these registers before FpGet_ST0_80.
7135   return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
7136            RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
7137            RC == &X86::RFP80RegClass);
7138 }
7139 
7140 /// Return a virtual register initialized with the
7141 /// the global base register value. Output instructions required to
7142 /// initialize the register in the function entry block, if necessary.
7143 ///
7144 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
7145 ///
7146 unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
7147   assert((!Subtarget.is64Bit() ||
7148           MF->getTarget().getCodeModel() == CodeModel::Medium ||
7149           MF->getTarget().getCodeModel() == CodeModel::Large) &&
7150          "X86-64 PIC uses RIP relative addressing");
7151 
7152   X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
7153   Register GlobalBaseReg = X86FI->getGlobalBaseReg();
7154   if (GlobalBaseReg != 0)
7155     return GlobalBaseReg;
7156 
7157   // Create the register. The code to initialize it is inserted
7158   // later, by the CGBR pass (below).
7159   MachineRegisterInfo &RegInfo = MF->getRegInfo();
7160   GlobalBaseReg = RegInfo.createVirtualRegister(
7161       Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
7162   X86FI->setGlobalBaseReg(GlobalBaseReg);
7163   return GlobalBaseReg;
7164 }
7165 
7166 // These are the replaceable SSE instructions. Some of these have Int variants
7167 // that we don't include here. We don't want to replace instructions selected
7168 // by intrinsics.
7169 static const uint16_t ReplaceableInstrs[][3] = {
7170   //PackedSingle     PackedDouble    PackedInt
7171   { X86::MOVAPSmr,   X86::MOVAPDmr,  X86::MOVDQAmr  },
7172   { X86::MOVAPSrm,   X86::MOVAPDrm,  X86::MOVDQArm  },
7173   { X86::MOVAPSrr,   X86::MOVAPDrr,  X86::MOVDQArr  },
7174   { X86::MOVUPSmr,   X86::MOVUPDmr,  X86::MOVDQUmr  },
7175   { X86::MOVUPSrm,   X86::MOVUPDrm,  X86::MOVDQUrm  },
7176   { X86::MOVLPSmr,   X86::MOVLPDmr,  X86::MOVPQI2QImr },
7177   { X86::MOVSDmr,    X86::MOVSDmr,   X86::MOVPQI2QImr },
7178   { X86::MOVSSmr,    X86::MOVSSmr,   X86::MOVPDI2DImr },
7179   { X86::MOVSDrm,    X86::MOVSDrm,   X86::MOVQI2PQIrm },
7180   { X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm },
7181   { X86::MOVSSrm,    X86::MOVSSrm,   X86::MOVDI2PDIrm },
7182   { X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm },
7183   { X86::MOVNTPSmr,  X86::MOVNTPDmr, X86::MOVNTDQmr },
7184   { X86::ANDNPSrm,   X86::ANDNPDrm,  X86::PANDNrm   },
7185   { X86::ANDNPSrr,   X86::ANDNPDrr,  X86::PANDNrr   },
7186   { X86::ANDPSrm,    X86::ANDPDrm,   X86::PANDrm    },
7187   { X86::ANDPSrr,    X86::ANDPDrr,   X86::PANDrr    },
7188   { X86::ORPSrm,     X86::ORPDrm,    X86::PORrm     },
7189   { X86::ORPSrr,     X86::ORPDrr,    X86::PORrr     },
7190   { X86::XORPSrm,    X86::XORPDrm,   X86::PXORrm    },
7191   { X86::XORPSrr,    X86::XORPDrr,   X86::PXORrr    },
7192   { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm },
7193   { X86::MOVLHPSrr,  X86::UNPCKLPDrr, X86::PUNPCKLQDQrr },
7194   { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm },
7195   { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr },
7196   { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm },
7197   { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr },
7198   { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm },
7199   { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr },
7200   { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr },
7201   { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr },
7202   // AVX 128-bit support
7203   { X86::VMOVAPSmr,  X86::VMOVAPDmr,  X86::VMOVDQAmr  },
7204   { X86::VMOVAPSrm,  X86::VMOVAPDrm,  X86::VMOVDQArm  },
7205   { X86::VMOVAPSrr,  X86::VMOVAPDrr,  X86::VMOVDQArr  },
7206   { X86::VMOVUPSmr,  X86::VMOVUPDmr,  X86::VMOVDQUmr  },
7207   { X86::VMOVUPSrm,  X86::VMOVUPDrm,  X86::VMOVDQUrm  },
7208   { X86::VMOVLPSmr,  X86::VMOVLPDmr,  X86::VMOVPQI2QImr },
7209   { X86::VMOVSDmr,   X86::VMOVSDmr,   X86::VMOVPQI2QImr },
7210   { X86::VMOVSSmr,   X86::VMOVSSmr,   X86::VMOVPDI2DImr },
7211   { X86::VMOVSDrm,   X86::VMOVSDrm,   X86::VMOVQI2PQIrm },
7212   { X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm },
7213   { X86::VMOVSSrm,   X86::VMOVSSrm,   X86::VMOVDI2PDIrm },
7214   { X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm },
7215   { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
7216   { X86::VANDNPSrm,  X86::VANDNPDrm,  X86::VPANDNrm   },
7217   { X86::VANDNPSrr,  X86::VANDNPDrr,  X86::VPANDNrr   },
7218   { X86::VANDPSrm,   X86::VANDPDrm,   X86::VPANDrm    },
7219   { X86::VANDPSrr,   X86::VANDPDrr,   X86::VPANDrr    },
7220   { X86::VORPSrm,    X86::VORPDrm,    X86::VPORrm     },
7221   { X86::VORPSrr,    X86::VORPDrr,    X86::VPORrr     },
7222   { X86::VXORPSrm,   X86::VXORPDrm,   X86::VPXORrm    },
7223   { X86::VXORPSrr,   X86::VXORPDrr,   X86::VPXORrr    },
7224   { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm },
7225   { X86::VMOVLHPSrr,  X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr },
7226   { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm },
7227   { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr },
7228   { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm },
7229   { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr },
7230   { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm },
7231   { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr },
7232   { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr },
7233   { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr },
7234   // AVX 256-bit support
7235   { X86::VMOVAPSYmr,   X86::VMOVAPDYmr,   X86::VMOVDQAYmr  },
7236   { X86::VMOVAPSYrm,   X86::VMOVAPDYrm,   X86::VMOVDQAYrm  },
7237   { X86::VMOVAPSYrr,   X86::VMOVAPDYrr,   X86::VMOVDQAYrr  },
7238   { X86::VMOVUPSYmr,   X86::VMOVUPDYmr,   X86::VMOVDQUYmr  },
7239   { X86::VMOVUPSYrm,   X86::VMOVUPDYrm,   X86::VMOVDQUYrm  },
7240   { X86::VMOVNTPSYmr,  X86::VMOVNTPDYmr,  X86::VMOVNTDQYmr },
7241   { X86::VPERMPSYrm,   X86::VPERMPSYrm,   X86::VPERMDYrm },
7242   { X86::VPERMPSYrr,   X86::VPERMPSYrr,   X86::VPERMDYrr },
7243   { X86::VPERMPDYmi,   X86::VPERMPDYmi,   X86::VPERMQYmi },
7244   { X86::VPERMPDYri,   X86::VPERMPDYri,   X86::VPERMQYri },
7245   // AVX512 support
7246   { X86::VMOVLPSZ128mr,  X86::VMOVLPDZ128mr,  X86::VMOVPQI2QIZmr  },
7247   { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr },
7248   { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr },
7249   { X86::VMOVNTPSZmr,    X86::VMOVNTPDZmr,    X86::VMOVNTDQZmr    },
7250   { X86::VMOVSDZmr,      X86::VMOVSDZmr,      X86::VMOVPQI2QIZmr  },
7251   { X86::VMOVSSZmr,      X86::VMOVSSZmr,      X86::VMOVPDI2DIZmr  },
7252   { X86::VMOVSDZrm,      X86::VMOVSDZrm,      X86::VMOVQI2PQIZrm  },
7253   { X86::VMOVSDZrm_alt,  X86::VMOVSDZrm_alt,  X86::VMOVQI2PQIZrm  },
7254   { X86::VMOVSSZrm,      X86::VMOVSSZrm,      X86::VMOVDI2PDIZrm  },
7255   { X86::VMOVSSZrm_alt,  X86::VMOVSSZrm_alt,  X86::VMOVDI2PDIZrm  },
7256   { X86::VBROADCASTSSZ128rr,X86::VBROADCASTSSZ128rr,X86::VPBROADCASTDZ128rr },
7257   { X86::VBROADCASTSSZ128rm,X86::VBROADCASTSSZ128rm,X86::VPBROADCASTDZ128rm },
7258   { X86::VBROADCASTSSZ256rr,X86::VBROADCASTSSZ256rr,X86::VPBROADCASTDZ256rr },
7259   { X86::VBROADCASTSSZ256rm,X86::VBROADCASTSSZ256rm,X86::VPBROADCASTDZ256rm },
7260   { X86::VBROADCASTSSZrr,   X86::VBROADCASTSSZrr,   X86::VPBROADCASTDZrr },
7261   { X86::VBROADCASTSSZrm,   X86::VBROADCASTSSZrm,   X86::VPBROADCASTDZrm },
7262   { X86::VMOVDDUPZ128rr,    X86::VMOVDDUPZ128rr,    X86::VPBROADCASTQZ128rr },
7263   { X86::VMOVDDUPZ128rm,    X86::VMOVDDUPZ128rm,    X86::VPBROADCASTQZ128rm },
7264   { X86::VBROADCASTSDZ256rr,X86::VBROADCASTSDZ256rr,X86::VPBROADCASTQZ256rr },
7265   { X86::VBROADCASTSDZ256rm,X86::VBROADCASTSDZ256rm,X86::VPBROADCASTQZ256rm },
7266   { X86::VBROADCASTSDZrr,   X86::VBROADCASTSDZrr,   X86::VPBROADCASTQZrr },
7267   { X86::VBROADCASTSDZrm,   X86::VBROADCASTSDZrm,   X86::VPBROADCASTQZrm },
7268   { X86::VINSERTF32x4Zrr,   X86::VINSERTF32x4Zrr,   X86::VINSERTI32x4Zrr },
7269   { X86::VINSERTF32x4Zrm,   X86::VINSERTF32x4Zrm,   X86::VINSERTI32x4Zrm },
7270   { X86::VINSERTF32x8Zrr,   X86::VINSERTF32x8Zrr,   X86::VINSERTI32x8Zrr },
7271   { X86::VINSERTF32x8Zrm,   X86::VINSERTF32x8Zrm,   X86::VINSERTI32x8Zrm },
7272   { X86::VINSERTF64x2Zrr,   X86::VINSERTF64x2Zrr,   X86::VINSERTI64x2Zrr },
7273   { X86::VINSERTF64x2Zrm,   X86::VINSERTF64x2Zrm,   X86::VINSERTI64x2Zrm },
7274   { X86::VINSERTF64x4Zrr,   X86::VINSERTF64x4Zrr,   X86::VINSERTI64x4Zrr },
7275   { X86::VINSERTF64x4Zrm,   X86::VINSERTF64x4Zrm,   X86::VINSERTI64x4Zrm },
7276   { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr },
7277   { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm },
7278   { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr },
7279   { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm },
7280   { X86::VEXTRACTF32x4Zrr,   X86::VEXTRACTF32x4Zrr,   X86::VEXTRACTI32x4Zrr },
7281   { X86::VEXTRACTF32x4Zmr,   X86::VEXTRACTF32x4Zmr,   X86::VEXTRACTI32x4Zmr },
7282   { X86::VEXTRACTF32x8Zrr,   X86::VEXTRACTF32x8Zrr,   X86::VEXTRACTI32x8Zrr },
7283   { X86::VEXTRACTF32x8Zmr,   X86::VEXTRACTF32x8Zmr,   X86::VEXTRACTI32x8Zmr },
7284   { X86::VEXTRACTF64x2Zrr,   X86::VEXTRACTF64x2Zrr,   X86::VEXTRACTI64x2Zrr },
7285   { X86::VEXTRACTF64x2Zmr,   X86::VEXTRACTF64x2Zmr,   X86::VEXTRACTI64x2Zmr },
7286   { X86::VEXTRACTF64x4Zrr,   X86::VEXTRACTF64x4Zrr,   X86::VEXTRACTI64x4Zrr },
7287   { X86::VEXTRACTF64x4Zmr,   X86::VEXTRACTF64x4Zmr,   X86::VEXTRACTI64x4Zmr },
7288   { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr },
7289   { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr },
7290   { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr },
7291   { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr },
7292   { X86::VPERMILPSmi,        X86::VPERMILPSmi,        X86::VPSHUFDmi },
7293   { X86::VPERMILPSri,        X86::VPERMILPSri,        X86::VPSHUFDri },
7294   { X86::VPERMILPSZ128mi,    X86::VPERMILPSZ128mi,    X86::VPSHUFDZ128mi },
7295   { X86::VPERMILPSZ128ri,    X86::VPERMILPSZ128ri,    X86::VPSHUFDZ128ri },
7296   { X86::VPERMILPSZ256mi,    X86::VPERMILPSZ256mi,    X86::VPSHUFDZ256mi },
7297   { X86::VPERMILPSZ256ri,    X86::VPERMILPSZ256ri,    X86::VPSHUFDZ256ri },
7298   { X86::VPERMILPSZmi,       X86::VPERMILPSZmi,       X86::VPSHUFDZmi },
7299   { X86::VPERMILPSZri,       X86::VPERMILPSZri,       X86::VPSHUFDZri },
7300   { X86::VPERMPSZ256rm,      X86::VPERMPSZ256rm,      X86::VPERMDZ256rm },
7301   { X86::VPERMPSZ256rr,      X86::VPERMPSZ256rr,      X86::VPERMDZ256rr },
7302   { X86::VPERMPDZ256mi,      X86::VPERMPDZ256mi,      X86::VPERMQZ256mi },
7303   { X86::VPERMPDZ256ri,      X86::VPERMPDZ256ri,      X86::VPERMQZ256ri },
7304   { X86::VPERMPDZ256rm,      X86::VPERMPDZ256rm,      X86::VPERMQZ256rm },
7305   { X86::VPERMPDZ256rr,      X86::VPERMPDZ256rr,      X86::VPERMQZ256rr },
7306   { X86::VPERMPSZrm,         X86::VPERMPSZrm,         X86::VPERMDZrm },
7307   { X86::VPERMPSZrr,         X86::VPERMPSZrr,         X86::VPERMDZrr },
7308   { X86::VPERMPDZmi,         X86::VPERMPDZmi,         X86::VPERMQZmi },
7309   { X86::VPERMPDZri,         X86::VPERMPDZri,         X86::VPERMQZri },
7310   { X86::VPERMPDZrm,         X86::VPERMPDZrm,         X86::VPERMQZrm },
7311   { X86::VPERMPDZrr,         X86::VPERMPDZrr,         X86::VPERMQZrr },
7312   { X86::VUNPCKLPDZ256rm,    X86::VUNPCKLPDZ256rm,    X86::VPUNPCKLQDQZ256rm },
7313   { X86::VUNPCKLPDZ256rr,    X86::VUNPCKLPDZ256rr,    X86::VPUNPCKLQDQZ256rr },
7314   { X86::VUNPCKHPDZ256rm,    X86::VUNPCKHPDZ256rm,    X86::VPUNPCKHQDQZ256rm },
7315   { X86::VUNPCKHPDZ256rr,    X86::VUNPCKHPDZ256rr,    X86::VPUNPCKHQDQZ256rr },
7316   { X86::VUNPCKLPSZ256rm,    X86::VUNPCKLPSZ256rm,    X86::VPUNPCKLDQZ256rm },
7317   { X86::VUNPCKLPSZ256rr,    X86::VUNPCKLPSZ256rr,    X86::VPUNPCKLDQZ256rr },
7318   { X86::VUNPCKHPSZ256rm,    X86::VUNPCKHPSZ256rm,    X86::VPUNPCKHDQZ256rm },
7319   { X86::VUNPCKHPSZ256rr,    X86::VUNPCKHPSZ256rr,    X86::VPUNPCKHDQZ256rr },
7320   { X86::VUNPCKLPDZ128rm,    X86::VUNPCKLPDZ128rm,    X86::VPUNPCKLQDQZ128rm },
7321   { X86::VMOVLHPSZrr,        X86::VUNPCKLPDZ128rr,    X86::VPUNPCKLQDQZ128rr },
7322   { X86::VUNPCKHPDZ128rm,    X86::VUNPCKHPDZ128rm,    X86::VPUNPCKHQDQZ128rm },
7323   { X86::VUNPCKHPDZ128rr,    X86::VUNPCKHPDZ128rr,    X86::VPUNPCKHQDQZ128rr },
7324   { X86::VUNPCKLPSZ128rm,    X86::VUNPCKLPSZ128rm,    X86::VPUNPCKLDQZ128rm },
7325   { X86::VUNPCKLPSZ128rr,    X86::VUNPCKLPSZ128rr,    X86::VPUNPCKLDQZ128rr },
7326   { X86::VUNPCKHPSZ128rm,    X86::VUNPCKHPSZ128rm,    X86::VPUNPCKHDQZ128rm },
7327   { X86::VUNPCKHPSZ128rr,    X86::VUNPCKHPSZ128rr,    X86::VPUNPCKHDQZ128rr },
7328   { X86::VUNPCKLPDZrm,       X86::VUNPCKLPDZrm,       X86::VPUNPCKLQDQZrm },
7329   { X86::VUNPCKLPDZrr,       X86::VUNPCKLPDZrr,       X86::VPUNPCKLQDQZrr },
7330   { X86::VUNPCKHPDZrm,       X86::VUNPCKHPDZrm,       X86::VPUNPCKHQDQZrm },
7331   { X86::VUNPCKHPDZrr,       X86::VUNPCKHPDZrr,       X86::VPUNPCKHQDQZrr },
7332   { X86::VUNPCKLPSZrm,       X86::VUNPCKLPSZrm,       X86::VPUNPCKLDQZrm },
7333   { X86::VUNPCKLPSZrr,       X86::VUNPCKLPSZrr,       X86::VPUNPCKLDQZrr },
7334   { X86::VUNPCKHPSZrm,       X86::VUNPCKHPSZrm,       X86::VPUNPCKHDQZrm },
7335   { X86::VUNPCKHPSZrr,       X86::VUNPCKHPSZrr,       X86::VPUNPCKHDQZrr },
7336   { X86::VEXTRACTPSZmr,      X86::VEXTRACTPSZmr,      X86::VPEXTRDZmr },
7337   { X86::VEXTRACTPSZrr,      X86::VEXTRACTPSZrr,      X86::VPEXTRDZrr },
7338 };
7339 
7340 static const uint16_t ReplaceableInstrsAVX2[][3] = {
7341   //PackedSingle       PackedDouble       PackedInt
7342   { X86::VANDNPSYrm,   X86::VANDNPDYrm,   X86::VPANDNYrm   },
7343   { X86::VANDNPSYrr,   X86::VANDNPDYrr,   X86::VPANDNYrr   },
7344   { X86::VANDPSYrm,    X86::VANDPDYrm,    X86::VPANDYrm    },
7345   { X86::VANDPSYrr,    X86::VANDPDYrr,    X86::VPANDYrr    },
7346   { X86::VORPSYrm,     X86::VORPDYrm,     X86::VPORYrm     },
7347   { X86::VORPSYrr,     X86::VORPDYrr,     X86::VPORYrr     },
7348   { X86::VXORPSYrm,    X86::VXORPDYrm,    X86::VPXORYrm    },
7349   { X86::VXORPSYrr,    X86::VXORPDYrr,    X86::VPXORYrr    },
7350   { X86::VPERM2F128rm,   X86::VPERM2F128rm,   X86::VPERM2I128rm },
7351   { X86::VPERM2F128rr,   X86::VPERM2F128rr,   X86::VPERM2I128rr },
7352   { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
7353   { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
7354   { X86::VMOVDDUPrm,     X86::VMOVDDUPrm,     X86::VPBROADCASTQrm},
7355   { X86::VMOVDDUPrr,     X86::VMOVDDUPrr,     X86::VPBROADCASTQrr},
7356   { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
7357   { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
7358   { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
7359   { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm},
7360   { X86::VBROADCASTF128,  X86::VBROADCASTF128,  X86::VBROADCASTI128 },
7361   { X86::VBLENDPSYrri,    X86::VBLENDPSYrri,    X86::VPBLENDDYrri },
7362   { X86::VBLENDPSYrmi,    X86::VBLENDPSYrmi,    X86::VPBLENDDYrmi },
7363   { X86::VPERMILPSYmi,    X86::VPERMILPSYmi,    X86::VPSHUFDYmi },
7364   { X86::VPERMILPSYri,    X86::VPERMILPSYri,    X86::VPSHUFDYri },
7365   { X86::VUNPCKLPDYrm,    X86::VUNPCKLPDYrm,    X86::VPUNPCKLQDQYrm },
7366   { X86::VUNPCKLPDYrr,    X86::VUNPCKLPDYrr,    X86::VPUNPCKLQDQYrr },
7367   { X86::VUNPCKHPDYrm,    X86::VUNPCKHPDYrm,    X86::VPUNPCKHQDQYrm },
7368   { X86::VUNPCKHPDYrr,    X86::VUNPCKHPDYrr,    X86::VPUNPCKHQDQYrr },
7369   { X86::VUNPCKLPSYrm,    X86::VUNPCKLPSYrm,    X86::VPUNPCKLDQYrm },
7370   { X86::VUNPCKLPSYrr,    X86::VUNPCKLPSYrr,    X86::VPUNPCKLDQYrr },
7371   { X86::VUNPCKHPSYrm,    X86::VUNPCKHPSYrm,    X86::VPUNPCKHDQYrm },
7372   { X86::VUNPCKHPSYrr,    X86::VUNPCKHPSYrr,    X86::VPUNPCKHDQYrr },
7373 };
7374 
7375 static const uint16_t ReplaceableInstrsFP[][3] = {
7376   //PackedSingle         PackedDouble
7377   { X86::MOVLPSrm,       X86::MOVLPDrm,      X86::INSTRUCTION_LIST_END },
7378   { X86::MOVHPSrm,       X86::MOVHPDrm,      X86::INSTRUCTION_LIST_END },
7379   { X86::MOVHPSmr,       X86::MOVHPDmr,      X86::INSTRUCTION_LIST_END },
7380   { X86::VMOVLPSrm,      X86::VMOVLPDrm,     X86::INSTRUCTION_LIST_END },
7381   { X86::VMOVHPSrm,      X86::VMOVHPDrm,     X86::INSTRUCTION_LIST_END },
7382   { X86::VMOVHPSmr,      X86::VMOVHPDmr,     X86::INSTRUCTION_LIST_END },
7383   { X86::VMOVLPSZ128rm,  X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END },
7384   { X86::VMOVHPSZ128rm,  X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END },
7385   { X86::VMOVHPSZ128mr,  X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END },
7386 };
7387 
7388 static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
7389   //PackedSingle       PackedDouble       PackedInt
7390   { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
7391   { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
7392   { X86::VINSERTF128rm,  X86::VINSERTF128rm,  X86::VINSERTI128rm },
7393   { X86::VINSERTF128rr,  X86::VINSERTF128rr,  X86::VINSERTI128rr },
7394 };
7395 
7396 static const uint16_t ReplaceableInstrsAVX512[][4] = {
7397   // Two integer columns for 64-bit and 32-bit elements.
7398   //PackedSingle        PackedDouble        PackedInt             PackedInt
7399   { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr  },
7400   { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm  },
7401   { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr  },
7402   { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr  },
7403   { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm  },
7404   { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr  },
7405   { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm  },
7406   { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr  },
7407   { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr  },
7408   { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm  },
7409   { X86::VMOVAPSZmr,    X86::VMOVAPDZmr,    X86::VMOVDQA64Zmr,    X86::VMOVDQA32Zmr     },
7410   { X86::VMOVAPSZrm,    X86::VMOVAPDZrm,    X86::VMOVDQA64Zrm,    X86::VMOVDQA32Zrm     },
7411   { X86::VMOVAPSZrr,    X86::VMOVAPDZrr,    X86::VMOVDQA64Zrr,    X86::VMOVDQA32Zrr     },
7412   { X86::VMOVUPSZmr,    X86::VMOVUPDZmr,    X86::VMOVDQU64Zmr,    X86::VMOVDQU32Zmr     },
7413   { X86::VMOVUPSZrm,    X86::VMOVUPDZrm,    X86::VMOVDQU64Zrm,    X86::VMOVDQU32Zrm     },
7414 };
7415 
7416 static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
7417   // Two integer columns for 64-bit and 32-bit elements.
7418   //PackedSingle        PackedDouble        PackedInt           PackedInt
7419   { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
7420   { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
7421   { X86::VANDPSZ128rm,  X86::VANDPDZ128rm,  X86::VPANDQZ128rm,  X86::VPANDDZ128rm  },
7422   { X86::VANDPSZ128rr,  X86::VANDPDZ128rr,  X86::VPANDQZ128rr,  X86::VPANDDZ128rr  },
7423   { X86::VORPSZ128rm,   X86::VORPDZ128rm,   X86::VPORQZ128rm,   X86::VPORDZ128rm   },
7424   { X86::VORPSZ128rr,   X86::VORPDZ128rr,   X86::VPORQZ128rr,   X86::VPORDZ128rr   },
7425   { X86::VXORPSZ128rm,  X86::VXORPDZ128rm,  X86::VPXORQZ128rm,  X86::VPXORDZ128rm  },
7426   { X86::VXORPSZ128rr,  X86::VXORPDZ128rr,  X86::VPXORQZ128rr,  X86::VPXORDZ128rr  },
7427   { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
7428   { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
7429   { X86::VANDPSZ256rm,  X86::VANDPDZ256rm,  X86::VPANDQZ256rm,  X86::VPANDDZ256rm  },
7430   { X86::VANDPSZ256rr,  X86::VANDPDZ256rr,  X86::VPANDQZ256rr,  X86::VPANDDZ256rr  },
7431   { X86::VORPSZ256rm,   X86::VORPDZ256rm,   X86::VPORQZ256rm,   X86::VPORDZ256rm   },
7432   { X86::VORPSZ256rr,   X86::VORPDZ256rr,   X86::VPORQZ256rr,   X86::VPORDZ256rr   },
7433   { X86::VXORPSZ256rm,  X86::VXORPDZ256rm,  X86::VPXORQZ256rm,  X86::VPXORDZ256rm  },
7434   { X86::VXORPSZ256rr,  X86::VXORPDZ256rr,  X86::VPXORQZ256rr,  X86::VPXORDZ256rr  },
7435   { X86::VANDNPSZrm,    X86::VANDNPDZrm,    X86::VPANDNQZrm,    X86::VPANDNDZrm    },
7436   { X86::VANDNPSZrr,    X86::VANDNPDZrr,    X86::VPANDNQZrr,    X86::VPANDNDZrr    },
7437   { X86::VANDPSZrm,     X86::VANDPDZrm,     X86::VPANDQZrm,     X86::VPANDDZrm     },
7438   { X86::VANDPSZrr,     X86::VANDPDZrr,     X86::VPANDQZrr,     X86::VPANDDZrr     },
7439   { X86::VORPSZrm,      X86::VORPDZrm,      X86::VPORQZrm,      X86::VPORDZrm      },
7440   { X86::VORPSZrr,      X86::VORPDZrr,      X86::VPORQZrr,      X86::VPORDZrr      },
7441   { X86::VXORPSZrm,     X86::VXORPDZrm,     X86::VPXORQZrm,     X86::VPXORDZrm     },
7442   { X86::VXORPSZrr,     X86::VXORPDZrr,     X86::VPXORQZrr,     X86::VPXORDZrr     },
7443 };
7444 
7445 static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = {
7446   // Two integer columns for 64-bit and 32-bit elements.
7447   //PackedSingle          PackedDouble
7448   //PackedInt             PackedInt
7449   { X86::VANDNPSZ128rmk,  X86::VANDNPDZ128rmk,
7450     X86::VPANDNQZ128rmk,  X86::VPANDNDZ128rmk  },
7451   { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz,
7452     X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz },
7453   { X86::VANDNPSZ128rrk,  X86::VANDNPDZ128rrk,
7454     X86::VPANDNQZ128rrk,  X86::VPANDNDZ128rrk  },
7455   { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz,
7456     X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz },
7457   { X86::VANDPSZ128rmk,   X86::VANDPDZ128rmk,
7458     X86::VPANDQZ128rmk,   X86::VPANDDZ128rmk   },
7459   { X86::VANDPSZ128rmkz,  X86::VANDPDZ128rmkz,
7460     X86::VPANDQZ128rmkz,  X86::VPANDDZ128rmkz  },
7461   { X86::VANDPSZ128rrk,   X86::VANDPDZ128rrk,
7462     X86::VPANDQZ128rrk,   X86::VPANDDZ128rrk   },
7463   { X86::VANDPSZ128rrkz,  X86::VANDPDZ128rrkz,
7464     X86::VPANDQZ128rrkz,  X86::VPANDDZ128rrkz  },
7465   { X86::VORPSZ128rmk,    X86::VORPDZ128rmk,
7466     X86::VPORQZ128rmk,    X86::VPORDZ128rmk    },
7467   { X86::VORPSZ128rmkz,   X86::VORPDZ128rmkz,
7468     X86::VPORQZ128rmkz,   X86::VPORDZ128rmkz   },
7469   { X86::VORPSZ128rrk,    X86::VORPDZ128rrk,
7470     X86::VPORQZ128rrk,    X86::VPORDZ128rrk    },
7471   { X86::VORPSZ128rrkz,   X86::VORPDZ128rrkz,
7472     X86::VPORQZ128rrkz,   X86::VPORDZ128rrkz   },
7473   { X86::VXORPSZ128rmk,   X86::VXORPDZ128rmk,
7474     X86::VPXORQZ128rmk,   X86::VPXORDZ128rmk   },
7475   { X86::VXORPSZ128rmkz,  X86::VXORPDZ128rmkz,
7476     X86::VPXORQZ128rmkz,  X86::VPXORDZ128rmkz  },
7477   { X86::VXORPSZ128rrk,   X86::VXORPDZ128rrk,
7478     X86::VPXORQZ128rrk,   X86::VPXORDZ128rrk   },
7479   { X86::VXORPSZ128rrkz,  X86::VXORPDZ128rrkz,
7480     X86::VPXORQZ128rrkz,  X86::VPXORDZ128rrkz  },
7481   { X86::VANDNPSZ256rmk,  X86::VANDNPDZ256rmk,
7482     X86::VPANDNQZ256rmk,  X86::VPANDNDZ256rmk  },
7483   { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz,
7484     X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz },
7485   { X86::VANDNPSZ256rrk,  X86::VANDNPDZ256rrk,
7486     X86::VPANDNQZ256rrk,  X86::VPANDNDZ256rrk  },
7487   { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz,
7488     X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz },
7489   { X86::VANDPSZ256rmk,   X86::VANDPDZ256rmk,
7490     X86::VPANDQZ256rmk,   X86::VPANDDZ256rmk   },
7491   { X86::VANDPSZ256rmkz,  X86::VANDPDZ256rmkz,
7492     X86::VPANDQZ256rmkz,  X86::VPANDDZ256rmkz  },
7493   { X86::VANDPSZ256rrk,   X86::VANDPDZ256rrk,
7494     X86::VPANDQZ256rrk,   X86::VPANDDZ256rrk   },
7495   { X86::VANDPSZ256rrkz,  X86::VANDPDZ256rrkz,
7496     X86::VPANDQZ256rrkz,  X86::VPANDDZ256rrkz  },
7497   { X86::VORPSZ256rmk,    X86::VORPDZ256rmk,
7498     X86::VPORQZ256rmk,    X86::VPORDZ256rmk    },
7499   { X86::VORPSZ256rmkz,   X86::VORPDZ256rmkz,
7500     X86::VPORQZ256rmkz,   X86::VPORDZ256rmkz   },
7501   { X86::VORPSZ256rrk,    X86::VORPDZ256rrk,
7502     X86::VPORQZ256rrk,    X86::VPORDZ256rrk    },
7503   { X86::VORPSZ256rrkz,   X86::VORPDZ256rrkz,
7504     X86::VPORQZ256rrkz,   X86::VPORDZ256rrkz   },
7505   { X86::VXORPSZ256rmk,   X86::VXORPDZ256rmk,
7506     X86::VPXORQZ256rmk,   X86::VPXORDZ256rmk   },
7507   { X86::VXORPSZ256rmkz,  X86::VXORPDZ256rmkz,
7508     X86::VPXORQZ256rmkz,  X86::VPXORDZ256rmkz  },
7509   { X86::VXORPSZ256rrk,   X86::VXORPDZ256rrk,
7510     X86::VPXORQZ256rrk,   X86::VPXORDZ256rrk   },
7511   { X86::VXORPSZ256rrkz,  X86::VXORPDZ256rrkz,
7512     X86::VPXORQZ256rrkz,  X86::VPXORDZ256rrkz  },
7513   { X86::VANDNPSZrmk,     X86::VANDNPDZrmk,
7514     X86::VPANDNQZrmk,     X86::VPANDNDZrmk     },
7515   { X86::VANDNPSZrmkz,    X86::VANDNPDZrmkz,
7516     X86::VPANDNQZrmkz,    X86::VPANDNDZrmkz    },
7517   { X86::VANDNPSZrrk,     X86::VANDNPDZrrk,
7518     X86::VPANDNQZrrk,     X86::VPANDNDZrrk     },
7519   { X86::VANDNPSZrrkz,    X86::VANDNPDZrrkz,
7520     X86::VPANDNQZrrkz,    X86::VPANDNDZrrkz    },
7521   { X86::VANDPSZrmk,      X86::VANDPDZrmk,
7522     X86::VPANDQZrmk,      X86::VPANDDZrmk      },
7523   { X86::VANDPSZrmkz,     X86::VANDPDZrmkz,
7524     X86::VPANDQZrmkz,     X86::VPANDDZrmkz     },
7525   { X86::VANDPSZrrk,      X86::VANDPDZrrk,
7526     X86::VPANDQZrrk,      X86::VPANDDZrrk      },
7527   { X86::VANDPSZrrkz,     X86::VANDPDZrrkz,
7528     X86::VPANDQZrrkz,     X86::VPANDDZrrkz     },
7529   { X86::VORPSZrmk,       X86::VORPDZrmk,
7530     X86::VPORQZrmk,       X86::VPORDZrmk       },
7531   { X86::VORPSZrmkz,      X86::VORPDZrmkz,
7532     X86::VPORQZrmkz,      X86::VPORDZrmkz      },
7533   { X86::VORPSZrrk,       X86::VORPDZrrk,
7534     X86::VPORQZrrk,       X86::VPORDZrrk       },
7535   { X86::VORPSZrrkz,      X86::VORPDZrrkz,
7536     X86::VPORQZrrkz,      X86::VPORDZrrkz      },
7537   { X86::VXORPSZrmk,      X86::VXORPDZrmk,
7538     X86::VPXORQZrmk,      X86::VPXORDZrmk      },
7539   { X86::VXORPSZrmkz,     X86::VXORPDZrmkz,
7540     X86::VPXORQZrmkz,     X86::VPXORDZrmkz     },
7541   { X86::VXORPSZrrk,      X86::VXORPDZrrk,
7542     X86::VPXORQZrrk,      X86::VPXORDZrrk      },
7543   { X86::VXORPSZrrkz,     X86::VXORPDZrrkz,
7544     X86::VPXORQZrrkz,     X86::VPXORDZrrkz     },
7545   // Broadcast loads can be handled the same as masked operations to avoid
7546   // changing element size.
7547   { X86::VANDNPSZ128rmb,  X86::VANDNPDZ128rmb,
7548     X86::VPANDNQZ128rmb,  X86::VPANDNDZ128rmb  },
7549   { X86::VANDPSZ128rmb,   X86::VANDPDZ128rmb,
7550     X86::VPANDQZ128rmb,   X86::VPANDDZ128rmb   },
7551   { X86::VORPSZ128rmb,    X86::VORPDZ128rmb,
7552     X86::VPORQZ128rmb,    X86::VPORDZ128rmb    },
7553   { X86::VXORPSZ128rmb,   X86::VXORPDZ128rmb,
7554     X86::VPXORQZ128rmb,   X86::VPXORDZ128rmb   },
7555   { X86::VANDNPSZ256rmb,  X86::VANDNPDZ256rmb,
7556     X86::VPANDNQZ256rmb,  X86::VPANDNDZ256rmb  },
7557   { X86::VANDPSZ256rmb,   X86::VANDPDZ256rmb,
7558     X86::VPANDQZ256rmb,   X86::VPANDDZ256rmb   },
7559   { X86::VORPSZ256rmb,    X86::VORPDZ256rmb,
7560     X86::VPORQZ256rmb,    X86::VPORDZ256rmb    },
7561   { X86::VXORPSZ256rmb,   X86::VXORPDZ256rmb,
7562     X86::VPXORQZ256rmb,   X86::VPXORDZ256rmb   },
7563   { X86::VANDNPSZrmb,     X86::VANDNPDZrmb,
7564     X86::VPANDNQZrmb,     X86::VPANDNDZrmb     },
7565   { X86::VANDPSZrmb,      X86::VANDPDZrmb,
7566     X86::VPANDQZrmb,      X86::VPANDDZrmb      },
7567   { X86::VANDPSZrmb,      X86::VANDPDZrmb,
7568     X86::VPANDQZrmb,      X86::VPANDDZrmb      },
7569   { X86::VORPSZrmb,       X86::VORPDZrmb,
7570     X86::VPORQZrmb,       X86::VPORDZrmb       },
7571   { X86::VXORPSZrmb,      X86::VXORPDZrmb,
7572     X86::VPXORQZrmb,      X86::VPXORDZrmb      },
7573   { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk,
7574     X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk },
7575   { X86::VANDPSZ128rmbk,  X86::VANDPDZ128rmbk,
7576     X86::VPANDQZ128rmbk,  X86::VPANDDZ128rmbk  },
7577   { X86::VORPSZ128rmbk,   X86::VORPDZ128rmbk,
7578     X86::VPORQZ128rmbk,   X86::VPORDZ128rmbk   },
7579   { X86::VXORPSZ128rmbk,  X86::VXORPDZ128rmbk,
7580     X86::VPXORQZ128rmbk,  X86::VPXORDZ128rmbk  },
7581   { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk,
7582     X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk },
7583   { X86::VANDPSZ256rmbk,  X86::VANDPDZ256rmbk,
7584     X86::VPANDQZ256rmbk,  X86::VPANDDZ256rmbk  },
7585   { X86::VORPSZ256rmbk,   X86::VORPDZ256rmbk,
7586     X86::VPORQZ256rmbk,   X86::VPORDZ256rmbk   },
7587   { X86::VXORPSZ256rmbk,  X86::VXORPDZ256rmbk,
7588     X86::VPXORQZ256rmbk,  X86::VPXORDZ256rmbk  },
7589   { X86::VANDNPSZrmbk,    X86::VANDNPDZrmbk,
7590     X86::VPANDNQZrmbk,    X86::VPANDNDZrmbk    },
7591   { X86::VANDPSZrmbk,     X86::VANDPDZrmbk,
7592     X86::VPANDQZrmbk,     X86::VPANDDZrmbk     },
7593   { X86::VANDPSZrmbk,     X86::VANDPDZrmbk,
7594     X86::VPANDQZrmbk,     X86::VPANDDZrmbk     },
7595   { X86::VORPSZrmbk,      X86::VORPDZrmbk,
7596     X86::VPORQZrmbk,      X86::VPORDZrmbk      },
7597   { X86::VXORPSZrmbk,     X86::VXORPDZrmbk,
7598     X86::VPXORQZrmbk,     X86::VPXORDZrmbk     },
7599   { X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz,
7600     X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz},
7601   { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz,
7602     X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz },
7603   { X86::VORPSZ128rmbkz,  X86::VORPDZ128rmbkz,
7604     X86::VPORQZ128rmbkz,  X86::VPORDZ128rmbkz  },
7605   { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz,
7606     X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz },
7607   { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz,
7608     X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz},
7609   { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz,
7610     X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz },
7611   { X86::VORPSZ256rmbkz,  X86::VORPDZ256rmbkz,
7612     X86::VPORQZ256rmbkz,  X86::VPORDZ256rmbkz  },
7613   { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz,
7614     X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz },
7615   { X86::VANDNPSZrmbkz,   X86::VANDNPDZrmbkz,
7616     X86::VPANDNQZrmbkz,   X86::VPANDNDZrmbkz   },
7617   { X86::VANDPSZrmbkz,    X86::VANDPDZrmbkz,
7618     X86::VPANDQZrmbkz,    X86::VPANDDZrmbkz    },
7619   { X86::VANDPSZrmbkz,    X86::VANDPDZrmbkz,
7620     X86::VPANDQZrmbkz,    X86::VPANDDZrmbkz    },
7621   { X86::VORPSZrmbkz,     X86::VORPDZrmbkz,
7622     X86::VPORQZrmbkz,     X86::VPORDZrmbkz     },
7623   { X86::VXORPSZrmbkz,    X86::VXORPDZrmbkz,
7624     X86::VPXORQZrmbkz,    X86::VPXORDZrmbkz    },
7625 };
7626 
7627 // NOTE: These should only be used by the custom domain methods.
7628 static const uint16_t ReplaceableBlendInstrs[][3] = {
7629   //PackedSingle             PackedDouble             PackedInt
7630   { X86::BLENDPSrmi,         X86::BLENDPDrmi,         X86::PBLENDWrmi   },
7631   { X86::BLENDPSrri,         X86::BLENDPDrri,         X86::PBLENDWrri   },
7632   { X86::VBLENDPSrmi,        X86::VBLENDPDrmi,        X86::VPBLENDWrmi  },
7633   { X86::VBLENDPSrri,        X86::VBLENDPDrri,        X86::VPBLENDWrri  },
7634   { X86::VBLENDPSYrmi,       X86::VBLENDPDYrmi,       X86::VPBLENDWYrmi },
7635   { X86::VBLENDPSYrri,       X86::VBLENDPDYrri,       X86::VPBLENDWYrri },
7636 };
7637 static const uint16_t ReplaceableBlendAVX2Instrs[][3] = {
7638   //PackedSingle             PackedDouble             PackedInt
7639   { X86::VBLENDPSrmi,        X86::VBLENDPDrmi,        X86::VPBLENDDrmi  },
7640   { X86::VBLENDPSrri,        X86::VBLENDPDrri,        X86::VPBLENDDrri  },
7641   { X86::VBLENDPSYrmi,       X86::VBLENDPDYrmi,       X86::VPBLENDDYrmi },
7642   { X86::VBLENDPSYrri,       X86::VBLENDPDYrri,       X86::VPBLENDDYrri },
7643 };
7644 
7645 // Special table for changing EVEX logic instructions to VEX.
7646 // TODO: Should we run EVEX->VEX earlier?
7647 static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = {
7648   // Two integer columns for 64-bit and 32-bit elements.
7649   //PackedSingle     PackedDouble     PackedInt           PackedInt
7650   { X86::VANDNPSrm,  X86::VANDNPDrm,  X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
7651   { X86::VANDNPSrr,  X86::VANDNPDrr,  X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
7652   { X86::VANDPSrm,   X86::VANDPDrm,   X86::VPANDQZ128rm,  X86::VPANDDZ128rm  },
7653   { X86::VANDPSrr,   X86::VANDPDrr,   X86::VPANDQZ128rr,  X86::VPANDDZ128rr  },
7654   { X86::VORPSrm,    X86::VORPDrm,    X86::VPORQZ128rm,   X86::VPORDZ128rm   },
7655   { X86::VORPSrr,    X86::VORPDrr,    X86::VPORQZ128rr,   X86::VPORDZ128rr   },
7656   { X86::VXORPSrm,   X86::VXORPDrm,   X86::VPXORQZ128rm,  X86::VPXORDZ128rm  },
7657   { X86::VXORPSrr,   X86::VXORPDrr,   X86::VPXORQZ128rr,  X86::VPXORDZ128rr  },
7658   { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
7659   { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
7660   { X86::VANDPSYrm,  X86::VANDPDYrm,  X86::VPANDQZ256rm,  X86::VPANDDZ256rm  },
7661   { X86::VANDPSYrr,  X86::VANDPDYrr,  X86::VPANDQZ256rr,  X86::VPANDDZ256rr  },
7662   { X86::VORPSYrm,   X86::VORPDYrm,   X86::VPORQZ256rm,   X86::VPORDZ256rm   },
7663   { X86::VORPSYrr,   X86::VORPDYrr,   X86::VPORQZ256rr,   X86::VPORDZ256rr   },
7664   { X86::VXORPSYrm,  X86::VXORPDYrm,  X86::VPXORQZ256rm,  X86::VPXORDZ256rm  },
7665   { X86::VXORPSYrr,  X86::VXORPDYrr,  X86::VPXORQZ256rr,  X86::VPXORDZ256rr  },
7666 };
7667 
7668 // FIXME: Some shuffle and unpack instructions have equivalents in different
7669 // domains, but they require a bit more work than just switching opcodes.
7670 
7671 static const uint16_t *lookup(unsigned opcode, unsigned domain,
7672                               ArrayRef<uint16_t[3]> Table) {
7673   for (const uint16_t (&Row)[3] : Table)
7674     if (Row[domain-1] == opcode)
7675       return Row;
7676   return nullptr;
7677 }
7678 
7679 static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
7680                                     ArrayRef<uint16_t[4]> Table) {
7681   // If this is the integer domain make sure to check both integer columns.
7682   for (const uint16_t (&Row)[4] : Table)
7683     if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode))
7684       return Row;
7685   return nullptr;
7686 }
7687 
7688 // Helper to attempt to widen/narrow blend masks.
7689 static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
7690                             unsigned NewWidth, unsigned *pNewMask = nullptr) {
7691   assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
7692          "Illegal blend mask scale");
7693   unsigned NewMask = 0;
7694 
7695   if ((OldWidth % NewWidth) == 0) {
7696     unsigned Scale = OldWidth / NewWidth;
7697     unsigned SubMask = (1u << Scale) - 1;
7698     for (unsigned i = 0; i != NewWidth; ++i) {
7699       unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
7700       if (Sub == SubMask)
7701         NewMask |= (1u << i);
7702       else if (Sub != 0x0)
7703         return false;
7704     }
7705   } else {
7706     unsigned Scale = NewWidth / OldWidth;
7707     unsigned SubMask = (1u << Scale) - 1;
7708     for (unsigned i = 0; i != OldWidth; ++i) {
7709       if (OldMask & (1 << i)) {
7710         NewMask |= (SubMask << (i * Scale));
7711       }
7712     }
7713   }
7714 
7715   if (pNewMask)
7716     *pNewMask = NewMask;
7717   return true;
7718 }
7719 
7720 uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
7721   unsigned Opcode = MI.getOpcode();
7722   unsigned NumOperands = MI.getDesc().getNumOperands();
7723 
7724   auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
7725     uint16_t validDomains = 0;
7726     if (MI.getOperand(NumOperands - 1).isImm()) {
7727       unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
7728       if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
7729         validDomains |= 0x2; // PackedSingle
7730       if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
7731         validDomains |= 0x4; // PackedDouble
7732       if (!Is256 || Subtarget.hasAVX2())
7733         validDomains |= 0x8; // PackedInt
7734     }
7735     return validDomains;
7736   };
7737 
7738   switch (Opcode) {
7739   case X86::BLENDPDrmi:
7740   case X86::BLENDPDrri:
7741   case X86::VBLENDPDrmi:
7742   case X86::VBLENDPDrri:
7743     return GetBlendDomains(2, false);
7744   case X86::VBLENDPDYrmi:
7745   case X86::VBLENDPDYrri:
7746     return GetBlendDomains(4, true);
7747   case X86::BLENDPSrmi:
7748   case X86::BLENDPSrri:
7749   case X86::VBLENDPSrmi:
7750   case X86::VBLENDPSrri:
7751   case X86::VPBLENDDrmi:
7752   case X86::VPBLENDDrri:
7753     return GetBlendDomains(4, false);
7754   case X86::VBLENDPSYrmi:
7755   case X86::VBLENDPSYrri:
7756   case X86::VPBLENDDYrmi:
7757   case X86::VPBLENDDYrri:
7758     return GetBlendDomains(8, true);
7759   case X86::PBLENDWrmi:
7760   case X86::PBLENDWrri:
7761   case X86::VPBLENDWrmi:
7762   case X86::VPBLENDWrri:
7763   // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks.
7764   case X86::VPBLENDWYrmi:
7765   case X86::VPBLENDWYrri:
7766     return GetBlendDomains(8, false);
7767   case X86::VPANDDZ128rr:  case X86::VPANDDZ128rm:
7768   case X86::VPANDDZ256rr:  case X86::VPANDDZ256rm:
7769   case X86::VPANDQZ128rr:  case X86::VPANDQZ128rm:
7770   case X86::VPANDQZ256rr:  case X86::VPANDQZ256rm:
7771   case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
7772   case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
7773   case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
7774   case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
7775   case X86::VPORDZ128rr:   case X86::VPORDZ128rm:
7776   case X86::VPORDZ256rr:   case X86::VPORDZ256rm:
7777   case X86::VPORQZ128rr:   case X86::VPORQZ128rm:
7778   case X86::VPORQZ256rr:   case X86::VPORQZ256rm:
7779   case X86::VPXORDZ128rr:  case X86::VPXORDZ128rm:
7780   case X86::VPXORDZ256rr:  case X86::VPXORDZ256rm:
7781   case X86::VPXORQZ128rr:  case X86::VPXORQZ128rm:
7782   case X86::VPXORQZ256rr:  case X86::VPXORQZ256rm:
7783     // If we don't have DQI see if we can still switch from an EVEX integer
7784     // instruction to a VEX floating point instruction.
7785     if (Subtarget.hasDQI())
7786       return 0;
7787 
7788     if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
7789       return 0;
7790     if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
7791       return 0;
7792     // Register forms will have 3 operands. Memory form will have more.
7793     if (NumOperands == 3 &&
7794         RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
7795       return 0;
7796 
7797     // All domains are valid.
7798     return 0xe;
7799   case X86::MOVHLPSrr:
7800     // We can swap domains when both inputs are the same register.
7801     // FIXME: This doesn't catch all the cases we would like. If the input
7802     // register isn't KILLed by the instruction, the two address instruction
7803     // pass puts a COPY on one input. The other input uses the original
7804     // register. This prevents the same physical register from being used by
7805     // both inputs.
7806     if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
7807         MI.getOperand(0).getSubReg() == 0 &&
7808         MI.getOperand(1).getSubReg() == 0 &&
7809         MI.getOperand(2).getSubReg() == 0)
7810       return 0x6;
7811     return 0;
7812   case X86::SHUFPDrri:
7813     return 0x6;
7814   }
7815   return 0;
7816 }
7817 
7818 bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
7819                                             unsigned Domain) const {
7820   assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
7821   uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7822   assert(dom && "Not an SSE instruction");
7823 
7824   unsigned Opcode = MI.getOpcode();
7825   unsigned NumOperands = MI.getDesc().getNumOperands();
7826 
7827   auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
7828     if (MI.getOperand(NumOperands - 1).isImm()) {
7829       unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
7830       Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
7831       unsigned NewImm = Imm;
7832 
7833       const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
7834       if (!table)
7835         table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
7836 
7837       if (Domain == 1) { // PackedSingle
7838         AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
7839       } else if (Domain == 2) { // PackedDouble
7840         AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
7841       } else if (Domain == 3) { // PackedInt
7842         if (Subtarget.hasAVX2()) {
7843           // If we are already VPBLENDW use that, else use VPBLENDD.
7844           if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
7845             table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
7846             AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
7847           }
7848         } else {
7849           assert(!Is256 && "128-bit vector expected");
7850           AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
7851         }
7852       }
7853 
7854       assert(table && table[Domain - 1] && "Unknown domain op");
7855       MI.setDesc(get(table[Domain - 1]));
7856       MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
7857     }
7858     return true;
7859   };
7860 
7861   switch (Opcode) {
7862   case X86::BLENDPDrmi:
7863   case X86::BLENDPDrri:
7864   case X86::VBLENDPDrmi:
7865   case X86::VBLENDPDrri:
7866     return SetBlendDomain(2, false);
7867   case X86::VBLENDPDYrmi:
7868   case X86::VBLENDPDYrri:
7869     return SetBlendDomain(4, true);
7870   case X86::BLENDPSrmi:
7871   case X86::BLENDPSrri:
7872   case X86::VBLENDPSrmi:
7873   case X86::VBLENDPSrri:
7874   case X86::VPBLENDDrmi:
7875   case X86::VPBLENDDrri:
7876     return SetBlendDomain(4, false);
7877   case X86::VBLENDPSYrmi:
7878   case X86::VBLENDPSYrri:
7879   case X86::VPBLENDDYrmi:
7880   case X86::VPBLENDDYrri:
7881     return SetBlendDomain(8, true);
7882   case X86::PBLENDWrmi:
7883   case X86::PBLENDWrri:
7884   case X86::VPBLENDWrmi:
7885   case X86::VPBLENDWrri:
7886     return SetBlendDomain(8, false);
7887   case X86::VPBLENDWYrmi:
7888   case X86::VPBLENDWYrri:
7889     return SetBlendDomain(16, true);
7890   case X86::VPANDDZ128rr:  case X86::VPANDDZ128rm:
7891   case X86::VPANDDZ256rr:  case X86::VPANDDZ256rm:
7892   case X86::VPANDQZ128rr:  case X86::VPANDQZ128rm:
7893   case X86::VPANDQZ256rr:  case X86::VPANDQZ256rm:
7894   case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
7895   case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
7896   case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
7897   case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
7898   case X86::VPORDZ128rr:   case X86::VPORDZ128rm:
7899   case X86::VPORDZ256rr:   case X86::VPORDZ256rm:
7900   case X86::VPORQZ128rr:   case X86::VPORQZ128rm:
7901   case X86::VPORQZ256rr:   case X86::VPORQZ256rm:
7902   case X86::VPXORDZ128rr:  case X86::VPXORDZ128rm:
7903   case X86::VPXORDZ256rr:  case X86::VPXORDZ256rm:
7904   case X86::VPXORQZ128rr:  case X86::VPXORQZ128rm:
7905   case X86::VPXORQZ256rr:  case X86::VPXORQZ256rm: {
7906     // Without DQI, convert EVEX instructions to VEX instructions.
7907     if (Subtarget.hasDQI())
7908       return false;
7909 
7910     const uint16_t *table = lookupAVX512(MI.getOpcode(), dom,
7911                                          ReplaceableCustomAVX512LogicInstrs);
7912     assert(table && "Instruction not found in table?");
7913     // Don't change integer Q instructions to D instructions and
7914     // use D intructions if we started with a PS instruction.
7915     if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
7916       Domain = 4;
7917     MI.setDesc(get(table[Domain - 1]));
7918     return true;
7919   }
7920   case X86::UNPCKHPDrr:
7921   case X86::MOVHLPSrr:
7922     // We just need to commute the instruction which will switch the domains.
7923     if (Domain != dom && Domain != 3 &&
7924         MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
7925         MI.getOperand(0).getSubReg() == 0 &&
7926         MI.getOperand(1).getSubReg() == 0 &&
7927         MI.getOperand(2).getSubReg() == 0) {
7928       commuteInstruction(MI, false);
7929       return true;
7930     }
7931     // We must always return true for MOVHLPSrr.
7932     if (Opcode == X86::MOVHLPSrr)
7933       return true;
7934     break;
7935   case X86::SHUFPDrri: {
7936     if (Domain == 1) {
7937       unsigned Imm = MI.getOperand(3).getImm();
7938       unsigned NewImm = 0x44;
7939       if (Imm & 1) NewImm |= 0x0a;
7940       if (Imm & 2) NewImm |= 0xa0;
7941       MI.getOperand(3).setImm(NewImm);
7942       MI.setDesc(get(X86::SHUFPSrri));
7943     }
7944     return true;
7945   }
7946   }
7947   return false;
7948 }
7949 
7950 std::pair<uint16_t, uint16_t>
7951 X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
7952   uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7953   unsigned opcode = MI.getOpcode();
7954   uint16_t validDomains = 0;
7955   if (domain) {
7956     // Attempt to match for custom instructions.
7957     validDomains = getExecutionDomainCustom(MI);
7958     if (validDomains)
7959       return std::make_pair(domain, validDomains);
7960 
7961     if (lookup(opcode, domain, ReplaceableInstrs)) {
7962       validDomains = 0xe;
7963     } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
7964       validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
7965     } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
7966       validDomains = 0x6;
7967     } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
7968       // Insert/extract instructions should only effect domain if AVX2
7969       // is enabled.
7970       if (!Subtarget.hasAVX2())
7971         return std::make_pair(0, 0);
7972       validDomains = 0xe;
7973     } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
7974       validDomains = 0xe;
7975     } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
7976                                                   ReplaceableInstrsAVX512DQ)) {
7977       validDomains = 0xe;
7978     } else if (Subtarget.hasDQI()) {
7979       if (const uint16_t *table = lookupAVX512(opcode, domain,
7980                                              ReplaceableInstrsAVX512DQMasked)) {
7981         if (domain == 1 || (domain == 3 && table[3] == opcode))
7982           validDomains = 0xa;
7983         else
7984           validDomains = 0xc;
7985       }
7986     }
7987   }
7988   return std::make_pair(domain, validDomains);
7989 }
7990 
7991 void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
7992   assert(Domain>0 && Domain<4 && "Invalid execution domain");
7993   uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7994   assert(dom && "Not an SSE instruction");
7995 
7996   // Attempt to match for custom instructions.
7997   if (setExecutionDomainCustom(MI, Domain))
7998     return;
7999 
8000   const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
8001   if (!table) { // try the other table
8002     assert((Subtarget.hasAVX2() || Domain < 3) &&
8003            "256-bit vector operations only available in AVX2");
8004     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
8005   }
8006   if (!table) { // try the FP table
8007     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
8008     assert((!table || Domain < 3) &&
8009            "Can only select PackedSingle or PackedDouble");
8010   }
8011   if (!table) { // try the other table
8012     assert(Subtarget.hasAVX2() &&
8013            "256-bit insert/extract only available in AVX2");
8014     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
8015   }
8016   if (!table) { // try the AVX512 table
8017     assert(Subtarget.hasAVX512() && "Requires AVX-512");
8018     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
8019     // Don't change integer Q instructions to D instructions.
8020     if (table && Domain == 3 && table[3] == MI.getOpcode())
8021       Domain = 4;
8022   }
8023   if (!table) { // try the AVX512DQ table
8024     assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
8025     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
8026     // Don't change integer Q instructions to D instructions and
8027     // use D instructions if we started with a PS instruction.
8028     if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
8029       Domain = 4;
8030   }
8031   if (!table) { // try the AVX512DQMasked table
8032     assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
8033     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
8034     if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
8035       Domain = 4;
8036   }
8037   assert(table && "Cannot change domain");
8038   MI.setDesc(get(table[Domain - 1]));
8039 }
8040 
8041 /// Return the noop instruction to use for a noop.
8042 MCInst X86InstrInfo::getNop() const {
8043   MCInst Nop;
8044   Nop.setOpcode(X86::NOOP);
8045   return Nop;
8046 }
8047 
8048 bool X86InstrInfo::isHighLatencyDef(int opc) const {
8049   switch (opc) {
8050   default: return false;
8051   case X86::DIVPDrm:
8052   case X86::DIVPDrr:
8053   case X86::DIVPSrm:
8054   case X86::DIVPSrr:
8055   case X86::DIVSDrm:
8056   case X86::DIVSDrm_Int:
8057   case X86::DIVSDrr:
8058   case X86::DIVSDrr_Int:
8059   case X86::DIVSSrm:
8060   case X86::DIVSSrm_Int:
8061   case X86::DIVSSrr:
8062   case X86::DIVSSrr_Int:
8063   case X86::SQRTPDm:
8064   case X86::SQRTPDr:
8065   case X86::SQRTPSm:
8066   case X86::SQRTPSr:
8067   case X86::SQRTSDm:
8068   case X86::SQRTSDm_Int:
8069   case X86::SQRTSDr:
8070   case X86::SQRTSDr_Int:
8071   case X86::SQRTSSm:
8072   case X86::SQRTSSm_Int:
8073   case X86::SQRTSSr:
8074   case X86::SQRTSSr_Int:
8075   // AVX instructions with high latency
8076   case X86::VDIVPDrm:
8077   case X86::VDIVPDrr:
8078   case X86::VDIVPDYrm:
8079   case X86::VDIVPDYrr:
8080   case X86::VDIVPSrm:
8081   case X86::VDIVPSrr:
8082   case X86::VDIVPSYrm:
8083   case X86::VDIVPSYrr:
8084   case X86::VDIVSDrm:
8085   case X86::VDIVSDrm_Int:
8086   case X86::VDIVSDrr:
8087   case X86::VDIVSDrr_Int:
8088   case X86::VDIVSSrm:
8089   case X86::VDIVSSrm_Int:
8090   case X86::VDIVSSrr:
8091   case X86::VDIVSSrr_Int:
8092   case X86::VSQRTPDm:
8093   case X86::VSQRTPDr:
8094   case X86::VSQRTPDYm:
8095   case X86::VSQRTPDYr:
8096   case X86::VSQRTPSm:
8097   case X86::VSQRTPSr:
8098   case X86::VSQRTPSYm:
8099   case X86::VSQRTPSYr:
8100   case X86::VSQRTSDm:
8101   case X86::VSQRTSDm_Int:
8102   case X86::VSQRTSDr:
8103   case X86::VSQRTSDr_Int:
8104   case X86::VSQRTSSm:
8105   case X86::VSQRTSSm_Int:
8106   case X86::VSQRTSSr:
8107   case X86::VSQRTSSr_Int:
8108   // AVX512 instructions with high latency
8109   case X86::VDIVPDZ128rm:
8110   case X86::VDIVPDZ128rmb:
8111   case X86::VDIVPDZ128rmbk:
8112   case X86::VDIVPDZ128rmbkz:
8113   case X86::VDIVPDZ128rmk:
8114   case X86::VDIVPDZ128rmkz:
8115   case X86::VDIVPDZ128rr:
8116   case X86::VDIVPDZ128rrk:
8117   case X86::VDIVPDZ128rrkz:
8118   case X86::VDIVPDZ256rm:
8119   case X86::VDIVPDZ256rmb:
8120   case X86::VDIVPDZ256rmbk:
8121   case X86::VDIVPDZ256rmbkz:
8122   case X86::VDIVPDZ256rmk:
8123   case X86::VDIVPDZ256rmkz:
8124   case X86::VDIVPDZ256rr:
8125   case X86::VDIVPDZ256rrk:
8126   case X86::VDIVPDZ256rrkz:
8127   case X86::VDIVPDZrrb:
8128   case X86::VDIVPDZrrbk:
8129   case X86::VDIVPDZrrbkz:
8130   case X86::VDIVPDZrm:
8131   case X86::VDIVPDZrmb:
8132   case X86::VDIVPDZrmbk:
8133   case X86::VDIVPDZrmbkz:
8134   case X86::VDIVPDZrmk:
8135   case X86::VDIVPDZrmkz:
8136   case X86::VDIVPDZrr:
8137   case X86::VDIVPDZrrk:
8138   case X86::VDIVPDZrrkz:
8139   case X86::VDIVPSZ128rm:
8140   case X86::VDIVPSZ128rmb:
8141   case X86::VDIVPSZ128rmbk:
8142   case X86::VDIVPSZ128rmbkz:
8143   case X86::VDIVPSZ128rmk:
8144   case X86::VDIVPSZ128rmkz:
8145   case X86::VDIVPSZ128rr:
8146   case X86::VDIVPSZ128rrk:
8147   case X86::VDIVPSZ128rrkz:
8148   case X86::VDIVPSZ256rm:
8149   case X86::VDIVPSZ256rmb:
8150   case X86::VDIVPSZ256rmbk:
8151   case X86::VDIVPSZ256rmbkz:
8152   case X86::VDIVPSZ256rmk:
8153   case X86::VDIVPSZ256rmkz:
8154   case X86::VDIVPSZ256rr:
8155   case X86::VDIVPSZ256rrk:
8156   case X86::VDIVPSZ256rrkz:
8157   case X86::VDIVPSZrrb:
8158   case X86::VDIVPSZrrbk:
8159   case X86::VDIVPSZrrbkz:
8160   case X86::VDIVPSZrm:
8161   case X86::VDIVPSZrmb:
8162   case X86::VDIVPSZrmbk:
8163   case X86::VDIVPSZrmbkz:
8164   case X86::VDIVPSZrmk:
8165   case X86::VDIVPSZrmkz:
8166   case X86::VDIVPSZrr:
8167   case X86::VDIVPSZrrk:
8168   case X86::VDIVPSZrrkz:
8169   case X86::VDIVSDZrm:
8170   case X86::VDIVSDZrr:
8171   case X86::VDIVSDZrm_Int:
8172   case X86::VDIVSDZrm_Intk:
8173   case X86::VDIVSDZrm_Intkz:
8174   case X86::VDIVSDZrr_Int:
8175   case X86::VDIVSDZrr_Intk:
8176   case X86::VDIVSDZrr_Intkz:
8177   case X86::VDIVSDZrrb_Int:
8178   case X86::VDIVSDZrrb_Intk:
8179   case X86::VDIVSDZrrb_Intkz:
8180   case X86::VDIVSSZrm:
8181   case X86::VDIVSSZrr:
8182   case X86::VDIVSSZrm_Int:
8183   case X86::VDIVSSZrm_Intk:
8184   case X86::VDIVSSZrm_Intkz:
8185   case X86::VDIVSSZrr_Int:
8186   case X86::VDIVSSZrr_Intk:
8187   case X86::VDIVSSZrr_Intkz:
8188   case X86::VDIVSSZrrb_Int:
8189   case X86::VDIVSSZrrb_Intk:
8190   case X86::VDIVSSZrrb_Intkz:
8191   case X86::VSQRTPDZ128m:
8192   case X86::VSQRTPDZ128mb:
8193   case X86::VSQRTPDZ128mbk:
8194   case X86::VSQRTPDZ128mbkz:
8195   case X86::VSQRTPDZ128mk:
8196   case X86::VSQRTPDZ128mkz:
8197   case X86::VSQRTPDZ128r:
8198   case X86::VSQRTPDZ128rk:
8199   case X86::VSQRTPDZ128rkz:
8200   case X86::VSQRTPDZ256m:
8201   case X86::VSQRTPDZ256mb:
8202   case X86::VSQRTPDZ256mbk:
8203   case X86::VSQRTPDZ256mbkz:
8204   case X86::VSQRTPDZ256mk:
8205   case X86::VSQRTPDZ256mkz:
8206   case X86::VSQRTPDZ256r:
8207   case X86::VSQRTPDZ256rk:
8208   case X86::VSQRTPDZ256rkz:
8209   case X86::VSQRTPDZm:
8210   case X86::VSQRTPDZmb:
8211   case X86::VSQRTPDZmbk:
8212   case X86::VSQRTPDZmbkz:
8213   case X86::VSQRTPDZmk:
8214   case X86::VSQRTPDZmkz:
8215   case X86::VSQRTPDZr:
8216   case X86::VSQRTPDZrb:
8217   case X86::VSQRTPDZrbk:
8218   case X86::VSQRTPDZrbkz:
8219   case X86::VSQRTPDZrk:
8220   case X86::VSQRTPDZrkz:
8221   case X86::VSQRTPSZ128m:
8222   case X86::VSQRTPSZ128mb:
8223   case X86::VSQRTPSZ128mbk:
8224   case X86::VSQRTPSZ128mbkz:
8225   case X86::VSQRTPSZ128mk:
8226   case X86::VSQRTPSZ128mkz:
8227   case X86::VSQRTPSZ128r:
8228   case X86::VSQRTPSZ128rk:
8229   case X86::VSQRTPSZ128rkz:
8230   case X86::VSQRTPSZ256m:
8231   case X86::VSQRTPSZ256mb:
8232   case X86::VSQRTPSZ256mbk:
8233   case X86::VSQRTPSZ256mbkz:
8234   case X86::VSQRTPSZ256mk:
8235   case X86::VSQRTPSZ256mkz:
8236   case X86::VSQRTPSZ256r:
8237   case X86::VSQRTPSZ256rk:
8238   case X86::VSQRTPSZ256rkz:
8239   case X86::VSQRTPSZm:
8240   case X86::VSQRTPSZmb:
8241   case X86::VSQRTPSZmbk:
8242   case X86::VSQRTPSZmbkz:
8243   case X86::VSQRTPSZmk:
8244   case X86::VSQRTPSZmkz:
8245   case X86::VSQRTPSZr:
8246   case X86::VSQRTPSZrb:
8247   case X86::VSQRTPSZrbk:
8248   case X86::VSQRTPSZrbkz:
8249   case X86::VSQRTPSZrk:
8250   case X86::VSQRTPSZrkz:
8251   case X86::VSQRTSDZm:
8252   case X86::VSQRTSDZm_Int:
8253   case X86::VSQRTSDZm_Intk:
8254   case X86::VSQRTSDZm_Intkz:
8255   case X86::VSQRTSDZr:
8256   case X86::VSQRTSDZr_Int:
8257   case X86::VSQRTSDZr_Intk:
8258   case X86::VSQRTSDZr_Intkz:
8259   case X86::VSQRTSDZrb_Int:
8260   case X86::VSQRTSDZrb_Intk:
8261   case X86::VSQRTSDZrb_Intkz:
8262   case X86::VSQRTSSZm:
8263   case X86::VSQRTSSZm_Int:
8264   case X86::VSQRTSSZm_Intk:
8265   case X86::VSQRTSSZm_Intkz:
8266   case X86::VSQRTSSZr:
8267   case X86::VSQRTSSZr_Int:
8268   case X86::VSQRTSSZr_Intk:
8269   case X86::VSQRTSSZr_Intkz:
8270   case X86::VSQRTSSZrb_Int:
8271   case X86::VSQRTSSZrb_Intk:
8272   case X86::VSQRTSSZrb_Intkz:
8273 
8274   case X86::VGATHERDPDYrm:
8275   case X86::VGATHERDPDZ128rm:
8276   case X86::VGATHERDPDZ256rm:
8277   case X86::VGATHERDPDZrm:
8278   case X86::VGATHERDPDrm:
8279   case X86::VGATHERDPSYrm:
8280   case X86::VGATHERDPSZ128rm:
8281   case X86::VGATHERDPSZ256rm:
8282   case X86::VGATHERDPSZrm:
8283   case X86::VGATHERDPSrm:
8284   case X86::VGATHERPF0DPDm:
8285   case X86::VGATHERPF0DPSm:
8286   case X86::VGATHERPF0QPDm:
8287   case X86::VGATHERPF0QPSm:
8288   case X86::VGATHERPF1DPDm:
8289   case X86::VGATHERPF1DPSm:
8290   case X86::VGATHERPF1QPDm:
8291   case X86::VGATHERPF1QPSm:
8292   case X86::VGATHERQPDYrm:
8293   case X86::VGATHERQPDZ128rm:
8294   case X86::VGATHERQPDZ256rm:
8295   case X86::VGATHERQPDZrm:
8296   case X86::VGATHERQPDrm:
8297   case X86::VGATHERQPSYrm:
8298   case X86::VGATHERQPSZ128rm:
8299   case X86::VGATHERQPSZ256rm:
8300   case X86::VGATHERQPSZrm:
8301   case X86::VGATHERQPSrm:
8302   case X86::VPGATHERDDYrm:
8303   case X86::VPGATHERDDZ128rm:
8304   case X86::VPGATHERDDZ256rm:
8305   case X86::VPGATHERDDZrm:
8306   case X86::VPGATHERDDrm:
8307   case X86::VPGATHERDQYrm:
8308   case X86::VPGATHERDQZ128rm:
8309   case X86::VPGATHERDQZ256rm:
8310   case X86::VPGATHERDQZrm:
8311   case X86::VPGATHERDQrm:
8312   case X86::VPGATHERQDYrm:
8313   case X86::VPGATHERQDZ128rm:
8314   case X86::VPGATHERQDZ256rm:
8315   case X86::VPGATHERQDZrm:
8316   case X86::VPGATHERQDrm:
8317   case X86::VPGATHERQQYrm:
8318   case X86::VPGATHERQQZ128rm:
8319   case X86::VPGATHERQQZ256rm:
8320   case X86::VPGATHERQQZrm:
8321   case X86::VPGATHERQQrm:
8322   case X86::VSCATTERDPDZ128mr:
8323   case X86::VSCATTERDPDZ256mr:
8324   case X86::VSCATTERDPDZmr:
8325   case X86::VSCATTERDPSZ128mr:
8326   case X86::VSCATTERDPSZ256mr:
8327   case X86::VSCATTERDPSZmr:
8328   case X86::VSCATTERPF0DPDm:
8329   case X86::VSCATTERPF0DPSm:
8330   case X86::VSCATTERPF0QPDm:
8331   case X86::VSCATTERPF0QPSm:
8332   case X86::VSCATTERPF1DPDm:
8333   case X86::VSCATTERPF1DPSm:
8334   case X86::VSCATTERPF1QPDm:
8335   case X86::VSCATTERPF1QPSm:
8336   case X86::VSCATTERQPDZ128mr:
8337   case X86::VSCATTERQPDZ256mr:
8338   case X86::VSCATTERQPDZmr:
8339   case X86::VSCATTERQPSZ128mr:
8340   case X86::VSCATTERQPSZ256mr:
8341   case X86::VSCATTERQPSZmr:
8342   case X86::VPSCATTERDDZ128mr:
8343   case X86::VPSCATTERDDZ256mr:
8344   case X86::VPSCATTERDDZmr:
8345   case X86::VPSCATTERDQZ128mr:
8346   case X86::VPSCATTERDQZ256mr:
8347   case X86::VPSCATTERDQZmr:
8348   case X86::VPSCATTERQDZ128mr:
8349   case X86::VPSCATTERQDZ256mr:
8350   case X86::VPSCATTERQDZmr:
8351   case X86::VPSCATTERQQZ128mr:
8352   case X86::VPSCATTERQQZ256mr:
8353   case X86::VPSCATTERQQZmr:
8354     return true;
8355   }
8356 }
8357 
8358 bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
8359                                          const MachineRegisterInfo *MRI,
8360                                          const MachineInstr &DefMI,
8361                                          unsigned DefIdx,
8362                                          const MachineInstr &UseMI,
8363                                          unsigned UseIdx) const {
8364   return isHighLatencyDef(DefMI.getOpcode());
8365 }
8366 
8367 bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
8368                                            const MachineBasicBlock *MBB) const {
8369   assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 &&
8370          Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
8371 
8372   // Integer binary math/logic instructions have a third source operand:
8373   // the EFLAGS register. That operand must be both defined here and never
8374   // used; ie, it must be dead. If the EFLAGS operand is live, then we can
8375   // not change anything because rearranging the operands could affect other
8376   // instructions that depend on the exact status flags (zero, sign, etc.)
8377   // that are set by using these particular operands with this operation.
8378   const MachineOperand *FlagDef = Inst.findRegisterDefOperand(X86::EFLAGS);
8379   assert((Inst.getNumDefs() == 1 || FlagDef) &&
8380          "Implicit def isn't flags?");
8381   if (FlagDef && !FlagDef->isDead())
8382     return false;
8383 
8384   return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
8385 }
8386 
8387 // TODO: There are many more machine instruction opcodes to match:
8388 //       1. Other data types (integer, vectors)
8389 //       2. Other math / logic operations (xor, or)
8390 //       3. Other forms of the same operation (intrinsics and other variants)
8391 bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
8392   switch (Inst.getOpcode()) {
8393   case X86::AND8rr:
8394   case X86::AND16rr:
8395   case X86::AND32rr:
8396   case X86::AND64rr:
8397   case X86::OR8rr:
8398   case X86::OR16rr:
8399   case X86::OR32rr:
8400   case X86::OR64rr:
8401   case X86::XOR8rr:
8402   case X86::XOR16rr:
8403   case X86::XOR32rr:
8404   case X86::XOR64rr:
8405   case X86::IMUL16rr:
8406   case X86::IMUL32rr:
8407   case X86::IMUL64rr:
8408   case X86::PANDrr:
8409   case X86::PORrr:
8410   case X86::PXORrr:
8411   case X86::ANDPDrr:
8412   case X86::ANDPSrr:
8413   case X86::ORPDrr:
8414   case X86::ORPSrr:
8415   case X86::XORPDrr:
8416   case X86::XORPSrr:
8417   case X86::PADDBrr:
8418   case X86::PADDWrr:
8419   case X86::PADDDrr:
8420   case X86::PADDQrr:
8421   case X86::PMULLWrr:
8422   case X86::PMULLDrr:
8423   case X86::PMAXSBrr:
8424   case X86::PMAXSDrr:
8425   case X86::PMAXSWrr:
8426   case X86::PMAXUBrr:
8427   case X86::PMAXUDrr:
8428   case X86::PMAXUWrr:
8429   case X86::PMINSBrr:
8430   case X86::PMINSDrr:
8431   case X86::PMINSWrr:
8432   case X86::PMINUBrr:
8433   case X86::PMINUDrr:
8434   case X86::PMINUWrr:
8435   case X86::VPANDrr:
8436   case X86::VPANDYrr:
8437   case X86::VPANDDZ128rr:
8438   case X86::VPANDDZ256rr:
8439   case X86::VPANDDZrr:
8440   case X86::VPANDQZ128rr:
8441   case X86::VPANDQZ256rr:
8442   case X86::VPANDQZrr:
8443   case X86::VPORrr:
8444   case X86::VPORYrr:
8445   case X86::VPORDZ128rr:
8446   case X86::VPORDZ256rr:
8447   case X86::VPORDZrr:
8448   case X86::VPORQZ128rr:
8449   case X86::VPORQZ256rr:
8450   case X86::VPORQZrr:
8451   case X86::VPXORrr:
8452   case X86::VPXORYrr:
8453   case X86::VPXORDZ128rr:
8454   case X86::VPXORDZ256rr:
8455   case X86::VPXORDZrr:
8456   case X86::VPXORQZ128rr:
8457   case X86::VPXORQZ256rr:
8458   case X86::VPXORQZrr:
8459   case X86::VANDPDrr:
8460   case X86::VANDPSrr:
8461   case X86::VANDPDYrr:
8462   case X86::VANDPSYrr:
8463   case X86::VANDPDZ128rr:
8464   case X86::VANDPSZ128rr:
8465   case X86::VANDPDZ256rr:
8466   case X86::VANDPSZ256rr:
8467   case X86::VANDPDZrr:
8468   case X86::VANDPSZrr:
8469   case X86::VORPDrr:
8470   case X86::VORPSrr:
8471   case X86::VORPDYrr:
8472   case X86::VORPSYrr:
8473   case X86::VORPDZ128rr:
8474   case X86::VORPSZ128rr:
8475   case X86::VORPDZ256rr:
8476   case X86::VORPSZ256rr:
8477   case X86::VORPDZrr:
8478   case X86::VORPSZrr:
8479   case X86::VXORPDrr:
8480   case X86::VXORPSrr:
8481   case X86::VXORPDYrr:
8482   case X86::VXORPSYrr:
8483   case X86::VXORPDZ128rr:
8484   case X86::VXORPSZ128rr:
8485   case X86::VXORPDZ256rr:
8486   case X86::VXORPSZ256rr:
8487   case X86::VXORPDZrr:
8488   case X86::VXORPSZrr:
8489   case X86::KADDBrr:
8490   case X86::KADDWrr:
8491   case X86::KADDDrr:
8492   case X86::KADDQrr:
8493   case X86::KANDBrr:
8494   case X86::KANDWrr:
8495   case X86::KANDDrr:
8496   case X86::KANDQrr:
8497   case X86::KORBrr:
8498   case X86::KORWrr:
8499   case X86::KORDrr:
8500   case X86::KORQrr:
8501   case X86::KXORBrr:
8502   case X86::KXORWrr:
8503   case X86::KXORDrr:
8504   case X86::KXORQrr:
8505   case X86::VPADDBrr:
8506   case X86::VPADDWrr:
8507   case X86::VPADDDrr:
8508   case X86::VPADDQrr:
8509   case X86::VPADDBYrr:
8510   case X86::VPADDWYrr:
8511   case X86::VPADDDYrr:
8512   case X86::VPADDQYrr:
8513   case X86::VPADDBZ128rr:
8514   case X86::VPADDWZ128rr:
8515   case X86::VPADDDZ128rr:
8516   case X86::VPADDQZ128rr:
8517   case X86::VPADDBZ256rr:
8518   case X86::VPADDWZ256rr:
8519   case X86::VPADDDZ256rr:
8520   case X86::VPADDQZ256rr:
8521   case X86::VPADDBZrr:
8522   case X86::VPADDWZrr:
8523   case X86::VPADDDZrr:
8524   case X86::VPADDQZrr:
8525   case X86::VPMULLWrr:
8526   case X86::VPMULLWYrr:
8527   case X86::VPMULLWZ128rr:
8528   case X86::VPMULLWZ256rr:
8529   case X86::VPMULLWZrr:
8530   case X86::VPMULLDrr:
8531   case X86::VPMULLDYrr:
8532   case X86::VPMULLDZ128rr:
8533   case X86::VPMULLDZ256rr:
8534   case X86::VPMULLDZrr:
8535   case X86::VPMULLQZ128rr:
8536   case X86::VPMULLQZ256rr:
8537   case X86::VPMULLQZrr:
8538   case X86::VPMAXSBrr:
8539   case X86::VPMAXSBYrr:
8540   case X86::VPMAXSBZ128rr:
8541   case X86::VPMAXSBZ256rr:
8542   case X86::VPMAXSBZrr:
8543   case X86::VPMAXSDrr:
8544   case X86::VPMAXSDYrr:
8545   case X86::VPMAXSDZ128rr:
8546   case X86::VPMAXSDZ256rr:
8547   case X86::VPMAXSDZrr:
8548   case X86::VPMAXSQZ128rr:
8549   case X86::VPMAXSQZ256rr:
8550   case X86::VPMAXSQZrr:
8551   case X86::VPMAXSWrr:
8552   case X86::VPMAXSWYrr:
8553   case X86::VPMAXSWZ128rr:
8554   case X86::VPMAXSWZ256rr:
8555   case X86::VPMAXSWZrr:
8556   case X86::VPMAXUBrr:
8557   case X86::VPMAXUBYrr:
8558   case X86::VPMAXUBZ128rr:
8559   case X86::VPMAXUBZ256rr:
8560   case X86::VPMAXUBZrr:
8561   case X86::VPMAXUDrr:
8562   case X86::VPMAXUDYrr:
8563   case X86::VPMAXUDZ128rr:
8564   case X86::VPMAXUDZ256rr:
8565   case X86::VPMAXUDZrr:
8566   case X86::VPMAXUQZ128rr:
8567   case X86::VPMAXUQZ256rr:
8568   case X86::VPMAXUQZrr:
8569   case X86::VPMAXUWrr:
8570   case X86::VPMAXUWYrr:
8571   case X86::VPMAXUWZ128rr:
8572   case X86::VPMAXUWZ256rr:
8573   case X86::VPMAXUWZrr:
8574   case X86::VPMINSBrr:
8575   case X86::VPMINSBYrr:
8576   case X86::VPMINSBZ128rr:
8577   case X86::VPMINSBZ256rr:
8578   case X86::VPMINSBZrr:
8579   case X86::VPMINSDrr:
8580   case X86::VPMINSDYrr:
8581   case X86::VPMINSDZ128rr:
8582   case X86::VPMINSDZ256rr:
8583   case X86::VPMINSDZrr:
8584   case X86::VPMINSQZ128rr:
8585   case X86::VPMINSQZ256rr:
8586   case X86::VPMINSQZrr:
8587   case X86::VPMINSWrr:
8588   case X86::VPMINSWYrr:
8589   case X86::VPMINSWZ128rr:
8590   case X86::VPMINSWZ256rr:
8591   case X86::VPMINSWZrr:
8592   case X86::VPMINUBrr:
8593   case X86::VPMINUBYrr:
8594   case X86::VPMINUBZ128rr:
8595   case X86::VPMINUBZ256rr:
8596   case X86::VPMINUBZrr:
8597   case X86::VPMINUDrr:
8598   case X86::VPMINUDYrr:
8599   case X86::VPMINUDZ128rr:
8600   case X86::VPMINUDZ256rr:
8601   case X86::VPMINUDZrr:
8602   case X86::VPMINUQZ128rr:
8603   case X86::VPMINUQZ256rr:
8604   case X86::VPMINUQZrr:
8605   case X86::VPMINUWrr:
8606   case X86::VPMINUWYrr:
8607   case X86::VPMINUWZ128rr:
8608   case X86::VPMINUWZ256rr:
8609   case X86::VPMINUWZrr:
8610   // Normal min/max instructions are not commutative because of NaN and signed
8611   // zero semantics, but these are. Thus, there's no need to check for global
8612   // relaxed math; the instructions themselves have the properties we need.
8613   case X86::MAXCPDrr:
8614   case X86::MAXCPSrr:
8615   case X86::MAXCSDrr:
8616   case X86::MAXCSSrr:
8617   case X86::MINCPDrr:
8618   case X86::MINCPSrr:
8619   case X86::MINCSDrr:
8620   case X86::MINCSSrr:
8621   case X86::VMAXCPDrr:
8622   case X86::VMAXCPSrr:
8623   case X86::VMAXCPDYrr:
8624   case X86::VMAXCPSYrr:
8625   case X86::VMAXCPDZ128rr:
8626   case X86::VMAXCPSZ128rr:
8627   case X86::VMAXCPDZ256rr:
8628   case X86::VMAXCPSZ256rr:
8629   case X86::VMAXCPDZrr:
8630   case X86::VMAXCPSZrr:
8631   case X86::VMAXCSDrr:
8632   case X86::VMAXCSSrr:
8633   case X86::VMAXCSDZrr:
8634   case X86::VMAXCSSZrr:
8635   case X86::VMINCPDrr:
8636   case X86::VMINCPSrr:
8637   case X86::VMINCPDYrr:
8638   case X86::VMINCPSYrr:
8639   case X86::VMINCPDZ128rr:
8640   case X86::VMINCPSZ128rr:
8641   case X86::VMINCPDZ256rr:
8642   case X86::VMINCPSZ256rr:
8643   case X86::VMINCPDZrr:
8644   case X86::VMINCPSZrr:
8645   case X86::VMINCSDrr:
8646   case X86::VMINCSSrr:
8647   case X86::VMINCSDZrr:
8648   case X86::VMINCSSZrr:
8649   case X86::VMAXCPHZ128rr:
8650   case X86::VMAXCPHZ256rr:
8651   case X86::VMAXCPHZrr:
8652   case X86::VMAXCSHZrr:
8653   case X86::VMINCPHZ128rr:
8654   case X86::VMINCPHZ256rr:
8655   case X86::VMINCPHZrr:
8656   case X86::VMINCSHZrr:
8657     return true;
8658   case X86::ADDPDrr:
8659   case X86::ADDPSrr:
8660   case X86::ADDSDrr:
8661   case X86::ADDSSrr:
8662   case X86::MULPDrr:
8663   case X86::MULPSrr:
8664   case X86::MULSDrr:
8665   case X86::MULSSrr:
8666   case X86::VADDPDrr:
8667   case X86::VADDPSrr:
8668   case X86::VADDPDYrr:
8669   case X86::VADDPSYrr:
8670   case X86::VADDPDZ128rr:
8671   case X86::VADDPSZ128rr:
8672   case X86::VADDPDZ256rr:
8673   case X86::VADDPSZ256rr:
8674   case X86::VADDPDZrr:
8675   case X86::VADDPSZrr:
8676   case X86::VADDSDrr:
8677   case X86::VADDSSrr:
8678   case X86::VADDSDZrr:
8679   case X86::VADDSSZrr:
8680   case X86::VMULPDrr:
8681   case X86::VMULPSrr:
8682   case X86::VMULPDYrr:
8683   case X86::VMULPSYrr:
8684   case X86::VMULPDZ128rr:
8685   case X86::VMULPSZ128rr:
8686   case X86::VMULPDZ256rr:
8687   case X86::VMULPSZ256rr:
8688   case X86::VMULPDZrr:
8689   case X86::VMULPSZrr:
8690   case X86::VMULSDrr:
8691   case X86::VMULSSrr:
8692   case X86::VMULSDZrr:
8693   case X86::VMULSSZrr:
8694   case X86::VADDPHZ128rr:
8695   case X86::VADDPHZ256rr:
8696   case X86::VADDPHZrr:
8697   case X86::VADDSHZrr:
8698   case X86::VMULPHZ128rr:
8699   case X86::VMULPHZ256rr:
8700   case X86::VMULPHZrr:
8701   case X86::VMULSHZrr:
8702     return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
8703            Inst.getFlag(MachineInstr::MIFlag::FmNsz);
8704   default:
8705     return false;
8706   }
8707 }
8708 
8709 /// If \p DescribedReg overlaps with the MOVrr instruction's destination
8710 /// register then, if possible, describe the value in terms of the source
8711 /// register.
8712 static Optional<ParamLoadedValue>
8713 describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
8714                          const TargetRegisterInfo *TRI) {
8715   Register DestReg = MI.getOperand(0).getReg();
8716   Register SrcReg = MI.getOperand(1).getReg();
8717 
8718   auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
8719 
8720   // If the described register is the destination, just return the source.
8721   if (DestReg == DescribedReg)
8722     return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
8723 
8724   // If the described register is a sub-register of the destination register,
8725   // then pick out the source register's corresponding sub-register.
8726   if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
8727     Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
8728     return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
8729   }
8730 
8731   // The remaining case to consider is when the described register is a
8732   // super-register of the destination register. MOV8rr and MOV16rr does not
8733   // write to any of the other bytes in the register, meaning that we'd have to
8734   // describe the value using a combination of the source register and the
8735   // non-overlapping bits in the described register, which is not currently
8736   // possible.
8737   if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
8738       !TRI->isSuperRegister(DestReg, DescribedReg))
8739     return None;
8740 
8741   assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
8742   return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
8743 }
8744 
8745 Optional<ParamLoadedValue>
8746 X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
8747   const MachineOperand *Op = nullptr;
8748   DIExpression *Expr = nullptr;
8749 
8750   const TargetRegisterInfo *TRI = &getRegisterInfo();
8751 
8752   switch (MI.getOpcode()) {
8753   case X86::LEA32r:
8754   case X86::LEA64r:
8755   case X86::LEA64_32r: {
8756     // We may need to describe a 64-bit parameter with a 32-bit LEA.
8757     if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8758       return None;
8759 
8760     // Operand 4 could be global address. For now we do not support
8761     // such situation.
8762     if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
8763       return None;
8764 
8765     const MachineOperand &Op1 = MI.getOperand(1);
8766     const MachineOperand &Op2 = MI.getOperand(3);
8767     assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister ||
8768                            Register::isPhysicalRegister(Op2.getReg())));
8769 
8770     // Omit situations like:
8771     // %rsi = lea %rsi, 4, ...
8772     if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
8773         Op2.getReg() == MI.getOperand(0).getReg())
8774       return None;
8775     else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
8776               TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
8777              (Op2.getReg() != X86::NoRegister &&
8778               TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
8779       return None;
8780 
8781     int64_t Coef = MI.getOperand(2).getImm();
8782     int64_t Offset = MI.getOperand(4).getImm();
8783     SmallVector<uint64_t, 8> Ops;
8784 
8785     if ((Op1.isReg() && Op1.getReg() != X86::NoRegister)) {
8786       Op = &Op1;
8787     } else if (Op1.isFI())
8788       Op = &Op1;
8789 
8790     if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
8791       Ops.push_back(dwarf::DW_OP_constu);
8792       Ops.push_back(Coef + 1);
8793       Ops.push_back(dwarf::DW_OP_mul);
8794     } else {
8795       if (Op && Op2.getReg() != X86::NoRegister) {
8796         int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
8797         if (dwarfReg < 0)
8798           return None;
8799         else if (dwarfReg < 32) {
8800           Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
8801           Ops.push_back(0);
8802         } else {
8803           Ops.push_back(dwarf::DW_OP_bregx);
8804           Ops.push_back(dwarfReg);
8805           Ops.push_back(0);
8806         }
8807       } else if (!Op) {
8808         assert(Op2.getReg() != X86::NoRegister);
8809         Op = &Op2;
8810       }
8811 
8812       if (Coef > 1) {
8813         assert(Op2.getReg() != X86::NoRegister);
8814         Ops.push_back(dwarf::DW_OP_constu);
8815         Ops.push_back(Coef);
8816         Ops.push_back(dwarf::DW_OP_mul);
8817       }
8818 
8819       if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
8820           Op2.getReg() != X86::NoRegister) {
8821         Ops.push_back(dwarf::DW_OP_plus);
8822       }
8823     }
8824 
8825     DIExpression::appendOffset(Ops, Offset);
8826     Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops);
8827 
8828     return ParamLoadedValue(*Op, Expr);;
8829   }
8830   case X86::MOV8ri:
8831   case X86::MOV16ri:
8832     // TODO: Handle MOV8ri and MOV16ri.
8833     return None;
8834   case X86::MOV32ri:
8835   case X86::MOV64ri:
8836   case X86::MOV64ri32:
8837     // MOV32ri may be used for producing zero-extended 32-bit immediates in
8838     // 64-bit parameters, so we need to consider super-registers.
8839     if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8840       return None;
8841     return ParamLoadedValue(MI.getOperand(1), Expr);
8842   case X86::MOV8rr:
8843   case X86::MOV16rr:
8844   case X86::MOV32rr:
8845   case X86::MOV64rr:
8846     return describeMOVrrLoadedValue(MI, Reg, TRI);
8847   case X86::XOR32rr: {
8848     // 64-bit parameters are zero-materialized using XOR32rr, so also consider
8849     // super-registers.
8850     if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8851       return None;
8852     if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
8853       return ParamLoadedValue(MachineOperand::CreateImm(0), Expr);
8854     return None;
8855   }
8856   case X86::MOVSX64rr32: {
8857     // We may need to describe the lower 32 bits of the MOVSX; for example, in
8858     // cases like this:
8859     //
8860     //  $ebx = [...]
8861     //  $rdi = MOVSX64rr32 $ebx
8862     //  $esi = MOV32rr $edi
8863     if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
8864       return None;
8865 
8866     Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
8867 
8868     // If the described register is the destination register we need to
8869     // sign-extend the source register from 32 bits. The other case we handle
8870     // is when the described register is the 32-bit sub-register of the
8871     // destination register, in case we just need to return the source
8872     // register.
8873     if (Reg == MI.getOperand(0).getReg())
8874       Expr = DIExpression::appendExt(Expr, 32, 64, true);
8875     else
8876       assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
8877              "Unhandled sub-register case for MOVSX64rr32");
8878 
8879     return ParamLoadedValue(MI.getOperand(1), Expr);
8880   }
8881   default:
8882     assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
8883     return TargetInstrInfo::describeLoadedValue(MI, Reg);
8884   }
8885 }
8886 
8887 /// This is an architecture-specific helper function of reassociateOps.
8888 /// Set special operand attributes for new instructions after reassociation.
8889 void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
8890                                          MachineInstr &OldMI2,
8891                                          MachineInstr &NewMI1,
8892                                          MachineInstr &NewMI2) const {
8893   // Propagate FP flags from the original instructions.
8894   // But clear poison-generating flags because those may not be valid now.
8895   // TODO: There should be a helper function for copying only fast-math-flags.
8896   uint16_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
8897   NewMI1.setFlags(IntersectedFlags);
8898   NewMI1.clearFlag(MachineInstr::MIFlag::NoSWrap);
8899   NewMI1.clearFlag(MachineInstr::MIFlag::NoUWrap);
8900   NewMI1.clearFlag(MachineInstr::MIFlag::IsExact);
8901 
8902   NewMI2.setFlags(IntersectedFlags);
8903   NewMI2.clearFlag(MachineInstr::MIFlag::NoSWrap);
8904   NewMI2.clearFlag(MachineInstr::MIFlag::NoUWrap);
8905   NewMI2.clearFlag(MachineInstr::MIFlag::IsExact);
8906 
8907   // Integer instructions may define an implicit EFLAGS dest register operand.
8908   MachineOperand *OldFlagDef1 = OldMI1.findRegisterDefOperand(X86::EFLAGS);
8909   MachineOperand *OldFlagDef2 = OldMI2.findRegisterDefOperand(X86::EFLAGS);
8910 
8911   assert(!OldFlagDef1 == !OldFlagDef2 &&
8912          "Unexpected instruction type for reassociation");
8913 
8914   if (!OldFlagDef1 || !OldFlagDef2)
8915     return;
8916 
8917   assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() &&
8918          "Must have dead EFLAGS operand in reassociable instruction");
8919 
8920   MachineOperand *NewFlagDef1 = NewMI1.findRegisterDefOperand(X86::EFLAGS);
8921   MachineOperand *NewFlagDef2 = NewMI2.findRegisterDefOperand(X86::EFLAGS);
8922 
8923   assert(NewFlagDef1 && NewFlagDef2 &&
8924          "Unexpected operand in reassociable instruction");
8925 
8926   // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
8927   // of this pass or other passes. The EFLAGS operands must be dead in these new
8928   // instructions because the EFLAGS operands in the original instructions must
8929   // be dead in order for reassociation to occur.
8930   NewFlagDef1->setIsDead();
8931   NewFlagDef2->setIsDead();
8932 }
8933 
8934 std::pair<unsigned, unsigned>
8935 X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
8936   return std::make_pair(TF, 0u);
8937 }
8938 
8939 ArrayRef<std::pair<unsigned, const char *>>
8940 X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
8941   using namespace X86II;
8942   static const std::pair<unsigned, const char *> TargetFlags[] = {
8943       {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
8944       {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
8945       {MO_GOT, "x86-got"},
8946       {MO_GOTOFF, "x86-gotoff"},
8947       {MO_GOTPCREL, "x86-gotpcrel"},
8948       {MO_PLT, "x86-plt"},
8949       {MO_TLSGD, "x86-tlsgd"},
8950       {MO_TLSLD, "x86-tlsld"},
8951       {MO_TLSLDM, "x86-tlsldm"},
8952       {MO_GOTTPOFF, "x86-gottpoff"},
8953       {MO_INDNTPOFF, "x86-indntpoff"},
8954       {MO_TPOFF, "x86-tpoff"},
8955       {MO_DTPOFF, "x86-dtpoff"},
8956       {MO_NTPOFF, "x86-ntpoff"},
8957       {MO_GOTNTPOFF, "x86-gotntpoff"},
8958       {MO_DLLIMPORT, "x86-dllimport"},
8959       {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
8960       {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
8961       {MO_TLVP, "x86-tlvp"},
8962       {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
8963       {MO_SECREL, "x86-secrel"},
8964       {MO_COFFSTUB, "x86-coffstub"}};
8965   return makeArrayRef(TargetFlags);
8966 }
8967 
8968 namespace {
8969   /// Create Global Base Reg pass. This initializes the PIC
8970   /// global base register for x86-32.
8971   struct CGBR : public MachineFunctionPass {
8972     static char ID;
8973     CGBR() : MachineFunctionPass(ID) {}
8974 
8975     bool runOnMachineFunction(MachineFunction &MF) override {
8976       const X86TargetMachine *TM =
8977         static_cast<const X86TargetMachine *>(&MF.getTarget());
8978       const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
8979 
8980       // Don't do anything in the 64-bit small and kernel code models. They use
8981       // RIP-relative addressing for everything.
8982       if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
8983                             TM->getCodeModel() == CodeModel::Kernel))
8984         return false;
8985 
8986       // Only emit a global base reg in PIC mode.
8987       if (!TM->isPositionIndependent())
8988         return false;
8989 
8990       X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
8991       Register GlobalBaseReg = X86FI->getGlobalBaseReg();
8992 
8993       // If we didn't need a GlobalBaseReg, don't insert code.
8994       if (GlobalBaseReg == 0)
8995         return false;
8996 
8997       // Insert the set of GlobalBaseReg into the first MBB of the function
8998       MachineBasicBlock &FirstMBB = MF.front();
8999       MachineBasicBlock::iterator MBBI = FirstMBB.begin();
9000       DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
9001       MachineRegisterInfo &RegInfo = MF.getRegInfo();
9002       const X86InstrInfo *TII = STI.getInstrInfo();
9003 
9004       Register PC;
9005       if (STI.isPICStyleGOT())
9006         PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
9007       else
9008         PC = GlobalBaseReg;
9009 
9010       if (STI.is64Bit()) {
9011         if (TM->getCodeModel() == CodeModel::Medium) {
9012           // In the medium code model, use a RIP-relative LEA to materialize the
9013           // GOT.
9014           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
9015               .addReg(X86::RIP)
9016               .addImm(0)
9017               .addReg(0)
9018               .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
9019               .addReg(0);
9020         } else if (TM->getCodeModel() == CodeModel::Large) {
9021           // In the large code model, we are aiming for this code, though the
9022           // register allocation may vary:
9023           //   leaq .LN$pb(%rip), %rax
9024           //   movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
9025           //   addq %rcx, %rax
9026           // RAX now holds address of _GLOBAL_OFFSET_TABLE_.
9027           Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
9028           Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
9029           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
9030               .addReg(X86::RIP)
9031               .addImm(0)
9032               .addReg(0)
9033               .addSym(MF.getPICBaseSymbol())
9034               .addReg(0);
9035           std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
9036           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
9037               .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
9038                                  X86II::MO_PIC_BASE_OFFSET);
9039           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
9040               .addReg(PBReg, RegState::Kill)
9041               .addReg(GOTReg, RegState::Kill);
9042         } else {
9043           llvm_unreachable("unexpected code model");
9044         }
9045       } else {
9046         // Operand of MovePCtoStack is completely ignored by asm printer. It's
9047         // only used in JIT code emission as displacement to pc.
9048         BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
9049 
9050         // If we're using vanilla 'GOT' PIC style, we should use relative
9051         // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
9052         if (STI.isPICStyleGOT()) {
9053           // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
9054           // %some_register
9055           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
9056               .addReg(PC)
9057               .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
9058                                  X86II::MO_GOT_ABSOLUTE_ADDRESS);
9059         }
9060       }
9061 
9062       return true;
9063     }
9064 
9065     StringRef getPassName() const override {
9066       return "X86 PIC Global Base Reg Initialization";
9067     }
9068 
9069     void getAnalysisUsage(AnalysisUsage &AU) const override {
9070       AU.setPreservesCFG();
9071       MachineFunctionPass::getAnalysisUsage(AU);
9072     }
9073   };
9074 } // namespace
9075 
9076 char CGBR::ID = 0;
9077 FunctionPass*
9078 llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
9079 
9080 namespace {
9081   struct LDTLSCleanup : public MachineFunctionPass {
9082     static char ID;
9083     LDTLSCleanup() : MachineFunctionPass(ID) {}
9084 
9085     bool runOnMachineFunction(MachineFunction &MF) override {
9086       if (skipFunction(MF.getFunction()))
9087         return false;
9088 
9089       X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
9090       if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
9091         // No point folding accesses if there isn't at least two.
9092         return false;
9093       }
9094 
9095       MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
9096       return VisitNode(DT->getRootNode(), 0);
9097     }
9098 
9099     // Visit the dominator subtree rooted at Node in pre-order.
9100     // If TLSBaseAddrReg is non-null, then use that to replace any
9101     // TLS_base_addr instructions. Otherwise, create the register
9102     // when the first such instruction is seen, and then use it
9103     // as we encounter more instructions.
9104     bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
9105       MachineBasicBlock *BB = Node->getBlock();
9106       bool Changed = false;
9107 
9108       // Traverse the current block.
9109       for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
9110            ++I) {
9111         switch (I->getOpcode()) {
9112           case X86::TLS_base_addr32:
9113           case X86::TLS_base_addr64:
9114             if (TLSBaseAddrReg)
9115               I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
9116             else
9117               I = SetRegister(*I, &TLSBaseAddrReg);
9118             Changed = true;
9119             break;
9120           default:
9121             break;
9122         }
9123       }
9124 
9125       // Visit the children of this block in the dominator tree.
9126       for (auto I = Node->begin(), E = Node->end(); I != E; ++I) {
9127         Changed |= VisitNode(*I, TLSBaseAddrReg);
9128       }
9129 
9130       return Changed;
9131     }
9132 
9133     // Replace the TLS_base_addr instruction I with a copy from
9134     // TLSBaseAddrReg, returning the new instruction.
9135     MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
9136                                          unsigned TLSBaseAddrReg) {
9137       MachineFunction *MF = I.getParent()->getParent();
9138       const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
9139       const bool is64Bit = STI.is64Bit();
9140       const X86InstrInfo *TII = STI.getInstrInfo();
9141 
9142       // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
9143       MachineInstr *Copy =
9144           BuildMI(*I.getParent(), I, I.getDebugLoc(),
9145                   TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
9146               .addReg(TLSBaseAddrReg);
9147 
9148       // Erase the TLS_base_addr instruction.
9149       I.eraseFromParent();
9150 
9151       return Copy;
9152     }
9153 
9154     // Create a virtual register in *TLSBaseAddrReg, and populate it by
9155     // inserting a copy instruction after I. Returns the new instruction.
9156     MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
9157       MachineFunction *MF = I.getParent()->getParent();
9158       const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
9159       const bool is64Bit = STI.is64Bit();
9160       const X86InstrInfo *TII = STI.getInstrInfo();
9161 
9162       // Create a virtual register for the TLS base address.
9163       MachineRegisterInfo &RegInfo = MF->getRegInfo();
9164       *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
9165                                                       ? &X86::GR64RegClass
9166                                                       : &X86::GR32RegClass);
9167 
9168       // Insert a copy from RAX/EAX to TLSBaseAddrReg.
9169       MachineInstr *Next = I.getNextNode();
9170       MachineInstr *Copy =
9171           BuildMI(*I.getParent(), Next, I.getDebugLoc(),
9172                   TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
9173               .addReg(is64Bit ? X86::RAX : X86::EAX);
9174 
9175       return Copy;
9176     }
9177 
9178     StringRef getPassName() const override {
9179       return "Local Dynamic TLS Access Clean-up";
9180     }
9181 
9182     void getAnalysisUsage(AnalysisUsage &AU) const override {
9183       AU.setPreservesCFG();
9184       AU.addRequired<MachineDominatorTree>();
9185       MachineFunctionPass::getAnalysisUsage(AU);
9186     }
9187   };
9188 }
9189 
9190 char LDTLSCleanup::ID = 0;
9191 FunctionPass*
9192 llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
9193 
9194 /// Constants defining how certain sequences should be outlined.
9195 ///
9196 /// \p MachineOutlinerDefault implies that the function is called with a call
9197 /// instruction, and a return must be emitted for the outlined function frame.
9198 ///
9199 /// That is,
9200 ///
9201 /// I1                                 OUTLINED_FUNCTION:
9202 /// I2 --> call OUTLINED_FUNCTION       I1
9203 /// I3                                  I2
9204 ///                                     I3
9205 ///                                     ret
9206 ///
9207 /// * Call construction overhead: 1 (call instruction)
9208 /// * Frame construction overhead: 1 (return instruction)
9209 ///
9210 /// \p MachineOutlinerTailCall implies that the function is being tail called.
9211 /// A jump is emitted instead of a call, and the return is already present in
9212 /// the outlined sequence. That is,
9213 ///
9214 /// I1                                 OUTLINED_FUNCTION:
9215 /// I2 --> jmp OUTLINED_FUNCTION       I1
9216 /// ret                                I2
9217 ///                                    ret
9218 ///
9219 /// * Call construction overhead: 1 (jump instruction)
9220 /// * Frame construction overhead: 0 (don't need to return)
9221 ///
9222 enum MachineOutlinerClass {
9223   MachineOutlinerDefault,
9224   MachineOutlinerTailCall
9225 };
9226 
9227 outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
9228     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
9229   unsigned SequenceSize =
9230       std::accumulate(RepeatedSequenceLocs[0].front(),
9231                       std::next(RepeatedSequenceLocs[0].back()), 0,
9232                       [](unsigned Sum, const MachineInstr &MI) {
9233                         // FIXME: x86 doesn't implement getInstSizeInBytes, so
9234                         // we can't tell the cost.  Just assume each instruction
9235                         // is one byte.
9236                         if (MI.isDebugInstr() || MI.isKill())
9237                           return Sum;
9238                         return Sum + 1;
9239                       });
9240 
9241   // We check to see if CFI Instructions are present, and if they are
9242   // we find the number of CFI Instructions in the candidates.
9243   unsigned CFICount = 0;
9244   MachineBasicBlock::iterator MBBI = RepeatedSequenceLocs[0].front();
9245   for (unsigned Loc = RepeatedSequenceLocs[0].getStartIdx();
9246        Loc < RepeatedSequenceLocs[0].getEndIdx() + 1; Loc++) {
9247     if (MBBI->isCFIInstruction())
9248       CFICount++;
9249     MBBI++;
9250   }
9251 
9252   // We compare the number of found CFI Instructions to  the number of CFI
9253   // instructions in the parent function for each candidate.  We must check this
9254   // since if we outline one of the CFI instructions in a function, we have to
9255   // outline them all for correctness. If we do not, the address offsets will be
9256   // incorrect between the two sections of the program.
9257   for (outliner::Candidate &C : RepeatedSequenceLocs) {
9258     std::vector<MCCFIInstruction> CFIInstructions =
9259         C.getMF()->getFrameInstructions();
9260 
9261     if (CFICount > 0 && CFICount != CFIInstructions.size())
9262       return outliner::OutlinedFunction();
9263   }
9264 
9265   // FIXME: Use real size in bytes for call and ret instructions.
9266   if (RepeatedSequenceLocs[0].back()->isTerminator()) {
9267     for (outliner::Candidate &C : RepeatedSequenceLocs)
9268       C.setCallInfo(MachineOutlinerTailCall, 1);
9269 
9270     return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
9271                                       0, // Number of bytes to emit frame.
9272                                       MachineOutlinerTailCall // Type of frame.
9273     );
9274   }
9275 
9276   if (CFICount > 0)
9277     return outliner::OutlinedFunction();
9278 
9279   for (outliner::Candidate &C : RepeatedSequenceLocs)
9280     C.setCallInfo(MachineOutlinerDefault, 1);
9281 
9282   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
9283                                     MachineOutlinerDefault);
9284 }
9285 
9286 bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
9287                                            bool OutlineFromLinkOnceODRs) const {
9288   const Function &F = MF.getFunction();
9289 
9290   // Does the function use a red zone? If it does, then we can't risk messing
9291   // with the stack.
9292   if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
9293     // It could have a red zone. If it does, then we don't want to touch it.
9294     const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
9295     if (!X86FI || X86FI->getUsesRedZone())
9296       return false;
9297   }
9298 
9299   // If we *don't* want to outline from things that could potentially be deduped
9300   // then return false.
9301   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
9302       return false;
9303 
9304   // This function is viable for outlining, so return true.
9305   return true;
9306 }
9307 
9308 outliner::InstrType
9309 X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,  unsigned Flags) const {
9310   MachineInstr &MI = *MIT;
9311   // Don't allow debug values to impact outlining type.
9312   if (MI.isDebugInstr() || MI.isIndirectDebugValue())
9313     return outliner::InstrType::Invisible;
9314 
9315   // At this point, KILL instructions don't really tell us much so we can go
9316   // ahead and skip over them.
9317   if (MI.isKill())
9318     return outliner::InstrType::Invisible;
9319 
9320   // Is this a tail call? If yes, we can outline as a tail call.
9321   if (isTailCall(MI))
9322     return outliner::InstrType::Legal;
9323 
9324   // Is this the terminator of a basic block?
9325   if (MI.isTerminator() || MI.isReturn()) {
9326 
9327     // Does its parent have any successors in its MachineFunction?
9328     if (MI.getParent()->succ_empty())
9329       return outliner::InstrType::Legal;
9330 
9331     // It does, so we can't tail call it.
9332     return outliner::InstrType::Illegal;
9333   }
9334 
9335   // Don't outline anything that modifies or reads from the stack pointer.
9336   //
9337   // FIXME: There are instructions which are being manually built without
9338   // explicit uses/defs so we also have to check the MCInstrDesc. We should be
9339   // able to remove the extra checks once those are fixed up. For example,
9340   // sometimes we might get something like %rax = POP64r 1. This won't be
9341   // caught by modifiesRegister or readsRegister even though the instruction
9342   // really ought to be formed so that modifiesRegister/readsRegister would
9343   // catch it.
9344   if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
9345       MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
9346       MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
9347     return outliner::InstrType::Illegal;
9348 
9349   // Outlined calls change the instruction pointer, so don't read from it.
9350   if (MI.readsRegister(X86::RIP, &RI) ||
9351       MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
9352       MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
9353     return outliner::InstrType::Illegal;
9354 
9355   // Positions can't safely be outlined.
9356   if (MI.isPosition())
9357     return outliner::InstrType::Illegal;
9358 
9359   // Make sure none of the operands of this instruction do anything tricky.
9360   for (const MachineOperand &MOP : MI.operands())
9361     if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
9362         MOP.isTargetIndex())
9363       return outliner::InstrType::Illegal;
9364 
9365   return outliner::InstrType::Legal;
9366 }
9367 
9368 void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
9369                                           MachineFunction &MF,
9370                                           const outliner::OutlinedFunction &OF)
9371                                           const {
9372   // If we're a tail call, we already have a return, so don't do anything.
9373   if (OF.FrameConstructionID == MachineOutlinerTailCall)
9374     return;
9375 
9376   // We're a normal call, so our sequence doesn't have a return instruction.
9377   // Add it in.
9378   MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RET64));
9379   MBB.insert(MBB.end(), retq);
9380 }
9381 
9382 MachineBasicBlock::iterator
9383 X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
9384                                  MachineBasicBlock::iterator &It,
9385                                  MachineFunction &MF,
9386                                  const outliner::Candidate &C) const {
9387   // Is it a tail call?
9388   if (C.CallConstructionID == MachineOutlinerTailCall) {
9389     // Yes, just insert a JMP.
9390     It = MBB.insert(It,
9391                   BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
9392                       .addGlobalAddress(M.getNamedValue(MF.getName())));
9393   } else {
9394     // No, insert a call.
9395     It = MBB.insert(It,
9396                   BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
9397                       .addGlobalAddress(M.getNamedValue(MF.getName())));
9398   }
9399 
9400   return It;
9401 }
9402 
9403 #define GET_INSTRINFO_HELPERS
9404 #include "X86GenInstrInfo.inc"
9405