1 //===- PatternMatchTest.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "GISelMITest.h"
10 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
11 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/CodeGen/MIRParser/MIRParser.h"
14 #include "llvm/CodeGen/MachineFunction.h"
15 #include "llvm/CodeGen/MachineModuleInfo.h"
16 #include "llvm/CodeGen/TargetFrameLowering.h"
17 #include "llvm/CodeGen/TargetInstrInfo.h"
18 #include "llvm/CodeGen/TargetLowering.h"
19 #include "llvm/CodeGen/TargetSubtargetInfo.h"
20 #include "llvm/MC/TargetRegistry.h"
21 #include "llvm/Support/SourceMgr.h"
22 #include "llvm/Support/TargetSelect.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Target/TargetOptions.h"
25 #include "gtest/gtest.h"
26 
27 using namespace llvm;
28 using namespace MIPatternMatch;
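// These tests exercise the GlobalISel MIPatternMatch matchers. Each match
// call has the shape
//   mi_match(RegOrMI, MRI, Pattern)
// where Pattern is a tree of matchers (m_GAdd, m_ICst, m_Reg, ...) that is
// checked structurally against the instruction defining the register, with
// sub-matchers optionally binding operands to out-parameters.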
29 
30 namespace {
31 
32 TEST_F(AArch64GISelMITest, MatchIntConstant) {
33   setUp();
34   if (!TM)
35     return;
36   auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
37   int64_t Cst;
38   bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));
39   EXPECT_TRUE(match);
40   EXPECT_EQ(Cst, 42);
41 }
42 
43 TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
44   setUp();
45   if (!TM)
46     return;
47   auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
48   Optional<ValueAndVReg> Src0;
49   bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
50   EXPECT_TRUE(match);
51   EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
52 }
53 
54 TEST_F(AArch64GISelMITest, MachineInstrPtrBind) {
55   setUp();
56   if (!TM)
57     return;
58   auto MIBAdd = B.buildAdd(LLT::scalar(64), Copies[0], Copies[1]);
59   // Test 'MachineInstr *' bind.
60   // Default mi_match.
61   MachineInstr *MIPtr = MIBAdd.getInstr();
62   bool match = mi_match(MIPtr, *MRI, m_GAdd(m_Reg(), m_Reg()));
63   EXPECT_TRUE(match);
64   // Specialized mi_match for MachineInstr &.
65   MachineInstr &MI = *MIBAdd.getInstr();
66   match = mi_match(MI, *MRI, m_GAdd(m_Reg(), m_Reg()));
67   EXPECT_TRUE(match);
68   // MachineInstrBuilder has automatic conversion to MachineInstr *.
69   match = mi_match(MIBAdd, *MRI, m_GAdd(m_Reg(), m_Reg()));
70   EXPECT_TRUE(match);
71   // Match instruction without def.
72   auto MIBBrcond = B.buildBrCond(Copies[0], B.getMBB());
73   MachineInstr *MatchedMI;
74   match = mi_match(MIBBrcond, *MRI, m_MInstr(MatchedMI));
75   EXPECT_TRUE(match);
  EXPECT_EQ(MIBBrcond.getInstr(), MatchedMI);
77   // Match instruction with two defs.
78   auto MIBUAddO =
79       B.buildUAddo(LLT::scalar(64), LLT::scalar(1), Copies[0], Copies[1]);
80   match = mi_match(MIBUAddO, *MRI, m_MInstr(MatchedMI));
81   EXPECT_TRUE(match);
  EXPECT_EQ(MIBUAddO.getInstr(), MatchedMI);
83 }
84 
85 TEST_F(AArch64GISelMITest, MatchBinaryOp) {
86   setUp();
87   if (!TM)
88     return;
89   LLT s32 = LLT::scalar(32);
90   LLT s64 = LLT::scalar(64);
91   LLT p0 = LLT::pointer(0, 64);
92   auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
93   // Test case for no bind.
94   bool match =
95       mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));
96   EXPECT_TRUE(match);
97   Register Src0, Src1, Src2;
98   match = mi_match(MIBAdd.getReg(0), *MRI,
99                    m_GAdd(m_Reg(Src0), m_Reg(Src1)));
100   EXPECT_TRUE(match);
101   EXPECT_EQ(Src0, Copies[0]);
102   EXPECT_EQ(Src1, Copies[1]);
103 
104   // Build MUL(ADD %0, %1), %2
105   auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);
106 
107   // Try to match MUL.
108   match = mi_match(MIBMul.getReg(0), *MRI,
109                    m_GMul(m_Reg(Src0), m_Reg(Src1)));
110   EXPECT_TRUE(match);
111   EXPECT_EQ(Src0, MIBAdd.getReg(0));
112   EXPECT_EQ(Src1, Copies[2]);
113 
114   // Try to match MUL(ADD)
115   match = mi_match(MIBMul.getReg(0), *MRI,
116                    m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
117   EXPECT_TRUE(match);
118   EXPECT_EQ(Src0, Copies[0]);
119   EXPECT_EQ(Src1, Copies[1]);
120   EXPECT_EQ(Src2, Copies[2]);
121 
122   // Test Commutativity.
123   auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
  // Try to match MUL(Cst, Reg) against MUL(Reg, Cst) to validate
  // commutative matching.
126   int64_t Cst;
127   match = mi_match(MIBMul2.getReg(0), *MRI,
128                    m_GMul(m_ICst(Cst), m_Reg(Src0)));
129   EXPECT_TRUE(match);
130   EXPECT_EQ(Cst, 42);
131   EXPECT_EQ(Src0, Copies[0]);
132 
  // Make sure commutative matching doesn't apply to a non-commutative
  // opcode like G_SUB.
134   auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
135   match = mi_match(MIBSub.getReg(0), *MRI,
136                    m_GSub(m_ICst(Cst), m_Reg(Src0)));
137   EXPECT_FALSE(match);
138 
139   auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
140                               {Copies[0], B.buildConstant(s64, 42)});
141   // Match and test commutativity for FMUL.
142   match = mi_match(MIBFMul.getReg(0), *MRI,
143                    m_GFMul(m_ICst(Cst), m_Reg(Src0)));
144   EXPECT_TRUE(match);
145   EXPECT_EQ(Cst, 42);
146   EXPECT_EQ(Src0, Copies[0]);
147 
148   // FSUB
149   auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
150                               {Copies[0], B.buildConstant(s64, 42)});
151   match = mi_match(MIBFSub.getReg(0), *MRI,
152                    m_GFSub(m_Reg(Src0), m_Reg()));
153   EXPECT_TRUE(match);
154   EXPECT_EQ(Src0, Copies[0]);
155 
156   // Build AND %0, %1
157   auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
158   // Try to match AND.
159   match = mi_match(MIBAnd.getReg(0), *MRI,
160                    m_GAnd(m_Reg(Src0), m_Reg(Src1)));
161   EXPECT_TRUE(match);
162   EXPECT_EQ(Src0, Copies[0]);
163   EXPECT_EQ(Src1, Copies[1]);
164 
165   // Build OR %0, %1
166   auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
167   // Try to match OR.
168   match = mi_match(MIBOr.getReg(0), *MRI,
169                    m_GOr(m_Reg(Src0), m_Reg(Src1)));
170   EXPECT_TRUE(match);
171   EXPECT_EQ(Src0, Copies[0]);
172   EXPECT_EQ(Src1, Copies[1]);
173 
174   // Match lshr, and make sure a different shift amount type works.
175   auto TruncCopy1 = B.buildTrunc(s32, Copies[1]);
176   auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
177   match = mi_match(LShr.getReg(0), *MRI,
178                    m_GLShr(m_Reg(Src0), m_Reg(Src1)));
179   EXPECT_TRUE(match);
180   EXPECT_EQ(Src0, Copies[0]);
181   EXPECT_EQ(Src1, TruncCopy1.getReg(0));
182 
183   // Match shl, and make sure a different shift amount type works.
184   auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
185   match = mi_match(Shl.getReg(0), *MRI,
186                    m_GShl(m_Reg(Src0), m_Reg(Src1)));
187   EXPECT_TRUE(match);
188   EXPECT_EQ(Src0, Copies[0]);
189   EXPECT_EQ(Src1, TruncCopy1.getReg(0));
190 
191   // Build a G_PTR_ADD and check that we can match it.
192   auto PtrAdd = B.buildPtrAdd(p0, {B.buildUndef(p0)}, Copies[0]);
193   match = mi_match(PtrAdd.getReg(0), *MRI, m_GPtrAdd(m_Reg(Src0), m_Reg(Src1)));
194   EXPECT_TRUE(match);
195   EXPECT_EQ(Src0, PtrAdd->getOperand(1).getReg());
196   EXPECT_EQ(Src1, Copies[0]);
197 
198   auto MIBCst = B.buildConstant(s64, 42);
199   auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]);
200   auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42));
201 
  // m_BinOp with opcode.
  // Match a binary instruction by opcode, with operands in the given
  // (non-commutative) order.
204   match = mi_match(MIBAddCst, *MRI,
205                    m_BinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
206   EXPECT_TRUE(match);
207   EXPECT_EQ(Src0, Copies[0]);
208   EXPECT_EQ(Cst, 42);
209 
210   // Opcode doesn't match.
211   match = mi_match(MIBAddCst, *MRI,
212                    m_BinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
213   EXPECT_FALSE(match);
214 
215   match = mi_match(MIBAddCst, *MRI,
216                    m_BinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
217   EXPECT_FALSE(match);
218 
219   // Instruction is not binary.
220   match = mi_match(MIBCst, *MRI,
221                    m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
222   EXPECT_FALSE(match);
223   match = mi_match(MIBUnmerge, *MRI,
224                    m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
225   EXPECT_FALSE(match);
226 
227   // m_CommutativeBinOp with opcode.
228   match = mi_match(
229       MIBAddCst, *MRI,
230       m_CommutativeBinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
231   EXPECT_TRUE(match);
232   EXPECT_EQ(Src0, Copies[0]);
233   EXPECT_EQ(Cst, 42);
234 
235   match = mi_match(
236       MIBAddCst, *MRI,
237       m_CommutativeBinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
238   EXPECT_FALSE(match);
239 
240   match = mi_match(
241       MIBAddCst, *MRI,
242       m_CommutativeBinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
243   EXPECT_TRUE(match);
244   EXPECT_EQ(Src0, Copies[0]);
245   EXPECT_EQ(Cst, 42);
246 
247   match = mi_match(
248       MIBCst, *MRI,
249       m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
250   EXPECT_FALSE(match);
251   match = mi_match(
252       MIBUnmerge, *MRI,
253       m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
254   EXPECT_FALSE(match);
255 }
256 
257 TEST_F(AArch64GISelMITest, MatchICmp) {
258   setUp();
259   if (!TM)
260     return;
261 
262   const LLT s1 = LLT::scalar(1);
263   auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, s1, Copies[0], Copies[1]);
264 
  // Check matching any predicate.
266   bool match =
267       mi_match(CmpEq.getReg(0), *MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()));
268   EXPECT_TRUE(match);
269 
270   // Check we get the predicate and registers.
271   CmpInst::Predicate Pred;
272   Register Reg0;
273   Register Reg1;
274   match = mi_match(CmpEq.getReg(0), *MRI,
275                    m_GICmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
276   EXPECT_TRUE(match);
277   EXPECT_EQ(CmpInst::ICMP_EQ, Pred);
278   EXPECT_EQ(Copies[0], Reg0);
279   EXPECT_EQ(Copies[1], Reg1);
280 }
281 
282 TEST_F(AArch64GISelMITest, MatchFCmp) {
283   setUp();
284   if (!TM)
285     return;
286 
287   const LLT s1 = LLT::scalar(1);
288   auto CmpEq = B.buildFCmp(CmpInst::FCMP_OEQ, s1, Copies[0], Copies[1]);
289 
  // Check matching any predicate.
291   bool match =
292       mi_match(CmpEq.getReg(0), *MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg()));
293   EXPECT_TRUE(match);
294 
295   // Check we get the predicate and registers.
296   CmpInst::Predicate Pred;
297   Register Reg0;
298   Register Reg1;
299   match = mi_match(CmpEq.getReg(0), *MRI,
300                    m_GFCmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
301   EXPECT_TRUE(match);
302   EXPECT_EQ(CmpInst::FCMP_OEQ, Pred);
303   EXPECT_EQ(Copies[0], Reg0);
304   EXPECT_EQ(Copies[1], Reg1);
305 }
306 
307 TEST_F(AArch64GISelMITest, MatchFPUnaryOp) {
308   setUp();
309   if (!TM)
310     return;
311 
  // FP-truncate s64 to s32.
313   LLT s32 = LLT::scalar(32);
314   auto Copy0s32 = B.buildFPTrunc(s32, Copies[0]);
315 
316   // Match G_FABS.
317   auto MIBFabs = B.buildInstr(TargetOpcode::G_FABS, {s32}, {Copy0s32});
318   bool match =
319       mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg()));
320   EXPECT_TRUE(match);
321 
322   Register Src;
323   auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
324   match = mi_match(MIBFNeg.getReg(0), *MRI, m_GFNeg(m_Reg(Src)));
325   EXPECT_TRUE(match);
326   EXPECT_EQ(Src, Copy0s32.getReg(0));
327 
328   match = mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg(Src)));
329   EXPECT_TRUE(match);
330   EXPECT_EQ(Src, Copy0s32.getReg(0));
331 
332   // Build and match FConstant.
333   auto MIBFCst = B.buildFConstant(s32, .5);
334   const ConstantFP *TmpFP{};
335   match = mi_match(MIBFCst.getReg(0), *MRI, m_GFCst(TmpFP));
336   EXPECT_TRUE(match);
337   EXPECT_TRUE(TmpFP);
338   APFloat APF((float).5);
339   auto *CFP = ConstantFP::get(Context, APF);
340   EXPECT_EQ(CFP, TmpFP);
341 
  // Build a double-precision constant.
343   LLT s64 = LLT::scalar(64);
344   auto MIBFCst64 = B.buildFConstant(s64, .5);
345   const ConstantFP *TmpFP64{};
346   match = mi_match(MIBFCst64.getReg(0), *MRI, m_GFCst(TmpFP64));
347   EXPECT_TRUE(match);
348   EXPECT_TRUE(TmpFP64);
349   APFloat APF64(.5);
350   auto CFP64 = ConstantFP::get(Context, APF64);
351   EXPECT_EQ(CFP64, TmpFP64);
352   EXPECT_NE(TmpFP64, TmpFP);
353 
  // Build a half-precision constant.
355   LLT s16 = LLT::scalar(16);
356   auto MIBFCst16 = B.buildFConstant(s16, .5);
357   const ConstantFP *TmpFP16{};
358   match = mi_match(MIBFCst16.getReg(0), *MRI, m_GFCst(TmpFP16));
359   EXPECT_TRUE(match);
360   EXPECT_TRUE(TmpFP16);
361   bool Ignored;
362   APFloat APF16(.5);
363   APF16.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
364   auto CFP16 = ConstantFP::get(Context, APF16);
365   EXPECT_EQ(TmpFP16, CFP16);
366   EXPECT_NE(TmpFP16, TmpFP);
367 }
368 
369 TEST_F(AArch64GISelMITest, MatchExtendsTrunc) {
370   setUp();
371   if (!TM)
372     return;
373 
374   LLT s64 = LLT::scalar(64);
375   LLT s32 = LLT::scalar(32);
376 
377   auto MIBTrunc = B.buildTrunc(s32, Copies[0]);
378   auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
379   auto MIBZExt = B.buildZExt(s64, MIBTrunc);
380   auto MIBSExt = B.buildSExt(s64, MIBTrunc);
381   Register Src0;
382   bool match =
383       mi_match(MIBTrunc.getReg(0), *MRI, m_GTrunc(m_Reg(Src0)));
384   EXPECT_TRUE(match);
385   EXPECT_EQ(Src0, Copies[0]);
386   match =
387       mi_match(MIBAExt.getReg(0), *MRI, m_GAnyExt(m_Reg(Src0)));
388   EXPECT_TRUE(match);
389   EXPECT_EQ(Src0, MIBTrunc.getReg(0));
390 
391   match = mi_match(MIBSExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0)));
392   EXPECT_TRUE(match);
393   EXPECT_EQ(Src0, MIBTrunc.getReg(0));
394 
395   match = mi_match(MIBZExt.getReg(0), *MRI, m_GZExt(m_Reg(Src0)));
396   EXPECT_TRUE(match);
397   EXPECT_EQ(Src0, MIBTrunc.getReg(0));
398 
399   // Match ext(trunc src)
400   match = mi_match(MIBAExt.getReg(0), *MRI,
401                    m_GAnyExt(m_GTrunc(m_Reg(Src0))));
402   EXPECT_TRUE(match);
403   EXPECT_EQ(Src0, Copies[0]);
404 
405   match = mi_match(MIBSExt.getReg(0), *MRI,
406                    m_GSExt(m_GTrunc(m_Reg(Src0))));
407   EXPECT_TRUE(match);
408   EXPECT_EQ(Src0, Copies[0]);
409 
410   match = mi_match(MIBZExt.getReg(0), *MRI,
411                    m_GZExt(m_GTrunc(m_Reg(Src0))));
412   EXPECT_TRUE(match);
413   EXPECT_EQ(Src0, Copies[0]);
414 }
415 
416 TEST_F(AArch64GISelMITest, MatchSpecificType) {
417   setUp();
418   if (!TM)
419     return;
420 
  // Try to match a 64-bit add.
422   LLT s64 = LLT::scalar(64);
423   LLT s32 = LLT::scalar(32);
424   auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
425   EXPECT_FALSE(mi_match(MIBAdd.getReg(0), *MRI,
426                         m_GAdd(m_SpecificType(s32), m_Reg())));
427   EXPECT_TRUE(mi_match(MIBAdd.getReg(0), *MRI,
428                        m_GAdd(m_SpecificType(s64), m_Reg())));
429 
430   // Try to match the destination type of a bitcast.
431   LLT v2s32 = LLT::fixed_vector(2, 32);
432   auto MIBCast = B.buildCast(v2s32, Copies[0]);
433   EXPECT_TRUE(
434       mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
435   EXPECT_TRUE(
436       mi_match(MIBCast.getReg(0), *MRI, m_SpecificType(v2s32)));
437   EXPECT_TRUE(
438       mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));
439 
  // Build G_INTTOPTR and G_PTRTOINT casts, then match and test them.
441   LLT PtrTy = LLT::pointer(0, 64);
442   auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
443   auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
444   Register Src0;
445 
  // Match ptrtoint(inttoptr reg).
447   bool match = mi_match(MIBPtrToInt.getReg(0), *MRI,
448                         m_GPtrToInt(m_GIntToPtr(m_Reg(Src0))));
449   EXPECT_TRUE(match);
450   EXPECT_EQ(Src0, Copies[0]);
451 }
452 
453 TEST_F(AArch64GISelMITest, MatchCombinators) {
454   setUp();
455   if (!TM)
456     return;
457 
458   LLT s64 = LLT::scalar(64);
459   LLT s32 = LLT::scalar(32);
460   auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
461   Register Src0, Src1;
462   bool match =
463       mi_match(MIBAdd.getReg(0), *MRI,
464                m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
465   EXPECT_TRUE(match);
466   EXPECT_EQ(Src0, Copies[0]);
467   EXPECT_EQ(Src1, Copies[1]);
468   // Check for s32 (which should fail).
469   match =
470       mi_match(MIBAdd.getReg(0), *MRI,
471                m_all_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
472   EXPECT_FALSE(match);
473   match =
474       mi_match(MIBAdd.getReg(0), *MRI,
475                m_any_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
476   EXPECT_TRUE(match);
477   EXPECT_EQ(Src0, Copies[0]);
478   EXPECT_EQ(Src1, Copies[1]);
479 
480   // Match a case where none of the predicates hold true.
481   match = mi_match(
482       MIBAdd.getReg(0), *MRI,
483       m_any_of(m_SpecificType(LLT::scalar(16)), m_GSub(m_Reg(), m_Reg())));
484   EXPECT_FALSE(match);
485 }
486 
487 TEST_F(AArch64GISelMITest, MatchMiscellaneous) {
488   setUp();
489   if (!TM)
490     return;
491 
492   LLT s64 = LLT::scalar(64);
493   auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
494   Register Reg = MIBAdd.getReg(0);
495 
496   // Only one use of Reg.
497   B.buildCast(LLT::pointer(0, 32), MIBAdd);
498   EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
499   EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
500 
501   // Add multiple debug uses of Reg.
502   B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});
503   B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});
504 
505   EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
506   EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
507 
508   // Multiple non-debug uses of Reg.
509   B.buildCast(LLT::pointer(1, 32), MIBAdd);
510   EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
511   EXPECT_FALSE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
512 }
513 
514 TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
515   setUp();
516   if (!TM)
517     return;
518 
519   // Basic case: Can we match a G_CONSTANT with a specific value?
520   auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
521   EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(42)));
522   EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(123)));
523 
  // Test that this works inside a more complex pattern.
525   LLT s64 = LLT::scalar(64);
526   auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
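  // Operand 0 of the add is its def; operands 1 and 2 are the sources.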
527   EXPECT_TRUE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(42)));
528 
529   // Wrong constant.
530   EXPECT_FALSE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(123)));
531 
532   // No constant on the LHS.
533   EXPECT_FALSE(mi_match(MIBAdd.getReg(1), *MRI, m_SpecificICst(42)));
534 }
535 
536 TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
537   setUp();
538   if (!TM)
539     return;
540 
541   LLT s64 = LLT::scalar(64);
542   LLT v4s64 = LLT::fixed_vector(4, s64);
543 
544   MachineInstrBuilder FortyTwoSplat =
545       B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
546   MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);
547 
548   EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
549   EXPECT_FALSE(
550       mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(43)));
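  // A scalar G_CONSTANT is not a splat.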
551   EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstSplat(42)));
552 
553   MachineInstrBuilder NonConstantSplat =
554       B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
555 
556   MachineInstrBuilder AddSplat =
557       B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
558   EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(42)));
559   EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(43)));
560   EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstSplat(42)));
561 
562   MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
563   EXPECT_FALSE(mi_match(Add.getReg(2), *MRI, m_SpecificICstSplat(42)));
564 }
565 
566 TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
567   setUp();
568   if (!TM)
569     return;
570 
571   LLT s64 = LLT::scalar(64);
572   LLT v4s64 = LLT::fixed_vector(4, s64);
573 
574   MachineInstrBuilder FortyTwoSplat =
575       B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
576   MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);
577 
578   EXPECT_TRUE(
579       mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(42)));
580   EXPECT_FALSE(
581       mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(43)));
582   EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstOrSplat(42)));
583 
584   MachineInstrBuilder NonConstantSplat =
585       B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
586 
587   MachineInstrBuilder AddSplat =
588       B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
589   EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
590   EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(43)));
591   EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstOrSplat(42)));
592 
593   MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
594   EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
595 }
596 
597 TEST_F(AArch64GISelMITest, MatchZeroInt) {
598   setUp();
599   if (!TM)
600     return;
601   auto Zero = B.buildConstant(LLT::scalar(64), 0);
602   EXPECT_TRUE(mi_match(Zero.getReg(0), *MRI, m_ZeroInt()));
603 
604   auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
605   EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_ZeroInt()));
606 }
607 
608 TEST_F(AArch64GISelMITest, MatchAllOnesInt) {
609   setUp();
610   if (!TM)
611     return;
612   auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
613   EXPECT_TRUE(mi_match(AllOnes.getReg(0), *MRI, m_AllOnesInt()));
614 
615   auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
616   EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
617 }
618 
619 TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
620   setUp();
621   if (!TM)
622     return;
623 
624   Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
625   Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
626   Optional<ValueAndVReg> ValReg;
627   Optional<FPValueAndVReg> FValReg;
628 
629   EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
630   EXPECT_EQ(IntOne, ValReg->VReg);
631   EXPECT_FALSE(mi_match(IntOne, *MRI, m_GFCst(FValReg)));
632 
633   EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg)));
634   EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
635   EXPECT_EQ(FPOne, FValReg->VReg);
636 }
637 
638 TEST_F(AArch64GISelMITest, MatchConstantSplat) {
639   setUp();
640   if (!TM)
641     return;
642 
643   LLT s64 = LLT::scalar(64);
644   LLT v4s64 = LLT::fixed_vector(4, 64);
645 
646   Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
647   Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
648   Register Undef = B.buildUndef(s64).getReg(0);
649   Optional<FPValueAndVReg> FValReg;
650 
  // GFCstOrSplatGFCstMatch allows undef elements as part of a splat. Undefs
  // often come from widening a vector to a legal type (e.g. v3s64 to v4s64),
  // where the added elements are padding and are subsequently ignored.
654 
655   EXPECT_TRUE(mi_match(FPZero, *MRI, GFCstOrSplatGFCstMatch(FValReg)));
656   EXPECT_EQ(FPZero, FValReg->VReg);
657 
658   EXPECT_FALSE(mi_match(Undef, *MRI, GFCstOrSplatGFCstMatch(FValReg)));
659 
660   auto ZeroSplat = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPZero});
661   EXPECT_TRUE(
662       mi_match(ZeroSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
663   EXPECT_EQ(FPZero, FValReg->VReg);
664 
665   auto ZeroUndef = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Undef});
666   EXPECT_TRUE(
667       mi_match(ZeroUndef.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
668   EXPECT_EQ(FPZero, FValReg->VReg);
669 
  // A splat of all-undef elements is not a constant splat.
671   auto UndefSplat = B.buildBuildVector(v4s64, {Undef, Undef, Undef, Undef});
672   EXPECT_FALSE(
673       mi_match(UndefSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
674 
675   auto ZeroOne = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPOne});
676   EXPECT_FALSE(
677       mi_match(ZeroOne.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
678 
679   auto NonConstantSplat =
680       B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
681   EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI,
682                         GFCstOrSplatGFCstMatch(FValReg)));
683 
684   auto Mixed = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Copies[0]});
685   EXPECT_FALSE(
686       mi_match(Mixed.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
687 }
688 
689 TEST_F(AArch64GISelMITest, MatchNeg) {
690   setUp();
691   if (!TM)
692     return;
693 
694   LLT s64 = LLT::scalar(64);
695   auto Zero = B.buildConstant(LLT::scalar(64), 0);
696   auto NegInst = B.buildSub(s64, Zero, Copies[0]);
697   Register NegatedReg;
698 
  // Match: G_SUB 0, %Reg
700   EXPECT_TRUE(mi_match(NegInst.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
701   EXPECT_EQ(NegatedReg, Copies[0]);
702 
  // Don't match: G_SUB %Reg, 0
704   auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
705   EXPECT_FALSE(mi_match(NotNegInst1.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
706 
  // Don't match: G_SUB 42, %Reg
708   auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
709   auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
710   EXPECT_FALSE(mi_match(NotNegInst2.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
711 
  // Complex testcase.
  // %sub = G_SUB 0, %negated_reg
  // %add = G_ADD %x, %sub
715   auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
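  // Reset the bound register so we know the match below sets it.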
716   NegatedReg = Register();
717   EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Neg(m_Reg(NegatedReg))));
718   EXPECT_EQ(NegatedReg, Copies[0]);
719 }
720 
721 TEST_F(AArch64GISelMITest, MatchNot) {
722   setUp();
723   if (!TM)
724     return;
725 
726   LLT s64 = LLT::scalar(64);
727   auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
728   auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
729   Register NotReg;
730 
731   // Match: G_XOR %NotReg, -1
732   EXPECT_TRUE(mi_match(NotInst1.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
733   EXPECT_EQ(NotReg, Copies[0]);
734 
735   // Match: G_XOR -1, %NotReg
736   auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
737   EXPECT_TRUE(mi_match(NotInst2.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
738   EXPECT_EQ(NotReg, Copies[1]);
739 
740   // Don't match: G_XOR %NotReg, 42
741   auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
742   auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
743   EXPECT_FALSE(mi_match(WrongCst.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
744 
745   // Complex testcase.
746   // %xor = G_XOR %NotReg, -1
747   // %add = G_ADD %x, %xor
748   auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
749   NotReg = Register();
750   EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Not(m_Reg(NotReg))));
751   EXPECT_EQ(NotReg, Copies[0]);
752 }
753 } // namespace
754 
755 int main(int argc, char **argv) {
756   ::testing::InitGoogleTest(&argc, argv);
757   initLLVM();
758   return RUN_ALL_TESTS();
759 }
760