; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=GCN --check-prefix=FUNC
; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=GCN --check-prefix=FUNC
; RUN: llc < %s -march=amdgcn -mcpu=fiji -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=GCN --check-prefix=FUNC

declare i32 @llvm.r600.read.tidig.x() nounwind readnone
8
; FUNC-LABEL: {{^}}u32_mad24:
; EG: MULADD_UINT24
; SI: v_mad_u32_u24
; VI: v_mad_u32_u24

; Masking both multiplicands to 24 bits (shl 8 / lshr 8) should allow the
; backend to select the 24-bit unsigned multiply-add.
define void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = shl i32 %a, 8
  %a_24 = lshr i32 %0, 8
  %1 = shl i32 %b, 8
  %b_24 = lshr i32 %1, 8
  %2 = mul i32 %a_24, %b_24
  %3 = add i32 %2, %c
  store i32 %3, i32 addrspace(1)* %out
  ret void
}
25
; FUNC-LABEL: {{^}}i16_mad24:
; The order of A and B does not matter.
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 16
; FIXME: Should be using scalar instructions here.
; GCN: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MAD]], 0, 16
define void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
  %0 = mul i16 %a, %b
  %1 = add i16 %0, %c
  %2 = sext i16 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}
43
; FIXME: Need to handle non-uniform case for function below (load without gep).
; FUNC-LABEL: {{^}}i8_mad24:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; GCN: v_mad_u32_u24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
define void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
entry:
  %0 = mul i8 %a, %b
  %1 = add i8 %0, %c
  %2 = sext i8 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}
60
; This tests for a bug where the mad_u24 pattern matcher would call
; SimplifyDemandedBits on the first operand of the mul instruction
; assuming that the pattern would be matched to a 24-bit mad.  This
; led to some instructions being incorrectly erased when the entire
; 24-bit mad pattern wasn't being matched.

; Check that the select instruction is not deleted.
; FUNC-LABEL: {{^}}i24_i32_i32_mad:
; EG: CNDE_INT
; SI: v_cndmask
define void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
entry:
  %0 = ashr i32 %a, 8
  %1 = icmp ne i32 %c, 0
  %2 = select i1 %1, i32 %0, i32 34
  %3 = mul i32 %2, %c
  %4 = add i32 %3, %d
  store i32 %4, i32 addrspace(1)* %out
  ret void
}
81
; The explicit 24-bit masks (and 16777215 = 0xFFFFFF) should be folded into
; the mad_u24 selection rather than emitted as separate v_and instructions.
; FUNC-LABEL: {{^}}extra_and:
; SI-NOT: v_and
; SI: v_mad_u32_u24
; SI: v_mad_u32_u24
define amdgpu_kernel void @extra_and(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = and i32 %tmp7, 16777215
  %tmp9 = and i32 %tmp6, 16777215
  %tmp10 = and i32 %tmp5, 16777215
  %tmp11 = and i32 %tmp, 16777215
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, i32 addrspace(1)* %arg
  ret void
}
111
; The lshr inputs are only known to fit in 24 bits via the shift itself, so
; the shift must survive mad_u24 selection instead of being deleted.
; FUNC-LABEL: {{^}}dont_remove_shift:
; SI: v_lshr
; SI: v_mad_u32_u24
; SI: v_mad_u32_u24
define amdgpu_kernel void @dont_remove_shift(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = lshr i32 %tmp7, 8
  %tmp9 = lshr i32 %tmp6, 8
  %tmp10 = lshr i32 %tmp5, 8
  %tmp11 = lshr i32 %tmp, 8
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, i32 addrspace(1)* %arg
  ret void
}