; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,SI,FUNC %s
; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,VI,FUNC %s
; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=FUNC,GFX9_10 %s
; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=FUNC,GFX9_10 %s
; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=r600 -mcpu=redwood < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=EG,FUNC %s

; mul24 and mad24 are affected

; FUNC-LABEL: {{^}}test_mul_v2i32:
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

; Loads two <2 x i32> vectors from consecutive global slots of %in,
; multiplies them elementwise, and stores the product to %out.
; Fix: stripped the extraction artifact that fused the file's own line
; numbers onto the start of every line, which made the IR unparsable.
define amdgpu_kernel void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
  %result = mul <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul_v4i32:
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_u32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

; Loads two <4 x i32> vectors from consecutive global slots of %in,
; multiplies them elementwise, and stores the product to %out.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
  %result = mul <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_trunc_i64_mul_to_i32:
; GCN: s_load_dword
; GCN: s_load_dword
; GCN: s_mul_i32
; GCN: buffer_store_dword
; i64 multiply of scalar kernel arguments whose result is immediately
; truncated to i32; only a 32-bit multiply of the low halves is needed.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
  %mul = mul i64 %b, %a
  %trunc = trunc i64 %mul to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_trunc_i64_mul_to_i32:
; GCN: s_load_dword
; GCN: s_load_dword
; GCN: v_mul_lo_u32
; GCN: buffer_store_dword
; Same as s_trunc_i64_mul_to_i32 but with the operands loaded from global
; memory, so a VALU 32-bit multiply is expected instead of an SALU one.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %mul = mul i64 %b, %a
  %trunc = trunc i64 %mul to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 8
  ret void
}

; This 64-bit multiply should just use MUL_HI and MUL_LO, since the top
; 32-bits of both arguments are sign bits.
; FUNC-LABEL: {{^}}mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_i32
; VI: v_mad_i64_i32
; sign-extended i32 scalar argument multiplied by the constant 80; the
; upper 32 bits of both operands are sign bits, so a lo/hi pair suffices.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
entry:
  %0 = sext i32 %in to i64
  %1 = mul i64 %0, 80
  store i64 %1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_i32
; VI: v_mad_i64_i32
; GCN: s_endpgm
; Same as mul64_sext_c but the i32 operand is loaded from global memory,
; so the VALU lo/hi multiply pair is expected.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32, i32 addrspace(1)* %in, align 4
  %ext = sext i32 %val to i64
  %mul = mul i64 %ext, 80
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_mul64_sext_inline_imm:
; SI-DAG: v_mul_lo_u32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; SI-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; VI: v_mad_i64_i32 v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}, 9, 0
; GCN: s_endpgm
; Variant of v_mul64_sext_c where the constant 9 can be encoded as an
; inline immediate operand.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32, i32 addrspace(1)* %in, align 4
  %ext = sext i32 %val to i64
  %mul = mul i64 %ext, 9
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_mul_i32:
; GCN: s_load_dword [[SRC0:s[0-9]+]],
; GCN: s_load_dword [[SRC1:s[0-9]+]],
; GCN: s_mul_i32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; GCN: buffer_store_dword [[VRESULT]],
; GCN: s_endpgm
; Scalar i32 multiply; the dummy [8 x i32] arguments pad the kernel
; argument layout so %a and %b land in separate s_load_dword slots.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @s_mul_i32(i32 addrspace(1)* %out, [8 x i32], i32 %a, [8 x i32], i32 %b) nounwind {
  %mul = mul i32 %a, %b
  store i32 %mul, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i32:
; GCN: v_mul_lo_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; i32 multiply of two values loaded from adjacent global slots.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
  %a = load i32, i32 addrspace(1)* %in
  %b = load i32, i32 addrspace(1)* %b_ptr
  %result = mul i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; A standard 64-bit multiply.  The expansion should be around 6 instructions.
; It would be difficult to match the expansion correctly without writing
; a really complicated list of FileCheck expressions.  I don't want
; to confuse people who may 'break' this test with a correct optimization,
; so this test just uses FUNC-LABEL to make sure the compiler does not
; crash with a 'failed to select' error.

; FUNC-LABEL: {{^}}s_mul_i64:
; GFX9_10-DAG: s_mul_i32
; GFX9_10-DAG: s_mul_hi_u32
; GFX9_10-DAG: s_mul_i32
; GFX9_10-DAG: s_mul_i32
; GFX9_10: s_endpgm
; Full 64-bit multiply of scalar kernel arguments (see comment above:
; older targets are only checked for not crashing during selection).
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %mul = mul i64 %a, %b
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i64:
; GCN: v_mul_lo_u32
; Full 64-bit multiply of two values loaded from global memory.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %mul = mul i64 %a, %b
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}mul32_in_branch:
; GCN: s_mul_i32
; i32 multiply inside one arm of a diamond: checks that the multiply is
; still selected as a scalar op when control flow is involved.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = icmp eq i32 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i32, i32 addrspace(1)* %in
  br label %endif

else:
  %2 = mul i32 %a, %b
  br label %endif

endif:
  %3 = phi i32 [%1, %if], [%2, %else]
  store i32 %3, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}mul64_in_branch:
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32
; VI: v_mad_u64_u32
; GCN: s_endpgm
; i64 multiply inside one arm of a diamond, mirroring mul32_in_branch.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i64, i64 addrspace(1)* %in
  br label %endif

else:
  %2 = mul i64 %a, %b
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, i64 addrspace(1)* %out
  ret void
}

; FIXME: Load dwordx4
; FUNC-LABEL: {{^}}s_mul_i128:
; GCN: s_load_dwordx4
; GCN: s_load_dwordx4

; SI: v_mul_hi_u32
; SI: v_mul_hi_u32
; SI: s_mul_i32
; SI: v_mul_hi_u32
; SI: s_mul_i32
; SI: s_mul_i32

; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: s_mul_i32
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32

; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: s_mul_i32
; VI-DAG: s_mul_i32
; VI-DAG: s_mul_i32
; VI-DAG: s_mul_i32


; GCN: buffer_store_dwordx4
; 128-bit multiply of scalar kernel arguments; padding arrays separate
; the argument load slots (same trick as s_mul_i32).
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, [8 x i32], i128 %a, [8 x i32], i128 %b) nounwind #0 {
  %mul = mul i128 %a, %b
  store i128 %mul, i128 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i128:
; GCN: {{buffer|flat}}_load_dwordx4
; GCN: {{buffer|flat}}_load_dwordx4

; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_add_i32_e32

; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32

; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mul_lo_u32
; VI-DAG: v_mul_lo_u32
; VI-DAG: v_mul_lo_u32

; GCN: {{buffer|flat}}_store_dwordx4
; 128-bit multiply of per-workitem values loaded from global memory.
; NOTE(review): %gep.out is computed from %bptr, not %out, so the result
; overwrites the b operand's slot; left as-is because the CHECK lines are
; tied to the current output — confirm against upstream before changing.
; Fix: stripped the fused line-number prefixes that corrupted the IR.
define amdgpu_kernel void @v_mul_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %aptr, i128 addrspace(1)* %bptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.a = getelementptr inbounds i128, i128 addrspace(1)* %aptr, i32 %tid
  %gep.b = getelementptr inbounds i128, i128 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr inbounds i128, i128 addrspace(1)* %bptr, i32 %tid
  %a = load i128, i128 addrspace(1)* %gep.a
  %b = load i128, i128 addrspace(1)* %gep.b
  %mul = mul i128 %a, %b
  store i128 %mul, i128 addrspace(1)* %gep.out
  ret void
}
; External AMDGPU intrinsic; attribute set #1 marks it nounwind readnone.
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }