1; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
2; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI %s
3
4; IEEE bit enabled for compute kernel, so shouldn't use.
5; GCN-LABEL: {{^}}v_omod_div2_f32_enable_ieee_signed_zeros:
6; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
7; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
8; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
define amdgpu_kernel void @v_omod_div2_f32_enable_ieee_signed_zeros(float addrspace(1)* %out, float addrspace(1)* %aptr) #4 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  ; Per-lane addressing so the arithmetic happens in VGPRs.
  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep0
  ; x*0.5 is the div:2 omod pattern, but it must stay a separate multiply:
  ; this is a compute kernel (IEEE bit set) and #4 keeps signed zeros.
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 0.5
  store float %div2, float addrspace(1)* %out.gep
  ret void
}
19
20; IEEE bit enabled for compute kernel, so shouldn't use.
21; GCN-LABEL: {{^}}v_omod_div2_f64_enable_ieee_signed_zeros:
22; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
23; GCN: v_add_f64 [[ADD:v\[[0-9]+:[0-9]+\]]], [[A]], 1.0{{$}}
24; GCN: v_mul_f64 v{{\[[0-9]+:[0-9]+\]}}, [[ADD]], 0.5{{$}}
define amdgpu_kernel void @v_omod_div2_f64_enable_ieee_signed_zeros(double addrspace(1)* %out, double addrspace(1)* %aptr) #4 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep0 = getelementptr double, double addrspace(1)* %aptr, i32 %tid
  %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
  %a = load double, double addrspace(1)* %gep0
  ; f64 variant of the test above: div:2 omod must not fold because the
  ; kernel's IEEE bit is set and #4 keeps signed zeros significant.
  %add = fadd double %a, 1.0
  %div2 = fmul double %add, 0.5
  store double %div2, double addrspace(1)* %out.gep
  ret void
}
35
36; IEEE bit enabled for compute kernel, so shouldn't use even though nsz is allowed
37; GCN-LABEL: {{^}}v_omod_div2_f32_enable_ieee_nsz:
38; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
39; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
40; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
define amdgpu_kernel void @v_omod_div2_f32_enable_ieee_nsz(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep0
  ; #0 permits no-signed-zeros, yet the fold must still be rejected here:
  ; a compute kernel runs with the IEEE bit set.
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 0.5
  store float %div2, float addrspace(1)* %out.gep
  ret void
}
51
52; IEEE bit enabled for compute kernel, so shouldn't use even though nsz is allowed.
53; GCN-LABEL: {{^}}v_omod_div2_f64_enable_ieee_nsz:
54; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
55; GCN: v_add_f64 [[ADD:v\[[0-9]+:[0-9]+\]]], [[A]], 1.0{{$}}
56; GCN: v_mul_f64 v{{\[[0-9]+:[0-9]+\]}}, [[ADD]], 0.5{{$}}
define amdgpu_kernel void @v_omod_div2_f64_enable_ieee_nsz(double addrspace(1)* %out, double addrspace(1)* %aptr) #5 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep0 = getelementptr double, double addrspace(1)* %aptr, i32 %tid
  %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
  %a = load double, double addrspace(1)* %gep0
  ; Fix: the test name/comment claim nsz is allowed, but attribute group #5
  ; has no "no-signed-zeros-fp-math", so without instruction-level nsz flags
  ; the fold was already blocked by signed zeros and the IEEE-bit check was
  ; never exercised. Add nsz, matching the other #5 f64 omod tests; the
  ; expected output (separate add + mul, no omod) is unchanged because the
  ; kernel's IEEE bit still forbids the fold.
  %add = fadd nsz double %a, 1.0
  %div2 = fmul nsz double %add, 0.5
  store double %div2, double addrspace(1)* %out.gep
  ret void
}
67
68; Only allow without IEEE bit if signed zeros are significant.
69; GCN-LABEL: {{^}}v_omod_div2_f32_signed_zeros:
70; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
71; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
define amdgpu_ps void @v_omod_div2_f32_signed_zeros(float %a) #4 {
  ; amdgpu_ps runs without the IEEE bit, but #4 sets
  ; "no-signed-zeros-fp-math"="false", so div:2 still must not fold.
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 0.5
  store float %div2, float addrspace(1)* undef
  ret void
}
78
79; Only allow without IEEE bit if signed zeros are significant.
80; GCN-LABEL: {{^}}v_omod_div2_f64_signed_zeros:
81; GCN: v_add_f64 [[ADD:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, 1.0{{$}}
82; GCN: v_mul_f64 v{{\[[0-9]+:[0-9]+\]}}, [[ADD]], 0.5{{$}}
define amdgpu_ps void @v_omod_div2_f64_signed_zeros(double %a) #4 {
  ; f64 variant: signed zeros are significant (#4), so no div:2 omod fold
  ; even though amdgpu_ps clears the IEEE bit.
  %add = fadd double %a, 1.0
  %div2 = fmul double %add, 0.5
  store double %div2, double addrspace(1)* undef
  ret void
}
89
90; GCN-LABEL: {{^}}v_omod_div2_f32:
91; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 div:2{{$}}
define amdgpu_ps void @v_omod_div2_f32(float %a) #0 {
  ; Positive case: amdgpu_ps (no IEEE bit) + #0 (nsz allowed, f32 denorms
  ; flushed) lets the single-use *0.5 fold into div:2 on the add.
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 0.5
  store float %div2, float addrspace(1)* undef
  ret void
}
98
99; GCN-LABEL: {{^}}v_omod_div2_f64:
100; GCN: v_add_f64  v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 1.0 div:2{{$}}
define amdgpu_ps void @v_omod_div2_f64(double %a) #5 {
  ; Positive f64 case: #5 flushes denorms for all types; nsz comes from the
  ; instruction flags, so *0.5 folds into div:2.
  %add = fadd nsz double %a, 1.0
  %div2 = fmul nsz double %add, 0.5
  store double %div2, double addrspace(1)* undef
  ret void
}
107
108; GCN-LABEL: {{^}}v_omod_mul2_f32:
109; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:2{{$}}
define amdgpu_ps void @v_omod_mul2_f32(float %a) #0 {
  ; Positive case: single-use *2.0 folds into the mul:2 output modifier.
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 2.0
  store float %div2, float addrspace(1)* undef
  ret void
}
116
117; GCN-LABEL: {{^}}v_omod_mul2_f64:
118; GCN: v_add_f64  v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 1.0 mul:2{{$}}
define amdgpu_ps void @v_omod_mul2_f64(double %a) #5 {
  ; Positive f64 case: *2.0 with nsz flags folds into mul:2.
  %add = fadd nsz double %a, 1.0
  %div2 = fmul nsz double %add, 2.0
  store double %div2, double addrspace(1)* undef
  ret void
}
125
126; GCN-LABEL: {{^}}v_omod_mul4_f32:
127; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:4{{$}}
define amdgpu_ps void @v_omod_mul4_f32(float %a) #0 {
  ; Positive case: single-use *4.0 folds into the mul:4 output modifier.
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 4.0
  store float %div2, float addrspace(1)* undef
  ret void
}
134
135; GCN-LABEL: {{^}}v_omod_mul4_f64:
136; GCN: v_add_f64  v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 1.0 mul:4{{$}}
define amdgpu_ps void @v_omod_mul4_f64(double %a) #5 {
  ; Positive f64 case: *4.0 with nsz flags folds into mul:4.
  %add = fadd nsz double %a, 1.0
  %div2 = fmul nsz double %add, 4.0
  store double %div2, double addrspace(1)* undef
  ret void
}
143
144; GCN-LABEL: {{^}}v_omod_mul4_multi_use_f32:
145; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
146; GCN: v_mul_f32_e32 v{{[0-9]+}}, 4.0, [[ADD]]{{$}}
define amdgpu_ps void @v_omod_mul4_multi_use_f32(float %a) #0 {
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 4.0
  store float %div2, float addrspace(1)* undef
  ; The volatile store is a second real use of %add, so folding mul:4 into
  ; the add would change the value that use observes; no fold expected.
  store volatile float %add, float addrspace(1)* undef
  ret void
}
154
155; GCN-LABEL: {{^}}v_omod_mul4_dbg_use_f32:
156; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:4{{$}}
define amdgpu_ps void @v_omod_mul4_dbg_use_f32(float %a) #0 {
  %add = fadd float %a, 1.0
  ; A dbg.value use of %add is not a real use, so unlike the multi-use
  ; test the mul:4 fold is still expected to happen.
  call void @llvm.dbg.value(metadata float %add, i64 0, metadata !4, metadata !9), !dbg !10
  %div2 = fmul float %add, 4.0
  store float %div2, float addrspace(1)* undef
  ret void
}
164
165; Clamp is applied after omod, folding both into instruction is OK.
166; GCN-LABEL: {{^}}v_clamp_omod_div2_f32:
167; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 clamp div:2{{$}}
define amdgpu_ps void @v_clamp_omod_div2_f32(float %a) #0 {
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 0.5

  ; max(x,0)/min(x,1) is the clamp-to-[0,1] idiom; since hardware applies
  ; clamp after omod, both fold onto the same v_add instruction.
  %max = call float @llvm.maxnum.f32(float %div2, float 0.0)
  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
  store float %clamp, float addrspace(1)* undef
  ret void
}
177
178; Cannot fold omod into clamp
179; GCN-LABEL: {{^}}v_omod_div2_clamp_f32:
180; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], v0, 1.0 clamp{{$}}
181; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
define amdgpu_ps void @v_omod_div2_clamp_f32(float %a) #0 {
  %add = fadd float %a, 1.0
  ; Here the clamp happens before the *0.5; clamp folds into the add, but
  ; omod cannot fold across it, so the multiply stays separate.
  %max = call float @llvm.maxnum.f32(float %add, float 0.0)
  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
  %div2 = fmul float %clamp, 0.5
  store float %div2, float addrspace(1)* undef
  ret void
}
190
191; GCN-LABEL: {{^}}v_omod_div2_abs_src_f32:
192; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
193; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ADD]]|, 0.5{{$}}
define amdgpu_ps void @v_omod_div2_abs_src_f32(float %a) #0 {
  %add = fadd float %a, 1.0
  ; fabs becomes a |src| source modifier on the multiply; the intervening
  ; abs blocks folding div:2 back into the add.
  %abs.add = call float @llvm.fabs.f32(float %add)
  %div2 = fmul float %abs.add, 0.5
  store float %div2, float addrspace(1)* undef
  ret void
}
201
202; GCN-LABEL: {{^}}v_omod_add_self_clamp_f32:
203; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, v0 clamp{{$}}
define amdgpu_ps void @v_omod_add_self_clamp_f32(float %a) #0 {
  ; x+x followed by clamp: the clamp folds onto the v_add v0, v0.
  %add = fadd float %a, %a
  %max = call float @llvm.maxnum.f32(float %add, float 0.0)
  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
  store float %clamp, float addrspace(1)* undef
  ret void
}
211
212; GCN-LABEL: {{^}}v_omod_add_clamp_self_f32:
213; GCN: v_max_f32_e64 [[CLAMP:v[0-9]+]], v0, v0 clamp{{$}}
214; GCN: v_add_f32_e32 v{{[0-9]+}}, [[CLAMP]], [[CLAMP]]{{$}}
define amdgpu_ps void @v_omod_add_clamp_self_f32(float %a) #0 {
  ; Reverse order: clamp first, then x+x. The clamp materializes as a
  ; clamped v_max, and the add remains a separate instruction.
  %max = call float @llvm.maxnum.f32(float %a, float 0.0)
  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
  %add = fadd float %clamp, %clamp
  store float %add, float addrspace(1)* undef
  ret void
}
222
223; GCN-LABEL: {{^}}v_omod_add_abs_self_f32:
224; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
225; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, |[[X]]|{{$}}
define amdgpu_ps void @v_omod_add_abs_self_f32(float %a) #0 {
  %x = fadd float %a, 1.0
  ; |x| + |x|: both operands of the second add carry the abs modifier.
  %abs.x = call float @llvm.fabs.f32(float %x)
  %add = fadd float %abs.x, %abs.x
  store float %add, float addrspace(1)* undef
  ret void
}
233
234; GCN-LABEL: {{^}}v_omod_add_abs_x_x_f32:
235
236; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
237; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, [[X]]{{$}}
define amdgpu_ps void @v_omod_add_abs_x_x_f32(float %a) #0 {
  %x = fadd float %a, 1.0
  ; |x| + x: only the first operand gets the abs source modifier.
  %abs.x = call float @llvm.fabs.f32(float %x)
  %add = fadd float %abs.x, %x
  store float %add, float addrspace(1)* undef
  ret void
}
245
246; GCN-LABEL: {{^}}v_omod_add_x_abs_x_f32:
247; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
248; GCN: v_add_f32_e64 v{{[0-9]+}}, [[X]], |[[X]]|{{$}}
define amdgpu_ps void @v_omod_add_x_abs_x_f32(float %a) #0 {
  %x = fadd float %a, 1.0
  ; x + |x|: only the second operand gets the abs source modifier.
  %abs.x = call float @llvm.fabs.f32(float %x)
  %add = fadd float %x, %abs.x
  store float %add, float addrspace(1)* undef
  ret void
}
256
257; Don't fold omod into omod into another omod.
258; GCN-LABEL: {{^}}v_omod_div2_omod_div2_f32:
259; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], v0, 1.0 div:2{{$}}
260; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
define amdgpu_ps void @v_omod_div2_omod_div2_f32(float %a) #0 {
  %add = fadd float %a, 1.0
  ; Two chained *0.5: only the first folds (div:2 on the add); an
  ; instruction that already has omod can't absorb another one.
  %div2.0 = fmul float %add, 0.5
  %div2.1 = fmul float %div2.0, 0.5
  store float %div2.1, float addrspace(1)* undef
  ret void
}
268
269; Don't fold omod if denorms enabled
270; GCN-LABEL: {{^}}v_omod_div2_f32_denormals:
271; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
272; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
define amdgpu_ps void @v_omod_div2_f32_denormals(float %a) #2 {
  ; #2 enables f32 denormals ("denormal-fp-math-f32"="ieee,ieee"), which
  ; forbids the omod fold, so add and mul stay separate.
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 0.5
  store float %div2, float addrspace(1)* undef
  ret void
}
279
280; Don't fold omod if denorms enabled.
281; GCN-LABEL: {{^}}v_omod_div2_f64_denormals:
282; GCN: v_add_f64 [[ADD:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, 1.0{{$}}
283; GCN: v_mul_f64 v{{\[[0-9]+:[0-9]+\]}}, [[ADD]], 0.5{{$}}
define amdgpu_ps void @v_omod_div2_f64_denormals(double %a) #6 {
  ; #6 enables denormals for all types ("denormal-fp-math"="ieee,ieee"),
  ; blocking the f64 div:2 fold.
  %add = fadd double %a, 1.0
  %div2 = fmul double %add, 0.5
  store double %div2, double addrspace(1)* undef
  ret void
}
290
291; Don't fold omod if denorms enabled for add form.
292; GCN-LABEL: {{^}}v_omod_mul2_f32_denormals:
293; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
294; GCN: v_add_f32_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
define amdgpu_ps void @v_omod_mul2_f32_denormals(float %a) #2 {
  %add = fadd float %a, 1.0
  ; x+x is the add form of the mul:2 omod pattern; #2's f32 denormals
  ; keep it from folding.
  %mul2 = fadd float %add, %add
  store float %mul2, float addrspace(1)* undef
  ret void
}
301
302; Don't fold omod if denorms enabled for add form.
303; GCN-LABEL: {{^}}v_omod_mul2_f64_denormals:
304; GCN: v_add_f64 [[ADD:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, 1.0{{$}}
305; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, [[ADD]], [[ADD]]{{$}}
define amdgpu_ps void @v_omod_mul2_f64_denormals(double %a) #2 {
  %add = fadd double %a, 1.0
  ; NOTE(review): #2 only names the f32 denormal mode; presumably the f64
  ; fold is blocked because f64 denormals are enabled by default here —
  ; confirm (the similar div2 test uses #6 instead).
  %mul2 = fadd double %add, %add
  store double %mul2, double addrspace(1)* undef
  ret void
}
312
313; Don't fold omod if denorms enabled
314; GCN-LABEL: {{^}}v_omod_div2_f16_denormals:
315; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
316; VI: v_mul_f16_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
define amdgpu_ps void @v_omod_div2_f16_denormals(half %a) #0 {
  ; VI-only checks since f16 needs VI+. #0 only flushes f32 denorms;
  ; presumably f16 denormals default to enabled, blocking the fold —
  ; confirm against the #3 no-denormals variant below which does fold.
  %add = fadd half %a, 1.0
  %div2 = fmul half %add, 0.5
  store half %div2, half addrspace(1)* undef
  ret void
}
323
324; Don't fold omod if denorms enabled for add form.
325; GCN-LABEL: {{^}}v_omod_mul2_f16_denormals:
326; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
327; VI: v_add_f16_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
define amdgpu_ps void @v_omod_mul2_f16_denormals(half %a) #0 {
  %add = fadd half %a, 1.0
  ; Add form (x+x) of mul:2 for f16; no fold expected while f16 denormals
  ; are in effect (see note on the div2 f16 test).
  %mul2 = fadd half %add, %add
  store half %mul2, half addrspace(1)* undef
  ret void
}
334
335; GCN-LABEL: {{^}}v_omod_div2_f16_no_denormals:
336; VI-NOT: v0
337; VI: v_add_f16_e64 [[ADD:v[0-9]+]], v0, 1.0 div:2{{$}}
define amdgpu_ps void @v_omod_div2_f16_no_denormals(half %a) #3 {
  ; #3 flushes denormals for all types ("denormal-fp-math"), so the f16
  ; *0.5 folds into div:2 on the add.
  %add = fadd half %a, 1.0
  %div2 = fmul half %add, 0.5
  store half %div2, half addrspace(1)* undef
  ret void
}
344
345; GCN-LABEL: {{^}}v_omod_mac_to_mad:
346; GCN: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}} mul:2{{$}}
define amdgpu_ps void @v_omod_mac_to_mad(float %b, float %a) #0 {
  ; mul+add selects as a mad; the single-use *2.0 then folds as mul:2 on
  ; that v_mad. The trailing fmul keeps %b live as a separate use.
  %mul = fmul float %a, %a
  %add = fadd float %mul, %b
  %mad = fmul float %add, 2.0
  %res = fmul float %mad, %b
  store float %res, float addrspace(1)* undef
  ret void
}
355
; Intrinsic declarations used by the tests above (not all are referenced).
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.fabs.f32(float) #1
declare float @llvm.floor.f32(float) #1
declare float @llvm.minnum.f32(float, float) #1
declare float @llvm.maxnum.f32(float, float) #1
declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1
declare double @llvm.fabs.f64(double) #1
declare double @llvm.minnum.f64(double, double) #1
declare double @llvm.maxnum.f64(double, double) #1
declare half @llvm.fabs.f16(half) #1
declare half @llvm.minnum.f16(half, half) #1
declare half @llvm.maxnum.f16(half, half) #1
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1

; #0: f32 denorms flushed + nsz allowed -> omod-foldable for f32.
attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" "no-signed-zeros-fp-math"="true" }
; #1: attributes for the intrinsic declarations.
attributes #1 = { nounwind readnone }
; #2: f32 denorms enabled -> blocks the f32 omod fold.
attributes #2 = { nounwind "denormal-fp-math-f32"="ieee,ieee" "no-signed-zeros-fp-math"="true" }
; #3: denorms flushed for all types + nsz -> omod-foldable (f16 test).
attributes #3 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" "no-signed-zeros-fp-math"="true" }
; #4: signed zeros explicitly significant -> blocks omod everywhere.
attributes #4 = { nounwind "no-signed-zeros-fp-math"="false" }
; #5: denorms flushed for all types; nsz must come from instruction flags.
attributes #5 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" }
; #6: denorms enabled for all types -> blocks the f64 omod fold.
attributes #6 = { nounwind "denormal-fp-math"="ieee,ieee" "no-signed-zeros-fp-math"="true" }
377
; Minimal debug info so @v_omod_mul4_dbg_use_f32 can emit a dbg.value.
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!2, !3}

!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug)
!1 = !DIFile(filename: "/tmp/foo.cl", directory: "/dev/null")
!2 = !{i32 2, !"Dwarf Version", i32 4}
!3 = !{i32 2, !"Debug Info Version", i32 3}
!4 = !DILocalVariable(name: "add", arg: 1, scope: !5, file: !1, line: 1)
!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
!6 = !DISubroutineType(types: !7)
!7 = !{null, !8}
!8 = !DIBasicType(name: "float", size: 32, align: 32)
!9 = !DIExpression()
!10 = !DILocation(line: 1, column: 42, scope: !5)
392