; RUN: llc -march=amdgcn -mcpu=gfx900 -O3 < %s | FileCheck -check-prefix=GCN %s
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds < %s | FileCheck %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s | FileCheck %s
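; Verify that amdgpu-lower-module-lds attaches !alias.scope/!noalias metadata to
; the rewritten LDS accesses so stores and loads to distinct LDS arrays are known
; not to clobber each other, which lets the backend form the ds_write2st64_b32 /
; ds_read2st64_b32 pairs checked below.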

@a = internal unnamed_addr addrspace(3) global [64 x i32] undef, align 4
@b = internal unnamed_addr addrspace(3) global [64 x i32] undef, align 4
@c = internal unnamed_addr addrspace(3) global [64 x i32] undef, align 4

; GCN-LABEL: {{^}}no_clobber_ds_load_stores_x2:
; GCN: ds_write2st64_b32
; GCN: ds_read2st64_b32

; CHECK-LABEL: @no_clobber_ds_load_stores_x2
; CHECK: store i32 1, i32 addrspace(3)* %0, align 16, !alias.scope !0, !noalias !3
; CHECK: %val.a = load i32, i32 addrspace(3)* %gep.a, align 4, !alias.scope !0, !noalias !3
; CHECK: store i32 2, i32 addrspace(3)* %1, align 16, !alias.scope !3, !noalias !0
; CHECK: %val.b = load i32, i32 addrspace(3)* %gep.b, align 4, !alias.scope !3, !noalias !0

define amdgpu_kernel void @no_clobber_ds_load_stores_x2(i32 addrspace(1)* %arg, i32 %i) {
bb:
  store i32 1, i32 addrspace(3)* getelementptr inbounds ([64 x i32], [64 x i32] addrspace(3)* @a, i32 0, i32 0), align 4
  %gep.a = getelementptr inbounds [64 x i32], [64 x i32] addrspace(3)* @a, i32 0, i32 %i
  %val.a = load i32, i32 addrspace(3)* %gep.a, align 4
  store i32 2, i32 addrspace(3)* getelementptr inbounds ([64 x i32], [64 x i32] addrspace(3)* @b, i32 0, i32 0), align 4
  %gep.b = getelementptr inbounds [64 x i32], [64 x i32] addrspace(3)* @b, i32 0, i32 %i
  %val.b = load i32, i32 addrspace(3)* %gep.b, align 4
  %val = add i32 %val.a, %val.b
  store i32 %val, i32 addrspace(1)* %arg, align 4
  ret void
}

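; With three LDS arrays, only two of the stores/loads pair up into st64 form;
; the remaining access stays a plain ds_write_b32 / ds_read_b32.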
; GCN-LABEL: {{^}}no_clobber_ds_load_stores_x3:
; GCN-DAG: ds_write2st64_b32
; GCN-DAG: ds_write_b32
; GCN-DAG: ds_read2st64_b32
; GCN-DAG: ds_read_b32

; CHECK-LABEL: @no_clobber_ds_load_stores_x3
; CHECK: store i32 1, i32 addrspace(3)* %0, align 16, !alias.scope !5, !noalias !8
; CHECK: %val.a = load i32, i32 addrspace(3)* %gep.a, align 4, !alias.scope !5, !noalias !8
; CHECK: store i32 2, i32 addrspace(3)* %1, align 16, !alias.scope !11, !noalias !12
; CHECK: %val.b = load i32, i32 addrspace(3)* %gep.b, align 4, !alias.scope !11, !noalias !12
; CHECK: store i32 3, i32 addrspace(3)* %2, align 16, !alias.scope !13, !noalias !14
; CHECK: %val.c = load i32, i32 addrspace(3)* %gep.c, align 4, !alias.scope !13, !noalias !14

define amdgpu_kernel void @no_clobber_ds_load_stores_x3(i32 addrspace(1)* %arg, i32 %i) {
bb:
  store i32 1, i32 addrspace(3)* getelementptr inbounds ([64 x i32], [64 x i32] addrspace(3)* @a, i32 0, i32 0), align 4
  %gep.a = getelementptr inbounds [64 x i32], [64 x i32] addrspace(3)* @a, i32 0, i32 %i
  %val.a = load i32, i32 addrspace(3)* %gep.a, align 4
  store i32 2, i32 addrspace(3)* getelementptr inbounds ([64 x i32], [64 x i32] addrspace(3)* @b, i32 0, i32 0), align 4
  %gep.b = getelementptr inbounds [64 x i32], [64 x i32] addrspace(3)* @b, i32 0, i32 %i
  %val.b = load i32, i32 addrspace(3)* %gep.b, align 4
  store i32 3, i32 addrspace(3)* getelementptr inbounds ([64 x i32], [64 x i32] addrspace(3)* @c, i32 0, i32 0), align 4
  %gep.c = getelementptr inbounds [64 x i32], [64 x i32] addrspace(3)* @c, i32 0, i32 %i
  %val.c = load i32, i32 addrspace(3)* %gep.c, align 4
  %val.1 = add i32 %val.a, %val.b
  %val = add i32 %val.1, %val.c
  store i32 %val, i32 addrspace(1)* %arg, align 4
  ret void
}

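; Each kernel gets its own alias scope domain (!2 for x2, !7 for x3) with one
; scope per lowered LDS variable; every access lists its own scope in
; !alias.scope and the sibling variables' scopes in !noalias.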
; CHECK: !0 = !{!1}
; CHECK: !1 = distinct !{!1, !2}
; CHECK: !2 = distinct !{!2}
; CHECK: !3 = !{!4}
; CHECK: !4 = distinct !{!4, !2}
; CHECK: !5 = !{!6}
; CHECK: !6 = distinct !{!6, !7}
; CHECK: !7 = distinct !{!7}
; CHECK: !8 = !{!9, !10}
; CHECK: !9 = distinct !{!9, !7}
; CHECK: !10 = distinct !{!10, !7}
; CHECK: !11 = !{!9}
; CHECK: !12 = !{!6, !10}
; CHECK: !13 = !{!10}
; CHECK: !14 = !{!6, !9}