; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=GCN %s

; GCN-LABEL: {{^}}test_membound:
; GCN: MemoryBound: 1
; GCN: WaveLimiterHint : 1
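; A kernel dominated by back-to-back 16-byte global loads and stores is
; reported as memory bound and also gets the wave limiter hint.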
define amdgpu_kernel void @test_membound(<4 x i32> addrspace(1)* nocapture readonly %arg, <4 x i32> addrspace(1)* nocapture %arg1) {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp to i64
  %tmp3 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp2
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp3, align 16
  %tmp5 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp2
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %tmp5, align 16
  %tmp6 = add nuw nsw i64 %tmp2, 1
  %tmp7 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp7, align 16
  %tmp9 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp6
  store <4 x i32> %tmp8, <4 x i32> addrspace(1)* %tmp9, align 16
  ret void
}

; GCN-LABEL: {{^}}test_membound_1:
; GCN: MemoryBound: 1
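; The fadd chains add ALU work, but the run of 16-byte global loads still
; dominates, so the kernel remains memory bound.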
define amdgpu_kernel void @test_membound_1(<2 x double> addrspace(1)* nocapture readonly %ptr.0,
                                           <2 x double> addrspace(1)* nocapture %ptr.1,
                                           <2 x double> %arg.0, i32 %arg.1, <4 x double> %arg.2) {
bb.entry:
  %id.32 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.0 = zext i32 %id.32 to i64
  %gep.0 = getelementptr inbounds <2 x double>, <2 x double> addrspace(1)* %ptr.0, i64 %id.0
  %ld.0 = load <2 x double>, <2 x double> addrspace(1)* %gep.0, align 16
  %add.0 = fadd <2 x double> %arg.0, %ld.0

  %id.1 = add nuw nsw i64 %id.0, 1
  %gep.1 = getelementptr inbounds <2 x double>, <2 x double> addrspace(1)* %ptr.0, i64 %id.1
  %ld.1 = load <2 x double>, <2 x double> addrspace(1)* %gep.1, align 16
  %add.1 = fadd <2 x double> %add.0, %ld.1

  %id.2 = add nuw nsw i64 %id.0, 2
  %gep.2 = getelementptr inbounds <2 x double>, <2 x double> addrspace(1)* %ptr.0, i64 %id.2
  %ld.2 = load <2 x double>, <2 x double> addrspace(1)* %gep.2, align 16
  %add.2 = fadd <2 x double> %add.1, %ld.2

  %id.3 = add nuw nsw i64 %id.0, 3
  %gep.3 = getelementptr inbounds <2 x double>, <2 x double> addrspace(1)* %ptr.0, i64 %id.3
  %ld.3 = load <2 x double>, <2 x double> addrspace(1)* %gep.3, align 16
  %add.3 = fadd <2 x double> %add.2, %ld.3

  %id.4 = add nuw nsw i64 %id.0, 4
  %gep.4 = getelementptr inbounds <2 x double>, <2 x double> addrspace(1)* %ptr.0, i64 %id.4
  %ld.4 = load <2 x double>, <2 x double> addrspace(1)* %gep.4, align 16
  %add.4 = fadd <2 x double> %add.3, %ld.4

  store <2 x double> %add.4, <2 x double> addrspace(1)* %ptr.1, align 16
  %cond = icmp eq i32 %arg.1, 0
  br i1 %cond, label %bb.true, label %bb.ret

bb.true:
  %i0.arg.0 = extractelement <2 x double> %arg.0, i32 0
  %i1.arg.0 = extractelement <2 x double> %arg.0, i32 1
  %add.1.0 = fadd double %i0.arg.0, %i1.arg.0
  %i0.arg.2 = extractelement <4 x double> %arg.2, i32 0
  %i1.arg.2 = extractelement <4 x double> %arg.2, i32 1
  %add.1.1 = fadd double %i0.arg.2, %i1.arg.2
  %add.1.2 = fadd double %add.1.0, %add.1.1
  %i2.arg.2 = extractelement <4 x double> %arg.2, i32 2
  %i3.arg.2 = extractelement <4 x double> %arg.2, i32 3
  %add.1.3 = fadd double %i2.arg.2, %i3.arg.2
  %add.1.4 = fadd double %add.1.2, %add.1.3
  %i0.add.0 = extractelement <2 x double> %add.0, i32 0
  %i1.add.0 = extractelement <2 x double> %add.0, i32 1
  %add.1.5 = fadd double %i0.add.0, %i1.add.0
  %add.1.6 = fadd double %add.1.4, %add.1.5
  %i0.add.1 = extractelement <2 x double> %add.1, i32 0
  %i1.add.1 = extractelement <2 x double> %add.1, i32 1
  %add.1.7 = fadd double %i0.add.1, %i1.add.1
  %add.1.8 = fadd double %add.1.6, %add.1.7
  %i0.add.2 = extractelement <2 x double> %add.2, i32 0
  %i1.add.2 = extractelement <2 x double> %add.2, i32 1
  %add.1.9 = fadd double %i0.add.2, %i1.add.2
  %add.1.10 = fadd double %add.1.8, %add.1.9

  %ptr.1.bc = bitcast <2 x double> addrspace(1)* %ptr.1 to double addrspace(1)*
  store double %add.1.10, double addrspace(1)* %ptr.1.bc, align 8
  br label %bb.ret

bb.ret:
  ret void
}

; GCN-LABEL: {{^}}test_large_stride:
; GCN: MemoryBound: 0
; GCN: WaveLimiterHint : 1
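; Accesses with a very large stride (4096 elements apart) do not make the
; kernel memory bound, but they do set the wave limiter hint.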
define amdgpu_kernel void @test_large_stride(i32 addrspace(1)* nocapture %arg) {
bb:
  %tmp = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 4096
  %tmp1 = load i32, i32 addrspace(1)* %tmp, align 4
  %mul1 = mul i32 %tmp1, %tmp1
  %tmp2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
  store i32 %mul1, i32 addrspace(1)* %tmp2, align 4
  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 8192
  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
  %mul4 = mul i32 %tmp4, %tmp4
  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
  store i32 %mul4, i32 addrspace(1)* %tmp5, align 4
  %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 12288
  %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
  %mul7 = mul i32 %tmp7, %tmp7
  %tmp8 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 3
  store i32 %mul7, i32 addrspace(1)* %tmp8, align 4
  ret void
}

; GCN-LABEL: {{^}}test_indirect:
; GCN: MemoryBound: 0
; GCN: WaveLimiterHint : 1
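; Loads whose addresses are themselves loaded from memory (indirect accesses)
; set the wave limiter hint without marking the kernel memory bound.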
define amdgpu_kernel void @test_indirect(i32 addrspace(1)* nocapture %arg) {
bb:
  %tmp = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
  %tmp1 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
  %tmp2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 3
  %tmp3 = bitcast i32 addrspace(1)* %arg to <4 x i32> addrspace(1)*
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp3, align 4
  %tmp5 = extractelement <4 x i32> %tmp4, i32 0
  %tmp6 = sext i32 %tmp5 to i64
  %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4
  store i32 %tmp8, i32 addrspace(1)* %arg, align 4
  %tmp9 = extractelement <4 x i32> %tmp4, i32 1
  %tmp10 = sext i32 %tmp9 to i64
  %tmp11 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp10
  %tmp12 = load i32, i32 addrspace(1)* %tmp11, align 4
  store i32 %tmp12, i32 addrspace(1)* %tmp, align 4
  %tmp13 = extractelement <4 x i32> %tmp4, i32 2
  %tmp14 = sext i32 %tmp13 to i64
  %tmp15 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp14
  %tmp16 = load i32, i32 addrspace(1)* %tmp15, align 4
  store i32 %tmp16, i32 addrspace(1)* %tmp1, align 4
  %tmp17 = extractelement <4 x i32> %tmp4, i32 3
  %tmp18 = sext i32 %tmp17 to i64
  %tmp19 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp18
  %tmp20 = load i32, i32 addrspace(1)* %tmp19, align 4
  store i32 %tmp20, i32 addrspace(1)* %tmp2, align 4
  ret void
}

; GCN-LABEL: {{^}}test_indirect_through_phi:
; GCN: MemoryBound: 0
; GCN: WaveLimiterHint : 0
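; Here the store address depends on a loaded value only through a phi that is
; updated with ALU operations each iteration; this pattern is not flagged, so
; neither hint is set.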
define amdgpu_kernel void @test_indirect_through_phi(float addrspace(1)* %arg) {
bb:
  %load = load float, float addrspace(1)* %arg, align 8
  %load.f = bitcast float %load to i32
  %n = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:                                              ; preds = %bb1, %bb
  %phi = phi i32 [ %load.f, %bb ], [ %and2, %bb1 ]
  %ind = phi i32 [ 0, %bb ], [ %inc2, %bb1 ]
  %and1 = and i32 %phi, %n
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %and1
  store float %load, float addrspace(1)* %gep, align 4
  %inc1 = add nsw i32 %phi, 1310720
  %and2 = and i32 %inc1, %n
  %inc2 = add nuw nsw i32 %ind, 1
  %cmp = icmp eq i32 %inc2, 1024
  br i1 %cmp, label %bb2, label %bb1

bb2:                                              ; preds = %bb1
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()