1; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
2; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
3; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
4; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
5; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
6; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
7
8; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=HSAOPT -check-prefix=OPT %s
9; RUN: opt -S -mtriple=amdgcn-unknown-unknown -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=NOHSAOPT -check-prefix=OPT %s
10
11; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
12
13
14; HSAOPT: @mova_same_clause.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] undef, align 4
15; HSAOPT: @high_alignment.stack = internal unnamed_addr addrspace(3) global [256 x [8 x i32]] undef, align 16
16
17
18; FUNC-LABEL: {{^}}mova_same_clause:
19; OPT-LABEL: @mova_same_clause(
20
21; R600: LDS_WRITE
22; R600: LDS_WRITE
23; R600: LDS_READ
24; R600: LDS_READ
25
26; HSA-PROMOTE: .amd_kernel_code_t
27; HSA-PROMOTE: workgroup_group_segment_byte_size = 5120
28; HSA-PROMOTE: .end_amd_kernel_code_t
29
30; HSA-PROMOTE: s_load_dword s{{[0-9]+}}, s[4:5], 0x2
31
32; SI-PROMOTE: ds_write_b32
33; SI-PROMOTE: ds_write_b32
34; SI-PROMOTE: ds_read_b32
35; SI-PROMOTE: ds_read_b32
36
37; HSA-ALLOCA: .amd_kernel_code_t
38; FIXME: Creating the emergency stack slots causes us to over-estimate scratch
39; by 4 bytes.
40; HSA-ALLOCA: workitem_private_segment_byte_size = 24
41; HSA-ALLOCA: .end_amd_kernel_code_t
42
43; HSA-ALLOCA: s_mov_b32 flat_scratch_lo, s7
44; HSA-ALLOCA: s_add_u32 s6, s6, s9
45; HSA-ALLOCA: s_lshr_b32 flat_scratch_hi, s6, 8
46
47; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ; encoding: [0x00,0x10,0x70,0xe0
48; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ; encoding: [0x00,0x10,0x70,0xe0
49
50
51; HSAOPT: [[DISPATCH_PTR:%[0-9]+]] = call noalias nonnull dereferenceable(64) i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
52; HSAOPT: [[CAST_DISPATCH_PTR:%[0-9]+]] = bitcast i8 addrspace(2)* [[DISPATCH_PTR]] to i32 addrspace(2)*
53; HSAOPT: [[GEP0:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(2)* [[CAST_DISPATCH_PTR]], i64 1
54; HSAOPT: [[LDXY:%[0-9]+]] = load i32, i32 addrspace(2)* [[GEP0]], align 4, !invariant.load !0
55; HSAOPT: [[GEP1:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(2)* [[CAST_DISPATCH_PTR]], i64 2
56; HSAOPT: [[LDZU:%[0-9]+]] = load i32, i32 addrspace(2)* [[GEP1]], align 4, !range !1, !invariant.load !0
57; HSAOPT: [[EXTRACTY:%[0-9]+]] = lshr i32 [[LDXY]], 16
58
59; HSAOPT: [[WORKITEM_ID_X:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.x(), !range !2
60; HSAOPT: [[WORKITEM_ID_Y:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.y(), !range !2
61; HSAOPT: [[WORKITEM_ID_Z:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.z(), !range !2
62
63; HSAOPT: [[Y_SIZE_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[EXTRACTY]], [[LDZU]]
64; HSAOPT: [[YZ_X_XID:%[0-9]+]] = mul i32 [[Y_SIZE_X_Z_SIZE]], [[WORKITEM_ID_X]]
65; HSAOPT: [[Y_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[WORKITEM_ID_Y]], [[LDZU]]
66; HSAOPT: [[ADD_YZ_X_X_YZ_SIZE:%[0-9]+]] = add i32 [[YZ_X_XID]], [[Y_X_Z_SIZE]]
67; HSAOPT: [[ADD_ZID:%[0-9]+]] = add i32 [[ADD_YZ_X_X_YZ_SIZE]], [[WORKITEM_ID_Z]]
68
69; HSAOPT: [[LOCAL_GEP:%[0-9]+]] = getelementptr inbounds [256 x [5 x i32]], [256 x [5 x i32]] addrspace(3)* @mova_same_clause.stack, i32 0, i32 [[ADD_ZID]]
70; HSAOPT: %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
71; HSAOPT: %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
72; HSAOPT: %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 0
73; HSAOPT: %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 1
74
75
76; NOHSAOPT: call i32 @llvm.r600.read.local.size.y(), !range !0
77; NOHSAOPT: call i32 @llvm.r600.read.local.size.z(), !range !0
78; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !1
79; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !1
80; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !1
; Stores to two runtime-indexed slots of a private array, then reloads the
; two fixed elements 0 and 1.  The checks above verify that promote-alloca
; turns the alloca into a per-workitem slice of an LDS array indexed by the
; flattened workitem id (dispatch-ptr workgroup sizes * workitem ids).
define amdgpu_kernel void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [5 x i32], align 4
  ; First runtime index comes from in[0].
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
  store i32 4, i32* %arrayidx1, align 4
  ; Second runtime index comes from in[1].
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
  store i32 5, i32* %arrayidx3, align 4
  ; Read back stack[0] and stack[1] into out[0] and out[1].
  %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
  %2 = load i32, i32* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
  %3 = load i32, i32* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}
100
101; OPT-LABEL: @high_alignment(
102; OPT: getelementptr inbounds [256 x [8 x i32]], [256 x [8 x i32]] addrspace(3)* @high_alignment.stack, i32 0, i32 %{{[0-9]+}}
; Same access pattern as @mova_same_clause, but with an 8-element alloca at
; align 16; the promoted LDS global (checked at the top of the file) must
; keep the align 16.
define amdgpu_kernel void @high_alignment(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [8 x i32], align 16
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr inbounds [8 x i32], [8 x i32]* %stack, i32 0, i32 %0
  store i32 4, i32* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [8 x i32], [8 x i32]* %stack, i32 0, i32 %1
  store i32 5, i32* %arrayidx3, align 4
  ; Copy stack[0] and stack[1] out to out[0] and out[1].
  %arrayidx10 = getelementptr inbounds [8 x i32], [8 x i32]* %stack, i32 0, i32 0
  %2 = load i32, i32* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [8 x i32], [8 x i32]* %stack, i32 0, i32 1
  %3 = load i32, i32* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}
122
123; FUNC-LABEL: {{^}}no_replace_inbounds_gep:
124; OPT-LABEL: @no_replace_inbounds_gep(
125; OPT: alloca [5 x i32]
126
127; SI-NOT: ds_write
; Identical to @mova_same_clause except that %arrayidx1 deliberately omits
; 'inbounds'.  The checks above verify that in this case the alloca is kept
; (not promoted to LDS) and nothing is written with ds instructions.
define amdgpu_kernel void @no_replace_inbounds_gep(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
  %stack = alloca [5 x i32], align 4
  %0 = load i32, i32 addrspace(1)* %in, align 4
  ; Non-inbounds GEP with a runtime index -- the trigger for this test.
  %arrayidx1 = getelementptr [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
  store i32 4, i32* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
  store i32 5, i32* %arrayidx3, align 4
  %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
  %2 = load i32, i32* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
  %3 = load i32, i32* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}
147
148; This test checks that the stack offset is calculated correctly for structs.
149; All register loads/stores should be optimized away, so there shouldn't be
150; any MOVA instructions.
151;
152; XXX: This generated code has unnecessary MOVs, we should be able to optimize
153; this.
154
155; FUNC-LABEL: {{^}}multiple_structs:
156; OPT-LABEL: @multiple_structs(
157
158; R600-NOT: MOVA_INT
159; SI-NOT: v_movrel
160; SI-NOT: v_movrel
; Two-field i32 struct used by @multiple_structs below.
%struct.point = type { i32, i32 }
162
; Two simultaneously-live struct allocas; per the comment above, all accesses
; use constant offsets, so no indirect-addressing (MOVA/movrel) instructions
; should be emitted.
define amdgpu_kernel void @multiple_structs(i32 addrspace(1)* %out) #0 {
entry:
  %a = alloca %struct.point
  %b = alloca %struct.point
  %a.x.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
  %a.y.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 1
  %b.x.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
  %b.y.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 1
  ; a = {0, 1}, b = {2, 3}
  store i32 0, i32* %a.x.ptr
  store i32 1, i32* %a.y.ptr
  store i32 2, i32* %b.x.ptr
  store i32 3, i32* %b.y.ptr
  ; Reload a.x and b.x through fresh GEPs and return their sum (0 + 2).
  %a.indirect.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
  %b.indirect.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
  %a.indirect = load i32, i32* %a.indirect.ptr
  %b.indirect = load i32, i32* %b.indirect.ptr
  %0 = add i32 %a.indirect, %b.indirect
  store i32 %0, i32 addrspace(1)* %out
  ret void
}
183
184; Test direct access of a private array inside a loop.  The private array
185; loads and stores should be lowered to copies, so there shouldn't be any
186; MOVA instructions.
187
188; FUNC-LABEL: {{^}}direct_loop:
189; R600-NOT: MOVA_INT
190; SI-NOT: v_movrel
191
; Direct (constant-index) private-array accesses inside a loop; per the
; comment above these should lower to copies with no MOVA/movrel.
define amdgpu_kernel void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
entry:
  %prv_array_const = alloca [2 x i32]
  %prv_array = alloca [2 x i32]
  ; Seed prv_array_const[0..1] from in[0..1].
  %a = load i32, i32 addrspace(1)* %in
  %b_src_ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %b = load i32, i32 addrspace(1)* %b_src_ptr
  %a_dst_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
  store i32 %a, i32* %a_dst_ptr
  %b_dst_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 1
  store i32 %b, i32* %b_dst_ptr
  br label %for.body

; 4095 iterations of: prv_array[0] += prv_array_const[0]
for.body:
  %inc = phi i32 [0, %entry], [%count, %for.body]
  %x_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
  %x = load i32, i32* %x_ptr
  %y_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
  %y = load i32, i32* %y_ptr
  %xy = add i32 %x, %y
  store i32 %xy, i32* %y_ptr
  %count = add i32 %inc, 1
  %done = icmp eq i32 %count, 4095
  br i1 %done, label %for.end, label %for.body

for.end:
  ; Write the accumulated value to the output.
  %value_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
  %value = load i32, i32* %value_ptr
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
223
224; FUNC-LABEL: {{^}}short_array:
225
226; R600: MOVA_INT
227
228; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:6 ; encoding: [0x06,0x00,0x68,0xe0
229; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding: [0x04,0x00,0x68,0xe0
; Loaded value is 0 or 1, so sext will become zext, so we get buffer_load_ushort instead of buffer_load_sshort.
; FIXME(review): the check on the next line still expects buffer_load_sshort,
; which contradicts the comment above -- confirm which form is intended.
231; SI-ALLOCA: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
232
233; SI-PROMOTE: s_load_dword [[IDX:s[0-9]+]]
234; SI-PROMOTE: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 16
235; SI-PROMOTE: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[SCALED_IDX]], 16
; Store known i16 values 0 and 1, then reload one by a kernel-argument index
; and sign-extend it; the promote path is checked above to extract the
; element with a bitfield extract of the packed value.
define amdgpu_kernel void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %0 = alloca [2 x i16]
  %1 = getelementptr inbounds [2 x i16], [2 x i16]* %0, i32 0, i32 0
  %2 = getelementptr inbounds [2 x i16], [2 x i16]* %0, i32 0, i32 1
  store i16 0, i16* %1
  store i16 1, i16* %2
  ; Runtime-indexed reload of one of the two halfwords.
  %3 = getelementptr inbounds [2 x i16], [2 x i16]* %0, i32 0, i32 %index
  %4 = load i16, i16* %3
  %5 = sext i16 %4 to i32
  store i32 %5, i32 addrspace(1)* %out
  ret void
}
249
250; FUNC-LABEL: {{^}}char_array:
251
252; R600: MOVA_INT
253
254; SI-PROMOTE-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding:
255; SI-PROMOTE-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:5 ; encoding:
256
257; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding: [0x04,0x00,0x60,0xe0
258; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:5 ; encoding: [0x05,0x00,0x60,0xe0
; Same pattern as @short_array but with i8 elements; the checks above expect
; byte stores at fixed scratch offsets 4 and 5.
define amdgpu_kernel void @char_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %0 = alloca [2 x i8]
  %1 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 0
  %2 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 1
  store i8 0, i8* %1
  store i8 1, i8* %2
  ; Runtime-indexed reload of one byte, sign-extended for the output store.
  %3 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 %index
  %4 = load i8, i8* %3
  %5 = sext i8 %4 to i32
  store i32 %5, i32 addrspace(1)* %out
  ret void
}
272
273; Test that two stack objects are not stored in the same register
274; The second stack object should be in T3.X
275; FUNC-LABEL: {{^}}no_overlap:
; FIXME(review): no RUN line enables a 'R600-CHECK' prefix (the r600 run uses
; -check-prefix=R600), so the two lines below are inert and [[CHAN]] is never
; bound for the R600-NOT pattern that follows -- confirm intent.
; R600-CHECK: MOV
; R600-CHECK: [[CHAN:[XYZW]]]+
; R600-NOT: [[CHAN]]+
279;
280; A total of 5 bytes should be allocated and used.
281; SI: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ;
; A 3-byte and a 2-byte alloca are live simultaneously; they must be placed
; at distinct offsets (5 bytes total, per the comment above), and the same
; runtime index is used to read one byte from each.
define amdgpu_kernel void @no_overlap(i32 addrspace(1)* %out, i32 %in) #0 {
entry:
  %0 = alloca [3 x i8], align 1
  %1 = alloca [2 x i8], align 1
  %2 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 0
  %3 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 1
  %4 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 2
  %5 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 0
  %6 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 1
  ; First array = {0, 1, 2}; second array = {1, 0}.
  store i8 0, i8* %2
  store i8 1, i8* %3
  store i8 2, i8* %4
  store i8 1, i8* %5
  store i8 0, i8* %6
  ; Sum the %in'th element of each array; wrong (overlapping) placement
  ; would corrupt one of the two reads.
  %7 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 %in
  %8 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 %in
  %9 = load i8, i8* %7
  %10 = load i8, i8* %8
  %11 = add i8 %9, %10
  %12 = sext i8 %11 to i32
  store i32 %12, i32 addrspace(1)* %out
  ret void
}
305
; Nested i8 array with a runtime index into the inner array; exercises
; multi-dimensional GEPs on a private alloca.
define amdgpu_kernel void @char_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %buf = alloca [2 x [2 x i8]]
  ; Initialize buf[0][0..1] = {0, 1}.
  %elt0 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %buf, i32 0, i32 0, i32 0
  %elt1 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %buf, i32 0, i32 0, i32 1
  store i8 0, i8* %elt0
  store i8 1, i8* %elt1
  ; Reload buf[0][%index] and sign-extend it for the output.
  %elt.var = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %buf, i32 0, i32 0, i32 %index
  %val = load i8, i8* %elt.var
  %val.ext = sext i8 %val to i32
  store i32 %val.ext, i32 addrspace(1)* %out
  ret void
}
319
; Nested i32 array with a runtime index into the inner array.
define amdgpu_kernel void @i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %buf = alloca [2 x [2 x i32]]
  ; Initialize buf[0][0..1] = {0, 1}.
  %elt0 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %buf, i32 0, i32 0, i32 0
  %elt1 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %buf, i32 0, i32 0, i32 1
  store i32 0, i32* %elt0
  store i32 1, i32* %elt1
  ; Reload buf[0][%index] and store it to the output.
  %elt.var = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %buf, i32 0, i32 0, i32 %index
  %val = load i32, i32* %elt.var
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
332
; Nested i64 array with a runtime index into the inner array.
define amdgpu_kernel void @i64_array_array(i64 addrspace(1)* %out, i32 %index) #0 {
entry:
  %buf = alloca [2 x [2 x i64]]
  ; Initialize buf[0][0..1] = {0, 1}.
  %elt0 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %buf, i32 0, i32 0, i32 0
  %elt1 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %buf, i32 0, i32 0, i32 1
  store i64 0, i64* %elt0
  store i64 1, i64* %elt1
  ; Reload buf[0][%index] and store it to the output.
  %elt.var = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %buf, i32 0, i32 0, i32 %index
  %val = load i64, i64* %elt.var
  store i64 %val, i64 addrspace(1)* %out
  ret void
}
345
; Two-field i32 struct used by the struct-array tests below.
%struct.pair32 = type { i32, i32 }
347
; Nested array of structs with a runtime index selecting the struct; the
; final GEP reads field 0 while the initializing stores hit field 1.
define amdgpu_kernel void @struct_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %pairs = alloca [2 x [2 x %struct.pair32]]
  ; pairs[0][0].y = 0; pairs[0][1].y = 1
  %p00.y = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %pairs, i32 0, i32 0, i32 0, i32 1
  %p01.y = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %pairs, i32 0, i32 0, i32 1, i32 1
  store i32 0, i32* %p00.y
  store i32 1, i32* %p01.y
  ; Load pairs[0][%index].x (field 0) into the output.
  %pn.x = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %pairs, i32 0, i32 0, i32 %index, i32 0
  %val = load i32, i32* %pn.x
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
360
; Flat array of structs with a runtime index; stores touch mixed fields,
; the final load reads field 0 of the selected struct.
define amdgpu_kernel void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
  %pairs = alloca [2 x %struct.pair32]
  ; pairs[0].y = 0; pairs[1].x = 1
  %p0.y = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %pairs, i32 0, i32 0, i32 1
  %p1.x = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %pairs, i32 0, i32 1, i32 0
  store i32 0, i32* %p0.y
  store i32 1, i32* %p1.x
  ; Load pairs[%index].x into the output.
  %pn.x = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %pairs, i32 0, i32 %index, i32 0
  %val = load i32, i32* %pn.x
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
373
; A select between two pointers into the same private array; the chosen
; element (arr[0] if %in == 0, else arr[1]) is written to the output.
define amdgpu_kernel void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
  %arr = alloca [2 x i32]
  %slot0 = getelementptr [2 x i32], [2 x i32]* %arr, i32 0, i32 0
  %slot1 = getelementptr [2 x i32], [2 x i32]* %arr, i32 0, i32 1
  store i32 0, i32* %slot0
  store i32 1, i32* %slot1
  ; Pointer select driven by the kernel argument.
  %is.zero = icmp eq i32 %in, 0
  %chosen = select i1 %is.zero, i32* %slot0, i32* %slot1
  %val = load i32, i32* %chosen
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
387
388; AMDGPUPromoteAlloca does not know how to handle ptrtoint.  When it
389; finds one, it should stop trying to promote.
390
391; FUNC-LABEL: ptrtoint:
392; SI-NOT: ds_write
393; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
394; SI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:5 ;
; The alloca escapes through ptrtoint/inttoptr, so (per the comment above)
; promote-alloca must stop and leave the accesses in scratch memory.
define amdgpu_kernel void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
  %alloca = alloca [16 x i32]
  %tmp0 = getelementptr [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
  store i32 5, i32* %tmp0
  ; Rebuild a pointer 5 bytes past the alloca base via integer arithmetic,
  ; then index it with %b for the reload.
  %tmp1 = ptrtoint [16 x i32]* %alloca to i32
  %tmp2 = add i32 %tmp1, 5
  %tmp3 = inttoptr i32 %tmp2 to i32*
  %tmp4 = getelementptr i32, i32* %tmp3, i32 %b
  %tmp5 = load i32, i32* %tmp4
  store i32 %tmp5, i32 addrspace(1)* %out
  ret void
}
407
408; OPT-LABEL: @pointer_typed_alloca(
409; OPT:  getelementptr inbounds [256 x i32 addrspace(1)*], [256 x i32 addrspace(1)*] addrspace(3)* @pointer_typed_alloca.A.addr, i32 0, i32 %{{[0-9]+}}
410; OPT: load i32 addrspace(1)*, i32 addrspace(1)* addrspace(3)* %{{[0-9]+}}, align 4
; An alloca that holds a pointer value; the checks above verify it is
; promoted to the @pointer_typed_alloca.A.addr LDS global and that the saved
; pointer is reloaded from addrspace(3).
define amdgpu_kernel void @pointer_typed_alloca(i32 addrspace(1)* %A) {
entry:
  %A.addr = alloca i32 addrspace(1)*, align 4
  store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4
  ; Each of the three stores below reloads the saved pointer first,
  ; then writes A[0] = 1, A[1] = 2, A[2] = 3.
  %ld0 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4
  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %ld0, i32 0
  store i32 1, i32 addrspace(1)* %arrayidx, align 4
  %ld1 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4
  %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %ld1, i32 1
  store i32 2, i32 addrspace(1)* %arrayidx1, align 4
  %ld2 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %ld2, i32 2
  store i32 3, i32 addrspace(1)* %arrayidx2, align 4
  ret void
}
426
427; FUNC-LABEL: v16i32_stack:
428
429; R600: MOVA_INT
430; R600: MOVA_INT
431; R600: MOVA_INT
432; R600: MOVA_INT
433; R600: MOVA_INT
434; R600: MOVA_INT
435; R600: MOVA_INT
436; R600: MOVA_INT
437; R600: MOVA_INT
438; R600: MOVA_INT
439; R600: MOVA_INT
440; R600: MOVA_INT
441; R600: MOVA_INT
442; R600: MOVA_INT
443; R600: MOVA_INT
444; R600: MOVA_INT
445
446; SI: buffer_load_dword
447; SI: buffer_load_dword
448; SI: buffer_load_dword
449; SI: buffer_load_dword
450; SI: buffer_load_dword
451; SI: buffer_load_dword
452; SI: buffer_load_dword
453; SI: buffer_load_dword
454; SI: buffer_load_dword
455; SI: buffer_load_dword
456; SI: buffer_load_dword
457; SI: buffer_load_dword
458; SI: buffer_load_dword
459; SI: buffer_load_dword
460; SI: buffer_load_dword
461; SI: buffer_load_dword
462
; Runtime-indexed load of a whole <16 x i32> from a private array; the
; checks above expect 16 scalar buffer loads.
define amdgpu_kernel void @v16i32_stack(<16 x i32> addrspace(1)* %out, i32 %a) {
  %buf = alloca [2 x <16 x i32>]
  %slot = getelementptr [2 x <16 x i32>], [2 x <16 x i32>]* %buf, i32 0, i32 %a
  %vec = load <16 x i32>, <16 x i32>* %slot
  store <16 x i32> %vec, <16 x i32> addrspace(1)* %out
  ret void
}
470
471; FUNC-LABEL: v16float_stack:
472
473; R600: MOVA_INT
474; R600: MOVA_INT
475; R600: MOVA_INT
476; R600: MOVA_INT
477; R600: MOVA_INT
478; R600: MOVA_INT
479; R600: MOVA_INT
480; R600: MOVA_INT
481; R600: MOVA_INT
482; R600: MOVA_INT
483; R600: MOVA_INT
484; R600: MOVA_INT
485; R600: MOVA_INT
486; R600: MOVA_INT
487; R600: MOVA_INT
488; R600: MOVA_INT
489
490; SI: buffer_load_dword
491; SI: buffer_load_dword
492; SI: buffer_load_dword
493; SI: buffer_load_dword
494; SI: buffer_load_dword
495; SI: buffer_load_dword
496; SI: buffer_load_dword
497; SI: buffer_load_dword
498; SI: buffer_load_dword
499; SI: buffer_load_dword
500; SI: buffer_load_dword
501; SI: buffer_load_dword
502; SI: buffer_load_dword
503; SI: buffer_load_dword
504; SI: buffer_load_dword
505; SI: buffer_load_dword
506
; Float variant of @v16i32_stack: runtime-indexed load of a whole
; <16 x float>, expected to expand to 16 scalar buffer loads.
define amdgpu_kernel void @v16float_stack(<16 x float> addrspace(1)* %out, i32 %a) {
  %buf = alloca [2 x <16 x float>]
  %slot = getelementptr [2 x <16 x float>], [2 x <16 x float>]* %buf, i32 0, i32 %a
  %vec = load <16 x float>, <16 x float>* %slot
  store <16 x float> %vec, <16 x float> addrspace(1)* %out
  ret void
}
514
515; FUNC-LABEL: v2float_stack:
516
517; R600: MOVA_INT
518; R600: MOVA_INT
519
520; SI: buffer_load_dword
521; SI: buffer_load_dword
522
; Small-vector variant: runtime-indexed load of a <2 x float> element,
; expected to expand to 2 scalar buffer loads.
define amdgpu_kernel void @v2float_stack(<2 x float> addrspace(1)* %out, i32 %a) {
  %buf = alloca [16 x <2 x float>]
  %slot = getelementptr [16 x <2 x float>], [16 x <2 x float>]* %buf, i32 0, i32 %a
  %vec = load <2 x float>, <2 x float>* %slot
  store <2 x float> %vec, <2 x float> addrspace(1)* %out
  ret void
}
530
531; OPT-LABEL: @direct_alloca_read_0xi32(
532; OPT: store [0 x i32] undef, [0 x i32] addrspace(3)*
533; OPT: load [0 x i32], [0 x i32] addrspace(3)*
; Degenerate zero-length array: whole-aggregate store and load of [0 x i32]
; through the alloca; the checks above expect both to be rewritten to
; addrspace(3) accesses.
define amdgpu_kernel void @direct_alloca_read_0xi32([0 x i32] addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [0 x i32]
  ; Empty-array constant store (zero bytes).
  store [0 x i32] [], [0 x i32]* %tmp
  %load = load [0 x i32], [0 x i32]* %tmp
  store [0 x i32] %load, [0 x i32] addrspace(1)* %out
  ret void
}
542
543; OPT-LABEL: @direct_alloca_read_1xi32(
544; OPT: store [1 x i32] zeroinitializer, [1 x i32] addrspace(3)*
545; OPT: load [1 x i32], [1 x i32] addrspace(3)*
; Single-element array: whole-aggregate store and load of [1 x i32] through
; the alloca; the checks above expect both rewritten to addrspace(3).
define amdgpu_kernel void @direct_alloca_read_1xi32([1 x i32] addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [1 x i32]
  store [1 x i32] [i32 0], [1 x i32]* %tmp
  %load = load [1 x i32], [1 x i32]* %tmp
  store [1 x i32] %load, [1 x i32] addrspace(1)* %out
  ret void
}
554
; Attributes for most kernels above: nounwind plus an occupancy hint of 1-2
; waves per EU (presumably to pin down the LDS budget promote-alloca sees --
; confirm against the pass's occupancy heuristic).
attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,2" }
556
557; HSAOPT: !0 = !{}
558; HSAOPT: !1 = !{i32 0, i32 257}
559; HSAOPT: !2 = !{i32 0, i32 256}
560
561; NOHSAOPT: !0 = !{i32 0, i32 257}
562; NOHSAOPT: !1 = !{i32 0, i32 256}
563