; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN %s
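; Tests lowering of insertelement with a dynamic (run-time) index for a range
; of vector types on fiji. Small vectors are expected to expand to
; compare/select chains (v_cmp + v_cndmask or s_cselect), sub-dword element
; types to shift/mask v_bfi sequences, and larger vectors to indirect register
; writes through v_movreld_b32 with m0.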

; GCN-LABEL: {{^}}float4_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 3
; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC1]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 2
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC2]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX]], 1
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC3]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC4:[^,]+]], [[IDX]], 0
; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC4]]
; GCN:     flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST]]:[[ELT_LAST]]]
define amdgpu_kernel void @float4_inselt(<4 x float> addrspace(1)* %out, <4 x float> %vec, i32 %sel) {
entry:
  %v = insertelement <4 x float> %vec, float 1.000000e+00, i32 %sel
  store <4 x float> %v, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}float4_inselt_undef:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-NOT: v_cmp_
; GCN-NOT: v_cndmask_
; GCN:     v_mov_b32_e32 [[ONE:v[0-9]+]], 1.0
; GCN:     v_mov_b32_e32 v{{[0-9]+}}, [[ONE]]
; GCN:     v_mov_b32_e32 v{{[0-9]+}}, [[ONE]]
; GCN:     v_mov_b32_e32 v{{[0-9]+}}, [[ONE]]
define amdgpu_kernel void @float4_inselt_undef(<4 x float> addrspace(1)* %out, i32 %sel) {
entry:
  %v = insertelement <4 x float> undef, float 1.000000e+00, i32 %sel
  store <4 x float> %v, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}int4_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 3
; GCN-DAG: s_cselect_b32 s[[ELT_3:[0-9]+]], s{{[0-9]+}}, 1
; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
; GCN-DAG: s_cselect_b32 s[[ELT_2:[0-9]+]], s{{[0-9]+}}, 1
; GCN-DAG: s_cmp_lg_u32 [[IDX]], 1
; GCN-DAG: s_cselect_b32 s[[ELT_1:[0-9]+]], s{{[0-9]+}}, 1
; GCN-DAG: s_cmp_lg_u32 [[IDX]], 0
; GCN-DAG: s_cselect_b32 s[[ELT_0:[0-9]+]], s{{[0-9]+}}, 1
; GCN-DAG: v_mov_b32_e32 v[[VELT_0:[0-9]+]], s[[ELT_0]]
; GCN-DAG: v_mov_b32_e32 v[[VELT_1:[0-9]+]], s[[ELT_1]]
; GCN-DAG: v_mov_b32_e32 v[[VELT_2:[0-9]+]], s[[ELT_2]]
; GCN-DAG: v_mov_b32_e32 v[[VELT_3:[0-9]+]], s[[ELT_3]]
; GCN:     flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[VELT_0]]:[[VELT_3]]]
define amdgpu_kernel void @int4_inselt(<4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %sel) {
entry:
  %v = insertelement <4 x i32> %vec, i32 1, i32 %sel
  store <4 x i32> %v, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}float2_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 1
; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC1]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 0
; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC2]]
; GCN:     flat_store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST]]:[[ELT_LAST]]]
define amdgpu_kernel void @float2_inselt(<2 x float> addrspace(1)* %out, <2 x float> %vec, i32 %sel) {
entry:
  %v = insertelement <2 x float> %vec, float 1.000000e+00, i32 %sel
  store <2 x float> %v, <2 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}float8_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 3
; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST0:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC1]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 2
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC2]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX]], 1
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC3]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC4:[^,]+]], [[IDX]], 0
; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST0:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC4]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC5:[^,]+]], [[IDX:s[0-9]+]], 7
; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST1:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC5]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC6:[^,]+]], [[IDX]], 6
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC6]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC7:[^,]+]], [[IDX]], 5
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC7]]
; GCN-DAG: v_cmp_ne_u32_e64 [[CC8:[^,]+]], [[IDX]], 4
; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST1:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC8]]
; GCN-DAG: flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST0]]:[[ELT_LAST0]]]
; GCN-DAG: flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST1]]:[[ELT_LAST1]]]
define amdgpu_kernel void @float8_inselt(<8 x float> addrspace(1)* %out, <8 x float> %vec, i32 %sel) {
entry:
  %v = insertelement <8 x float> %vec, float 1.000000e+00, i32 %sel
  store <8 x float> %v, <8 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}float16_inselt:
; GCN: v_movreld_b32
define amdgpu_kernel void @float16_inselt(<16 x float> addrspace(1)* %out, <16 x float> %vec, i32 %sel) {
entry:
  %v = insertelement <16 x float> %vec, float 1.000000e+00, i32 %sel
  store <16 x float> %v, <16 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}float32_inselt:
; GCN: v_movreld_b32
define amdgpu_kernel void @float32_inselt(<32 x float> addrspace(1)* %out, <32 x float> %vec, i32 %sel) {
entry:
  %v = insertelement <32 x float> %vec, float 1.000000e+00, i32 %sel
  store <32 x float> %v, <32 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}half4_inselt:
; GCN-NOT: v_cndmask_b32
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN:     s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 4
; GCN:     s_lshl_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], [[SEL]]
; GCN:     s_mov_b32 [[K:s[0-9]+]], 0x3c003c00
; GCN:     v_mov_b32_e32 [[V:v[0-9]+]], [[K]]
; GCN:     v_bfi_b32 v{{[0-9]+}}, s{{[0-9]+}}, [[V]], v{{[0-9]+}}
; GCN:     v_bfi_b32 v{{[0-9]+}}, s{{[0-9]+}}, [[V]], v{{[0-9]+}}
define amdgpu_kernel void @half4_inselt(<4 x half> addrspace(1)* %out, <4 x half> %vec, i32 %sel) {
entry:
  %v = insertelement <4 x half> %vec, half 1.000000e+00, i32 %sel
  store <4 x half> %v, <4 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}half2_inselt:
; GCN-NOT: v_cndmask_b32
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN:     s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 4
; GCN:     s_lshl_b32 [[V:s[0-9]+]], 0xffff, [[SEL]]
; GCN:     v_bfi_b32 v{{[0-9]+}}, [[V]], v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @half2_inselt(<2 x half> addrspace(1)* %out, <2 x half> %vec, i32 %sel) {
entry:
  %v = insertelement <2 x half> %vec, half 1.000000e+00, i32 %sel
  store <2 x half> %v, <2 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}half8_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 0
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 1
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 2
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 3
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 4
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 5
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 6
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 7
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
define amdgpu_kernel void @half8_inselt(<8 x half> addrspace(1)* %out, <8 x half> %vec, i32 %sel) {
entry:
  %v = insertelement <8 x half> %vec, half 1.000000e+00, i32 %sel
  store <8 x half> %v, <8 x half> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}short2_inselt:
; GCN-NOT: v_cndmask_b32
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN:     v_mov_b32_e32 [[K:v[0-9]+]], 0x10001
; GCN:     s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 4
; GCN:     s_lshl_b32 [[V:s[0-9]+]], 0xffff, [[SEL]]
; GCN:     v_bfi_b32 v{{[0-9]+}}, [[V]], [[K]], v{{[0-9]+}}
define amdgpu_kernel void @short2_inselt(<2 x i16> addrspace(1)* %out, <2 x i16> %vec, i32 %sel) {
entry:
  %v = insertelement <2 x i16> %vec, i16 1, i32 %sel
  store <2 x i16> %v, <2 x i16> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}short4_inselt:
; GCN-NOT: v_cndmask_b32
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN:     s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 4
; GCN:     s_lshl_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], [[SEL]]
; GCN:     s_mov_b32 [[K:s[0-9]+]], 0x10001
; GCN:     v_mov_b32_e32 [[V:v[0-9]+]], [[K]]
; GCN:     v_bfi_b32 v{{[0-9]+}}, s{{[0-9]+}}, [[V]], v{{[0-9]+}}
; GCN:     v_bfi_b32 v{{[0-9]+}}, s{{[0-9]+}}, [[V]], v{{[0-9]+}}
define amdgpu_kernel void @short4_inselt(<4 x i16> addrspace(1)* %out, <4 x i16> %vec, i32 %sel) {
entry:
  %v = insertelement <4 x i16> %vec, i16 1, i32 %sel
  store <4 x i16> %v, <4 x i16> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}byte8_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN:     s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 3
; GCN:     s_lshl_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], [[SEL]]
; GCN:     s_mov_b32 [[K:s[0-9]+]], 0x1010101
; GCN:     s_and_b32 s3, s1, [[K]]
; GCN:     s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, [[K]]
; GCN:     s_andn2_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
; GCN:     s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
define amdgpu_kernel void @byte8_inselt(<8 x i8> addrspace(1)* %out, <8 x i8> %vec, i32 %sel) {
entry:
  %v = insertelement <8 x i8> %vec, i8 1, i32 %sel
  store <8 x i8> %v, <8 x i8> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}byte16_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 0
; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 15
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
; GCN-DAG: v_or_b32_sdwa
define amdgpu_kernel void @byte16_inselt(<16 x i8> addrspace(1)* %out, <16 x i8> %vec, i32 %sel) {
entry:
  %v = insertelement <16 x i8> %vec, i8 1, i32 %sel
  store <16 x i8> %v, <16 x i8> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}double2_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-DAG: v_cmp_eq_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 1
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC1]]
; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC1]]
; GCN-DAG: v_cmp_eq_u32_e64 [[CC2:[^,]+]], [[IDX]], 0
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC2]]
; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC2]]
define amdgpu_kernel void @double2_inselt(<2 x double> addrspace(1)* %out, <2 x double> %vec, i32 %sel) {
entry:
  %v = insertelement <2 x double> %vec, double 1.000000e+00, i32 %sel
  store <2 x double> %v, <2 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}double5_inselt:
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN-COUNT-10: v_cndmask_b32
define amdgpu_kernel void @double5_inselt(<5 x double> addrspace(1)* %out, <5 x double> %vec, i32 %sel) {
entry:
  %v = insertelement <5 x double> %vec, double 1.000000e+00, i32 %sel
  store <5 x double> %v, <5 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}double8_inselt:
; GCN-NOT: v_cndmask
; GCN-NOT: buffer_
; GCN-NOT: s_or_b32
; GCN-DAG: s_mov_b32 m0, [[IND:s[0-9]+]]
; GCN-DAG: v_movreld_b32_e32 v[[#BASE:]], 0
; GCN-NOT: s_mov_b32 m0
; GCN:     v_movreld_b32_e32 v[[#BASE+1]],
define amdgpu_kernel void @double8_inselt(<8 x double> addrspace(1)* %out, <8 x double> %vec, i32 %sel) {
entry:
  %v = insertelement <8 x double> %vec, double 1.000000e+00, i32 %sel
  store <8 x double> %v, <8 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}double7_inselt:
; GCN-NOT: v_cndmask
; GCN-NOT: buffer_
; GCN-NOT: s_or_b32
; GCN-DAG: s_mov_b32 m0, [[IND:s[0-9]+]]
; GCN-DAG: v_movreld_b32_e32 v[[#BASE:]], 0
; GCN-NOT: s_mov_b32 m0
; GCN:     v_movreld_b32_e32 v[[#BASE+1]],
define amdgpu_kernel void @double7_inselt(<7 x double> addrspace(1)* %out, <7 x double> %vec, i32 %sel) {
entry:
  %v = insertelement <7 x double> %vec, double 1.000000e+00, i32 %sel
  store <7 x double> %v, <7 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}double16_inselt:
; GCN-NOT: v_cndmask
; GCN-NOT: buffer_
; GCN-NOT: s_or_b32
; GCN-DAG: s_mov_b32 m0, [[IND:s[0-9]+]]
; GCN-DAG: v_movreld_b32_e32 v[[#BASE:]], 0
; GCN-NOT: s_mov_b32 m0
; GCN:     v_movreld_b32_e32 v[[#BASE+1]],
define amdgpu_kernel void @double16_inselt(<16 x double> addrspace(1)* %out, <16 x double> %vec, i32 %sel) {
entry:
  %v = insertelement <16 x double> %vec, double 1.000000e+00, i32 %sel
  store <16 x double> %v, <16 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}double15_inselt:
; GCN-NOT: v_cndmask
; GCN-NOT: buffer_
; GCN-NOT: s_or_b32
; GCN-DAG: s_mov_b32 m0, [[IND:s[0-9]+]]
; GCN-DAG: v_movreld_b32_e32 v[[#BASE:]], 0
; GCN-NOT: s_mov_b32 m0
; GCN:     v_movreld_b32_e32 v[[#BASE+1]],
define amdgpu_kernel void @double15_inselt(<15 x double> addrspace(1)* %out, <15 x double> %vec, i32 %sel) {
entry:
  %v = insertelement <15 x double> %vec, double 1.000000e+00, i32 %sel
  store <15 x double> %v, <15 x double> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}bit4_inselt:
; GCN: buffer_store_byte
; GCN: buffer_load_ubyte
; GCN: buffer_load_ubyte
; GCN: buffer_load_ubyte
; GCN: buffer_load_ubyte
define amdgpu_kernel void @bit4_inselt(<4 x i1> addrspace(1)* %out, <4 x i1> %vec, i32 %sel) {
entry:
  %v = insertelement <4 x i1> %vec, i1 1, i32 %sel
  store <4 x i1> %v, <4 x i1> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}bit128_inselt:
; GCN-NOT: buffer_
; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], s{{[0-9]+}}, 0
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}, [[CC1]]
; GCN-DAG: v_mov_b32_e32 [[LASTIDX:v[0-9]+]], 0x7f
; GCN-DAG: v_cmp_ne_u32_e32 [[CCL:[^,]+]], s{{[0-9]+}}, [[LASTIDX]]
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}, [[CCL]]
define amdgpu_kernel void @bit128_inselt(<128 x i1> addrspace(1)* %out, <128 x i1> %vec, i32 %sel) {
entry:
  %v = insertelement <128 x i1> %vec, i1 1, i32 %sel
  store <128 x i1> %v, <128 x i1> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}float32_inselt_vec:
; GCN-NOT: buffer_
; GCN-COUNT-32: v_cmp_ne_u32
; GCN-COUNT-32: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, 1.0,
define amdgpu_ps <32 x float> @float32_inselt_vec(<32 x float> %vec, i32 %sel) {
entry:
  %v = insertelement <32 x float> %vec, float 1.000000e+00, i32 %sel
  ret <32 x float> %v
}

; GCN-LABEL: {{^}}double8_inselt_vec:
; GCN-NOT: buffer_
; GCN:         v_cmp_eq_u32
; GCN-COUNT-2: v_cndmask_b32
; GCN:         v_cmp_eq_u32
; GCN-COUNT-2: v_cndmask_b32
; GCN:         v_cmp_eq_u32
; GCN-COUNT-2: v_cndmask_b32
; GCN:         v_cmp_eq_u32
; GCN-COUNT-2: v_cndmask_b32
; GCN:         v_cmp_eq_u32
; GCN-COUNT-2: v_cndmask_b32
; GCN:         v_cmp_eq_u32
; GCN-COUNT-2: v_cndmask_b32
; GCN:         v_cmp_eq_u32
; GCN-COUNT-2: v_cndmask_b32
; GCN:         v_cmp_eq_u32
; GCN-COUNT-2: v_cndmask_b32
define <8 x double> @double8_inselt_vec(<8 x double> %vec, i32 %sel) {
entry:
  %v = insertelement <8 x double> %vec, double 1.000000e+00, i32 %sel
  ret <8 x double> %v
}