; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target datalayout = "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
target triple = "thumbv7-none-eabi"

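; Unaligned 64-bit vector stores are lowered to a pair of 32-bit str instructions; in
; the *_update variants the incremented address is stored back through %ptr, so the
; post-incrementing vst1.8 form is used instead.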
define void @store_v8i8(<8 x i8>** %ptr, <8 x i8> %val) {
; CHECK-LABEL: store_v8i8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    str r3, [r0, #4]
; CHECK-NEXT:    str r2, [r0]
; CHECK-NEXT:    bx lr
  %A = load <8 x i8>*, <8 x i8>** %ptr
  store <8 x i8> %val, <8 x i8>* %A, align 1
  ret void
}

define void @store_v8i8_update(<8 x i8>** %ptr, <8 x i8> %val) {
; CHECK-LABEL: store_v8i8_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <8 x i8>*, <8 x i8>** %ptr
  store <8 x i8> %val, <8 x i8>* %A, align 1
  %inc = getelementptr <8 x i8>, <8 x i8>* %A, i32 1
  store <8 x i8>* %inc, <8 x i8>** %ptr
  ret void
}

define void @store_v4i16(<4 x i16>** %ptr, <4 x i16> %val) {
; CHECK-LABEL: store_v4i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    str r3, [r0, #4]
; CHECK-NEXT:    str r2, [r0]
; CHECK-NEXT:    bx lr
  %A = load <4 x i16>*, <4 x i16>** %ptr
  store <4 x i16> %val, <4 x i16>* %A, align 1
  ret void
}

define void @store_v4i16_update(<4 x i16>** %ptr, <4 x i16> %val) {
; CHECK-LABEL: store_v4i16_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <4 x i16>*, <4 x i16>** %ptr
  store <4 x i16> %val, <4 x i16>* %A, align 1
  %inc = getelementptr <4 x i16>, <4 x i16>* %A, i32 1
  store <4 x i16>* %inc, <4 x i16>** %ptr
  ret void
}

define void @store_v2i32(<2 x i32>** %ptr, <2 x i32> %val) {
; CHECK-LABEL: store_v2i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    str r3, [r0, #4]
; CHECK-NEXT:    str r2, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x i32>*, <2 x i32>** %ptr
  store <2 x i32> %val, <2 x i32>* %A, align 1
  ret void
}

define void @store_v2i32_update(<2 x i32>** %ptr, <2 x i32> %val) {
; CHECK-LABEL: store_v2i32_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x i32>*, <2 x i32>** %ptr
  store <2 x i32> %val, <2 x i32>* %A, align 1
  %inc = getelementptr <2 x i32>, <2 x i32>* %A, i32 1
  store <2 x i32>* %inc, <2 x i32>** %ptr
  ret void
}

define void @store_v2f32(<2 x float>** %ptr, <2 x float> %val) {
; CHECK-LABEL: store_v2f32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    str r3, [r0, #4]
; CHECK-NEXT:    str r2, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x float>*, <2 x float>** %ptr
  store <2 x float> %val, <2 x float>* %A, align 1
  ret void
}

define void @store_v2f32_update(<2 x float>** %ptr, <2 x float> %val) {
; CHECK-LABEL: store_v2f32_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x float>*, <2 x float>** %ptr
  store <2 x float> %val, <2 x float>* %A, align 1
  %inc = getelementptr <2 x float>, <2 x float>* %A, i32 1
  store <2 x float>* %inc, <2 x float>** %ptr
  ret void
}

define void @store_v1i64(<1 x i64>** %ptr, <1 x i64> %val) {
; CHECK-LABEL: store_v1i64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    str r3, [r0, #4]
; CHECK-NEXT:    str r2, [r0]
; CHECK-NEXT:    bx lr
  %A = load <1 x i64>*, <1 x i64>** %ptr
  store <1 x i64> %val, <1 x i64>* %A, align 1
  ret void
}

define void @store_v1i64_update(<1 x i64>** %ptr, <1 x i64> %val) {
; CHECK-LABEL: store_v1i64_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <1 x i64>*, <1 x i64>** %ptr
  store <1 x i64> %val, <1 x i64>* %A, align 1
  %inc = getelementptr <1 x i64>, <1 x i64>* %A, i32 1
  store <1 x i64>* %inc, <1 x i64>** %ptr
  ret void
}

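; Unaligned 128-bit vector stores assemble the value into a d-register pair and use
; vst1.8 {d16, d17}; the *_update variants use the post-incrementing form.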
define void @store_v16i8(<16 x i8>** %ptr, <16 x i8> %val) {
; CHECK-LABEL: store_v16i8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16, d17}, [r0]
; CHECK-NEXT:    bx lr
  %A = load <16 x i8>*, <16 x i8>** %ptr
  store <16 x i8> %val, <16 x i8>* %A, align 1
  ret void
}

define void @store_v16i8_update(<16 x i8>** %ptr, <16 x i8> %val) {
; CHECK-LABEL: store_v16i8_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.8 {d16, d17}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <16 x i8>*, <16 x i8>** %ptr
  store <16 x i8> %val, <16 x i8>* %A, align 1
  %inc = getelementptr <16 x i8>, <16 x i8>* %A, i32 1
  store <16 x i8>* %inc, <16 x i8>** %ptr
  ret void
}

define void @store_v8i16(<8 x i16>** %ptr, <8 x i16> %val) {
; CHECK-LABEL: store_v8i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16, d17}, [r0]
; CHECK-NEXT:    bx lr
  %A = load <8 x i16>*, <8 x i16>** %ptr
  store <8 x i16> %val, <8 x i16>* %A, align 1
  ret void
}

define void @store_v8i16_update(<8 x i16>** %ptr, <8 x i16> %val) {
; CHECK-LABEL: store_v8i16_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.8 {d16, d17}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <8 x i16>*, <8 x i16>** %ptr
  store <8 x i16> %val, <8 x i16>* %A, align 1
  %inc = getelementptr <8 x i16>, <8 x i16>* %A, i32 1
  store <8 x i16>* %inc, <8 x i16>** %ptr
  ret void
}

define void @store_v4i32(<4 x i32>** %ptr, <4 x i32> %val) {
; CHECK-LABEL: store_v4i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16, d17}, [r0]
; CHECK-NEXT:    bx lr
  %A = load <4 x i32>*, <4 x i32>** %ptr
  store <4 x i32> %val, <4 x i32>* %A, align 1
  ret void
}

define void @store_v4i32_update(<4 x i32>** %ptr, <4 x i32> %val) {
; CHECK-LABEL: store_v4i32_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.8 {d16, d17}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <4 x i32>*, <4 x i32>** %ptr
  store <4 x i32> %val, <4 x i32>* %A, align 1
  %inc = getelementptr <4 x i32>, <4 x i32>* %A, i32 1
  store <4 x i32>* %inc, <4 x i32>** %ptr
  ret void
}

define void @store_v4f32(<4 x float>** %ptr, <4 x float> %val) {
; CHECK-LABEL: store_v4f32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16, d17}, [r0]
; CHECK-NEXT:    bx lr
  %A = load <4 x float>*, <4 x float>** %ptr
  store <4 x float> %val, <4 x float>* %A, align 1
  ret void
}

define void @store_v4f32_update(<4 x float>** %ptr, <4 x float> %val) {
; CHECK-LABEL: store_v4f32_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.8 {d16, d17}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <4 x float>*, <4 x float>** %ptr
  store <4 x float> %val, <4 x float>* %A, align 1
  %inc = getelementptr <4 x float>, <4 x float>* %A, i32 1
  store <4 x float>* %inc, <4 x float>** %ptr
  ret void
}

define void @store_v2i64(<2 x i64>** %ptr, <2 x i64> %val) {
; CHECK-LABEL: store_v2i64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    vst1.8 {d16, d17}, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x i64>*, <2 x i64>** %ptr
  store <2 x i64> %val, <2 x i64>* %A, align 1
  ret void
}

define void @store_v2i64_update(<2 x i64>** %ptr, <2 x i64> %val) {
; CHECK-LABEL: store_v2i64_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.8 {d16, d17}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x i64>*, <2 x i64>** %ptr
  store <2 x i64> %val, <2 x i64>* %A, align 1
  %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
  store <2 x i64>* %inc, <2 x i64>** %ptr
  ret void
}

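; For the aligned variants the known alignment selects the vst1 element size
; (.16, .32, .64); with 16-byte alignment the address is additionally annotated :128.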
define void @store_v2i64_update_aligned2(<2 x i64>** %ptr, <2 x i64> %val) {
; CHECK-LABEL: store_v2i64_update_aligned2:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.16 {d16, d17}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x i64>*, <2 x i64>** %ptr
  store <2 x i64> %val, <2 x i64>* %A, align 2
  %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
  store <2 x i64>* %inc, <2 x i64>** %ptr
  ret void
}

define void @store_v2i64_update_aligned4(<2 x i64>** %ptr, <2 x i64> %val) {
; CHECK-LABEL: store_v2i64_update_aligned4:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.32 {d16, d17}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x i64>*, <2 x i64>** %ptr
  store <2 x i64> %val, <2 x i64>* %A, align 4
  %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
  store <2 x i64>* %inc, <2 x i64>** %ptr
  ret void
}

define void @store_v2i64_update_aligned8(<2 x i64>** %ptr, <2 x i64> %val) {
; CHECK-LABEL: store_v2i64_update_aligned8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.64 {d16, d17}, [r1]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x i64>*, <2 x i64>** %ptr
  store <2 x i64> %val, <2 x i64>* %A, align 8
  %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
  store <2 x i64>* %inc, <2 x i64>** %ptr
  ret void
}

define void @store_v2i64_update_aligned16(<2 x i64>** %ptr, <2 x i64> %val) {
; CHECK-LABEL: store_v2i64_update_aligned16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    vst1.64 {d16, d17}, [r1:128]!
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <2 x i64>*, <2 x i64>** %ptr
  store <2 x i64> %val, <2 x i64>* %A, align 16
  %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
  store <2 x i64>* %inc, <2 x i64>** %ptr
  ret void
}

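; Truncating stores narrow the value with vmovn.i32 and vuzp.8, then store a single
; 32-bit lane.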
define void @truncstore_v4i32tov4i8(<4 x i8>** %ptr, <4 x i32> %val) {
; CHECK-LABEL: truncstore_v4i32tov4i8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r0, [r0]
; CHECK-NEXT:    vmovn.i32 d16, q8
; CHECK-NEXT:    vuzp.8 d16, d17
; CHECK-NEXT:    vst1.32 {d16[0]}, [r0:32]
; CHECK-NEXT:    bx lr
  %A = load <4 x i8>*, <4 x i8>** %ptr
  %trunc = trunc <4 x i32> %val to <4 x i8>
  store <4 x i8> %trunc, <4 x i8>* %A, align 4
  ret void
}

define void @truncstore_v4i32tov4i8_fake_update(<4 x i8>** %ptr, <4 x i32> %val) {
; CHECK-LABEL: truncstore_v4i32tov4i8_fake_update:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [sp]
; CHECK-NEXT:    vmov d16, r2, r3
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    movs r2, #16
; CHECK-NEXT:    vmovn.i32 d16, q8
; CHECK-NEXT:    vuzp.8 d16, d17
; CHECK-NEXT:    vst1.32 {d16[0]}, [r1:32], r2
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
  %A = load <4 x i8>*, <4 x i8>** %ptr
  %trunc = trunc <4 x i32> %val to <4 x i8>
  store <4 x i8> %trunc, <4 x i8>* %A, align 4
  %inc = getelementptr <4 x i8>, <4 x i8>* %A, i32 4
  store <4 x i8>* %inc, <4 x i8>** %ptr
  ret void
}

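; When the incremented pointer is returned rather than stored, the 32-byte step does
; not match the access size, so the register-offset post-increment form of vst1 is used.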
define <4 x i32>* @test_vst1_1reg(<4 x i32>* %ptr.in, <4 x i32>* %ptr.out) {
; CHECK-LABEL: test_vst1_1reg:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
; CHECK-NEXT:    movs r0, #32
; CHECK-NEXT:    vst1.32 {d16, d17}, [r1], r0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bx lr
  %val = load <4 x i32>, <4 x i32>* %ptr.in
  store <4 x i32> %val, <4 x i32>* %ptr.out
  %next = getelementptr <4 x i32>, <4 x i32>* %ptr.out, i32 2
  ret <4 x i32>* %next
}

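; Stores of odd-sized vectors (<3 x i8>, <3 x i64>) should cover exactly the object
; and not write past its end.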
; PR56970
define void @v3i8store(<3 x i8> *%p) {
; CHECK-LABEL: v3i8store:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    vmov.i32 d16, #0xff
; CHECK-NEXT:    mov r1, sp
; CHECK-NEXT:    vmov.i32 d17, #0x0
; CHECK-NEXT:    movs r2, #0
; CHECK-NEXT:    vand d16, d17, d16
; CHECK-NEXT:    vst1.32 {d16[0]}, [r1:32]
; CHECK-NEXT:    vld1.32 {d16[0]}, [r1:32]
; CHECK-NEXT:    vmovl.u16 q8, d16
; CHECK-NEXT:    strb r2, [r0, #2]
; CHECK-NEXT:    vmov.32 r1, d16[0]
; CHECK-NEXT:    strh r1, [r0]
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    bx lr
  store <3 x i8> zeroinitializer, <3 x i8> *%p, align 4
  ret void
}

define void @v3i64shuffle(<3 x i64> *%p, <3 x i64> %a) {
; CHECK-LABEL: v3i64shuffle:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i32 q8, #0x0
; CHECK-NEXT:    ldrd r12, r1, [sp, #8]
; CHECK-NEXT:    vmov d18, r2, r3
; CHECK-NEXT:    vorr d19, d16, d16
; CHECK-NEXT:    str r1, [r0, #20]
; CHECK-NEXT:    vst1.32 {d18, d19}, [r0]!
; CHECK-NEXT:    str.w r12, [r0]
; CHECK-NEXT:    bx lr
  %b = shufflevector <3 x i64> %a, <3 x i64> zeroinitializer, <3 x i32> <i32 0, i32 3, i32 2>
  store <3 x i64> %b, <3 x i64> *%p, align 4
  ret void
}