; First RUN line checks the type-legalization trace (needs an asserts build);
; second RUN line checks the final AArch64 code generation.
; RUN: llc < %s -debug-only=legalize-types 2>&1 | FileCheck %s --check-prefix=CHECK-LEGALIZATION
; RUN: llc < %s | FileCheck %s
; REQUIRES: asserts

target triple = "aarch64-unknown-linux-gnu"
attributes #0 = { "target-features"="+sve" uwtable }

declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64>, <8 x i64>, i64)
declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double>, <8 x double>, i64)

; Inserting a fixed 512-bit vector into a scalable nxv2i64 is legalized as a
; chain of four 128-bit insert_subvector nodes (offsets 0, 2, 4, 6), each
; bounced through a stack slot because the runtime vector length is unknown.
define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %b) #0 {
; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2i64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0>
; CHECK-LEGALIZATION: Legally typed node: [[T2:t[0-9]+]]: nxv2i64 = insert_subvector [[T1]], {{t[0-9]+}}, Constant:i64<2>
; CHECK-LEGALIZATION: Legally typed node: [[T3:t[0-9]+]]: nxv2i64 = insert_subvector [[T2]], {{t[0-9]+}}, Constant:i64<4>
; CHECK-LEGALIZATION: Legally typed node: [[T4:t[0-9]+]]: nxv2i64 = insert_subvector [[T3]], {{t[0-9]+}}, Constant:i64<6>

; CHECK-LABEL: test_nxv2i64_v8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    cntd x8
; CHECK-NEXT:    mov w9, #2
; CHECK-NEXT:    sub x8, x8, #2
; CHECK-NEXT:    ptrue p0.d, vl2
; CHECK-NEXT:    cmp x8, #2
; CHECK-NEXT:    mov x10, sp
; CHECK-NEXT:    csel x9, x8, x9, lo
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    mov z0.d, p0/m, z1.d
; CHECK-NEXT:    lsl x9, x9, #3
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    cmp x8, #4
; CHECK-NEXT:    str q2, [x10, x9]
; CHECK-NEXT:    mov w9, #4
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT:    csel x9, x8, x9, lo
; CHECK-NEXT:    lsl x9, x9, #3
; CHECK-NEXT:    addvl x10, sp, #1
; CHECK-NEXT:    cmp x8, #6
; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT:    str q3, [x10, x9]
; CHECK-NEXT:    mov w9, #6
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT:    csel x8, x8, x9, lo
; CHECK-NEXT:    addvl x9, sp, #2
; CHECK-NEXT:    lsl x8, x8, #3
; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #2, mul vl]
; CHECK-NEXT:    str q4, [x9, x8]
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    .cfi_def_cfa wsp, 16
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    .cfi_restore w29
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> %a, <8 x i64> %b, i64 0)
  ret <vscale x 2 x i64> %r
}

; Floating-point twin of the i64 test above: same four-step insert_subvector
; legalization and the same stack-slot-based codegen, using f64 element type.
define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x double> %b) #0 {
; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2f64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0>
; CHECK-LEGALIZATION: Legally typed node: [[T2:t[0-9]+]]: nxv2f64 = insert_subvector [[T1]], {{t[0-9]+}}, Constant:i64<2>
; CHECK-LEGALIZATION: Legally typed node: [[T3:t[0-9]+]]: nxv2f64 = insert_subvector [[T2]], {{t[0-9]+}}, Constant:i64<4>
; CHECK-LEGALIZATION: Legally typed node: [[T4:t[0-9]+]]: nxv2f64 = insert_subvector [[T3]], {{t[0-9]+}}, Constant:i64<6>

; CHECK-LABEL: test_nxv2f64_v8f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    cntd x8
; CHECK-NEXT:    mov w9, #2
; CHECK-NEXT:    sub x8, x8, #2
; CHECK-NEXT:    ptrue p0.d, vl2
; CHECK-NEXT:    cmp x8, #2
; CHECK-NEXT:    mov x10, sp
; CHECK-NEXT:    csel x9, x8, x9, lo
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    mov z0.d, p0/m, z1.d
; CHECK-NEXT:    lsl x9, x9, #3
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    cmp x8, #4
; CHECK-NEXT:    str q2, [x10, x9]
; CHECK-NEXT:    mov w9, #4
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT:    csel x9, x8, x9, lo
; CHECK-NEXT:    lsl x9, x9, #3
; CHECK-NEXT:    addvl x10, sp, #1
; CHECK-NEXT:    cmp x8, #6
; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT:    str q3, [x10, x9]
; CHECK-NEXT:    mov w9, #6
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT:    csel x8, x8, x9, lo
; CHECK-NEXT:    addvl x9, sp, #2
; CHECK-NEXT:    lsl x8, x8, #3
; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #2, mul vl]
; CHECK-NEXT:    str q4, [x9, x8]
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    .cfi_def_cfa wsp, 16
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    .cfi_restore w29
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> %a, <8 x double> %b, i64 0)
  ret <vscale x 2 x double> %r
}
