; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64

; FIXME: We are over-aligning the stack on V, wasting stack space.

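; The function below allocates a scalable vector alongside an i64 that
; requires 16-byte alignment, then returns the i64's address. The checks
; verify that the scalar slot stays 16-byte aligned (the returned address is
; a 16-byte offset from the 16-byte-aligned sp) while the scalable object's
; space is reserved separately with runtime vlenb-based sp arithmetic.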
define i64* @scalar_stack_align16() nounwind {
; RV32-LABEL: scalar_stack_align16:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 1
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    addi a0, sp, 32
; RV32-NEXT:    call extern@plt
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a1, a1, 1
; RV32-NEXT:    add sp, sp, a1
; RV32-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: scalar_stack_align16:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -48
; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 1
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    addi a0, sp, 32
; RV64-NEXT:    call extern@plt
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 1
; RV64-NEXT:    add sp, sp, a1
; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 48
; RV64-NEXT:    ret
  %a = alloca <vscale x 2 x i32>
  %c = alloca i64, align 16
  call void @extern(<vscale x 2 x i32>* %a)
  ret i64* %c
}

declare void @extern(<vscale x 2 x i32>*)