; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+reserve-x18 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+reserve-x18 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64

; f1: leaf function that makes no calls — the return address is never spilled,
; so no shadow call stack push/pop is emitted; the body is a bare ret.
define void @f1() shadowcallstack {
; RV32-LABEL: f1:
; RV32:       # %bb.0:
; RV32-NEXT:    ret
;
; RV64-LABEL: f1:
; RV64:       # %bb.0:
; RV64-NEXT:    ret
  ret void
}

; External callee used only for the tail-call test below.
declare void @foo()

; f2: a function whose only call is a tail call — the call is lowered as a
; tail jump (`tail foo@plt`) with no following ret, and since ra is never
; live across a call, no shadow call stack spill is emitted either.
define void @f2() shadowcallstack {
; RV32-LABEL: f2:
; RV32:       # %bb.0:
; RV32-NEXT:    tail foo@plt
;
; RV64-LABEL: f2:
; RV64:       # %bb.0:
; RV64-NEXT:    tail foo@plt
  tail call void @foo()
  ret void
}

; External callee returning a value, used by the non-tail-call tests below.
declare i32 @bar()

; f3: a single non-tail call forces ra to be saved. With +reserve-x18, the
; shadow call stack pointer lives in x18 (ABI name s2): the prologue stores
; ra at 0(s2) and bumps s2 by the pointer size, and the epilogue reloads ra
; from the shadow stack and pops it — in addition to the normal stack spill.
; NOTE(review): %res1 is computed but unused and the function returns %res;
; this matches the CHECK lines, but confirm the dead add is intentional.
define i32 @f3() shadowcallstack {
; RV32-LABEL: f3:
; RV32:       # %bb.0:
; RV32-NEXT:    sw ra, 0(s2)
; RV32-NEXT:    addi s2, s2, 4
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call bar@plt
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    lw ra, -4(s2)
; RV32-NEXT:    addi s2, s2, -4
; RV32-NEXT:    ret
;
; RV64-LABEL: f3:
; RV64:       # %bb.0:
; RV64-NEXT:    sd ra, 0(s2)
; RV64-NEXT:    addi s2, s2, 8
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call bar@plt
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ld ra, -8(s2)
; RV64-NEXT:    addi s2, s2, -8
; RV64-NEXT:    ret
  %res = call i32 @bar()
  %res1 = add i32 %res, 1
  ret i32 %res
}

; f4: four calls with values live across them — the allocator uses the
; callee-saved registers s0, s1, and then s3, skipping s2, which confirms
; that x18/s2 is reserved for the shadow call stack and never allocated.
; The shadow call stack push/pop brackets the whole function body.
define i32 @f4() shadowcallstack {
; RV32-LABEL: f4:
; RV32:       # %bb.0:
; RV32-NEXT:    sw ra, 0(s2)
; RV32-NEXT:    addi s2, s2, 4
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s3, 0(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    .cfi_offset s1, -12
; RV32-NEXT:    .cfi_offset s3, -16
; RV32-NEXT:    call bar@plt
; RV32-NEXT:    mv s0, a0
; RV32-NEXT:    call bar@plt
; RV32-NEXT:    mv s1, a0
; RV32-NEXT:    call bar@plt
; RV32-NEXT:    mv s3, a0
; RV32-NEXT:    call bar@plt
; RV32-NEXT:    add a1, s0, s1
; RV32-NEXT:    add a0, s3, a0
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s3, 0(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    lw ra, -4(s2)
; RV32-NEXT:    addi s2, s2, -4
; RV32-NEXT:    ret
;
; RV64-LABEL: f4:
; RV64:       # %bb.0:
; RV64-NEXT:    sd ra, 0(s2)
; RV64-NEXT:    addi s2, s2, 8
; RV64-NEXT:    addi sp, sp, -32
; RV64-NEXT:    .cfi_def_cfa_offset 32
; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s3, 0(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    .cfi_offset s1, -24
; RV64-NEXT:    .cfi_offset s3, -32
; RV64-NEXT:    call bar@plt
; RV64-NEXT:    mv s0, a0
; RV64-NEXT:    call bar@plt
; RV64-NEXT:    mv s1, a0
; RV64-NEXT:    call bar@plt
; RV64-NEXT:    mv s3, a0
; RV64-NEXT:    call bar@plt
; RV64-NEXT:    addw a1, s0, s1
; RV64-NEXT:    addw a0, s3, a0
; RV64-NEXT:    addw a0, a1, a0
; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s3, 0(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 32
; RV64-NEXT:    ld ra, -8(s2)
; RV64-NEXT:    addi s2, s2, -8
; RV64-NEXT:    ret
  %res1 = call i32 @bar()
  %res2 = call i32 @bar()
  %res3 = call i32 @bar()
  %res4 = call i32 @bar()
  %res12 = add i32 %res1, %res2
  %res34 = add i32 %res3, %res4
  %res1234 = add i32 %res12, %res34
  ret i32 %res1234
}

; f5: same as f3 but marked nounwind — the shadow call stack code is still
; emitted, but no .cfi_* directives appear in the output.
; NOTE(review): %res1 is unused and %res is returned, mirroring f3; confirm
; the dead add is intentional.
define i32 @f5() shadowcallstack nounwind {
; RV32-LABEL: f5:
; RV32:       # %bb.0:
; RV32-NEXT:    sw ra, 0(s2)
; RV32-NEXT:    addi s2, s2, 4
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    call bar@plt
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    lw ra, -4(s2)
; RV32-NEXT:    addi s2, s2, -4
; RV32-NEXT:    ret
;
; RV64-LABEL: f5:
; RV64:       # %bb.0:
; RV64-NEXT:    sd ra, 0(s2)
; RV64-NEXT:    addi s2, s2, 8
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    call bar@plt
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ld ra, -8(s2)
; RV64-NEXT:    addi s2, s2, -8
; RV64-NEXT:    ret
  %res = call i32 @bar()
  %res1 = add i32 %res, 1
  ret i32 %res
}
