; RUN: llc -verify-machineinstrs -mcpu=ppc64 -O0 -frame-pointer=all -fast-isel=false < %s | FileCheck %s
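;
; Check that structs of at most 8 bytes passed byval are loaded into
; GPRs 3-9 by the caller and spilled right-justified into the parameter
; save area by the callee (64-bit ELF ABI).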

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

%struct.s1 = type { i8 }
%struct.s2 = type { i16 }
%struct.s4 = type { i32 }
%struct.t1 = type { i8 }
%struct.t3 = type <{ i16, i8 }>
%struct.t5 = type <{ i32, i8 }>
%struct.t6 = type <{ i32, i16 }>
%struct.t7 = type <{ i32, i16, i8 }>
%struct.s3 = type { i16, i8 }
%struct.s5 = type { i32, i8 }
%struct.s6 = type { i32, i16 }
%struct.s7 = type { i32, i16, i8 }
%struct.t2 = type <{ i16 }>
%struct.t4 = type <{ i32 }>

@caller1.p1 = private unnamed_addr constant %struct.s1 { i8 1 }, align 1
@caller1.p2 = private unnamed_addr constant %struct.s2 { i16 2 }, align 2
@caller1.p3 = private unnamed_addr constant { i16, i8, i8 } { i16 4, i8 8, i8 undef }, align 2
@caller1.p4 = private unnamed_addr constant %struct.s4 { i32 16 }, align 4
@caller1.p5 = private unnamed_addr constant { i32, i8, [3 x i8] } { i32 32, i8 64, [3 x i8] undef }, align 4
@caller1.p6 = private unnamed_addr constant { i32, i16, [2 x i8] } { i32 128, i16 256, [2 x i8] undef }, align 4
@caller1.p7 = private unnamed_addr constant { i32, i16, i8, i8 } { i32 512, i16 1024, i8 -3, i8 undef }, align 4
@caller2.p1 = private unnamed_addr constant %struct.t1 { i8 1 }, align 1
@caller2.p2 = private unnamed_addr constant { i16 } { i16 2 }, align 1
@caller2.p3 = private unnamed_addr constant %struct.t3 <{ i16 4, i8 8 }>, align 1
@caller2.p4 = private unnamed_addr constant { i32 } { i32 16 }, align 1
@caller2.p5 = private unnamed_addr constant %struct.t5 <{ i32 32, i8 64 }>, align 1
@caller2.p6 = private unnamed_addr constant %struct.t6 <{ i32 128, i16 256 }>, align 1
@caller2.p7 = private unnamed_addr constant %struct.t7 <{ i32 512, i16 1024, i8 -3 }>, align 1
define i32 @caller1() nounwind {
entry:
  %p1 = alloca %struct.s1, align 1
  %p2 = alloca %struct.s2, align 2
  %p3 = alloca %struct.s3, align 2
  %p4 = alloca %struct.s4, align 4
  %p5 = alloca %struct.s5, align 4
  %p6 = alloca %struct.s6, align 4
  %p7 = alloca %struct.s7, align 4
  %0 = bitcast %struct.s1* %p1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i1 false)
  %1 = bitcast %struct.s2* %p2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %1, i8* align 2 bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i1 false)
  %2 = bitcast %struct.s3* %p3 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %2, i8* align 2 bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i1 false)
  %3 = bitcast %struct.s4* %p4 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i1 false)
  %4 = bitcast %struct.s5* %p5 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i1 false)
  %5 = bitcast %struct.s6* %p6 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false)
  %6 = bitcast %struct.s7* %p7 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false)
  %call = call i32 @callee1(%struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7)
  ret i32 %call

; CHECK-LABEL: caller1
; CHECK: ld 9, 112(31)
; CHECK: ld 8, 120(31)
; CHECK: ld 7, 128(31)
; CHECK: lwz 6, 136(31)
; CHECK: lwz 5, 144(31)
; CHECK: lhz 4, 152(31)
; CHECK: lbz 3, 160(31)
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind

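; callee1 should spill each incoming register argument right-justified
; into its doubleword slot of the parameter save area and reload the
; struct fields from there.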
define internal i32 @callee1(%struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) nounwind {
entry:
  %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
  %0 = load i8, i8* %a, align 1
  %conv = zext i8 %0 to i32
  %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
  %1 = load i16, i16* %a1, align 2
  %conv2 = sext i16 %1 to i32
  %add = add nsw i32 %conv, %conv2
  %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
  %2 = load i16, i16* %a3, align 2
  %conv4 = sext i16 %2 to i32
  %add5 = add nsw i32 %add, %conv4
  %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
  %3 = load i32, i32* %a6, align 4
  %add7 = add nsw i32 %add5, %3
  %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
  %4 = load i32, i32* %a8, align 4
  %add9 = add nsw i32 %add7, %4
  %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
  %5 = load i32, i32* %a10, align 4
  %add11 = add nsw i32 %add9, %5
  %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
  %6 = load i32, i32* %a12, align 4
  %add13 = add nsw i32 %add11, %6
  ret i32 %add13

; CHECK-LABEL: callee1
; CHECK-DAG: std 9, 96(1)
; CHECK-DAG: std 8, 88(1)
; CHECK-DAG: std 7, 80(1)
; CHECK-DAG: stw 6, 76(1)
; CHECK-DAG: stw 5, 68(1)
; CHECK-DAG: sth 4, 62(1)
; CHECK-DAG: stb 3, 55(1)
; CHECK-DAG: lha {{[0-9]+}}, 62(1)
; CHECK-DAG: lha {{[0-9]+}}, 68(1)
; CHECK-DAG: lbz {{[0-9]+}}, 55(1)
; CHECK-DAG: lwz {{[0-9]+}}, 76(1)
; CHECK-DAG: lwz {{[0-9]+}}, 80(1)
; CHECK-DAG: lwz {{[0-9]+}}, 88(1)
; CHECK-DAG: lwz {{[0-9]+}}, 96(1)
}

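; The same test with packed (align 1) struct types; the odd-sized byval
; arguments are spilled with split byte/halfword/word stores (via rldicl
; shifts) at unaligned offsets rather than single aligned stores.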
define i32 @caller2() nounwind {
entry:
  %p1 = alloca %struct.t1, align 1
  %p2 = alloca %struct.t2, align 1
  %p3 = alloca %struct.t3, align 1
  %p4 = alloca %struct.t4, align 1
  %p5 = alloca %struct.t5, align 1
  %p6 = alloca %struct.t6, align 1
  %p7 = alloca %struct.t7, align 1
  %0 = bitcast %struct.t1* %p1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i1 false)
  %1 = bitcast %struct.t2* %p2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i1 false)
  %2 = bitcast %struct.t3* %p3 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i1 false)
  %3 = bitcast %struct.t4* %p4 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i1 false)
  %4 = bitcast %struct.t5* %p5 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i1 false)
  %5 = bitcast %struct.t6* %p6 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false)
  %6 = bitcast %struct.t7* %p7 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false)
  %call = call i32 @callee2(%struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7)
  ret i32 %call

; CHECK-LABEL: caller2
; CHECK: stb {{[0-9]+}}, 71(1)
; CHECK: sth {{[0-9]+}}, 69(1)
; CHECK: stb {{[0-9]+}}, 87(1)
; CHECK: stw {{[0-9]+}}, 83(1)
; CHECK: sth {{[0-9]+}}, 94(1)
; CHECK: stw {{[0-9]+}}, 90(1)
; CHECK: stw {{[0-9]+}}, 100(1)
; CHECK: stw {{[0-9]+}}, 97(1)
; CHECK: ld 9, 96(1)
; CHECK: ld 8, 88(1)
; CHECK: ld 7, 80(1)
; CHECK: lwz 6, 136(31)
; CHECK: ld 5, 64(1)
; CHECK: lhz 4, 152(31)
; CHECK: lbz 3, 160(31)
}

define internal i32 @callee2(%struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind {
entry:
  %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
  %0 = load i8, i8* %a, align 1
  %conv = zext i8 %0 to i32
  %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
  %1 = load i16, i16* %a1, align 1
  %conv2 = sext i16 %1 to i32
  %add = add nsw i32 %conv, %conv2
  %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
  %2 = load i16, i16* %a3, align 1
  %conv4 = sext i16 %2 to i32
  %add5 = add nsw i32 %add, %conv4
  %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
  %3 = load i32, i32* %a6, align 1
  %add7 = add nsw i32 %add5, %3
  %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
  %4 = load i32, i32* %a8, align 1
  %add9 = add nsw i32 %add7, %4
  %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
  %5 = load i32, i32* %a10, align 1
  %add11 = add nsw i32 %add9, %5
  %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
  %6 = load i32, i32* %a12, align 1
  %add13 = add nsw i32 %add11, %6
  ret i32 %add13

; CHECK-LABEL: callee2
; CHECK:     stb 9, 103(1)
; CHECK:     rldicl 10, 9, 56, 8
; CHECK:     sth 10, 101(1)
; CHECK:     rldicl 9, 9, 40, 24
; CHECK:     stw 9, 97(1)
; CHECK:     sth 8, 94(1)
; CHECK:     rldicl 8, 8, 48, 16
; CHECK:     stw 8, 90(1)
; CHECK:     stb 7, 87(1)
; CHECK:     rldicl 7, 7, 56, 8
; CHECK:     stw 7, 83(1)
; CHECK:     stb 5, 71(1)
; CHECK:     rldicl 5, 5, 56, 8
; CHECK:     sth 5, 69(1)
; CHECK:     stw 6, 76(1)
; CHECK:     sth 4, 62(1)
; CHECK:     stb 3, 55(1)
; CHECK-DAG: lha {{[0-9]+}}, 62(1)
; CHECK-DAG: lha {{[0-9]+}}, 69(1)
; CHECK-DAG: lbz {{[0-9]+}}, 55(1)
; CHECK-DAG: lwz {{[0-9]+}}, 76(1)
; CHECK-DAG: lwz {{[0-9]+}}, 83(1)
; CHECK-DAG: lwz {{[0-9]+}}, 90(1)
; CHECK-DAG: lwz {{[0-9]+}}, 97(1)
}