// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOCOMPAT
// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=x86_64-apple-darwin10 -fclang-abi-compat=6.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT
// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=x86_64-scei-ps4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT
// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=x86_64-sie-ps5 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT

extern int int_source();
extern void int_sink(int x);

namespace test0 {
  struct A {
    int aField;
    int bField;
  };

  struct B {
    int onebit : 2;
    int twobit : 6;
    int intField;
  };
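  // onebit and twobit pack into a single byte, which is why the IR
  // checked below manipulates the bit-field storage as an i8.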

  struct __attribute__((packed, aligned(2))) C : A, B {
  };
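  // Layout of C (packed): the A subobject occupies bytes 0-7, so the B
  // subobject, and with it the bit-field byte, lands at offset 8;
  // aligned(2) caps alignof(C) at 2.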

  // These accesses should have alignment 4 because they're at offset 0
  // in a reference with an assumed alignment of 4.
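  // (alignof(B) is 4, from intField, so a B& is assumed 4-aligned and
  // the bit-field byte at offset 0 inherits that alignment.)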
  // CHECK-LABEL: @_ZN5test01aERNS_1BE
  void a(B &b) {
    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 4
    b.onebit = int_source();

    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
    int_sink(b.onebit);
  }

  // These accesses should have alignment 2 under the Clang 6 ABI
  // because they're at offset 8 in a reference/pointer with an assumed
  // alignment of 2; the current ABI instead uses alignment 4 here (see
  // the NOCOMPAT lines).
  // CHECK-LABEL: @_ZN5test01bERNS_1CE
  void b(C &c) {
    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
    c.onebit = int_source();

    // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
    int_sink(c.onebit);
  }

  // CHECK-LABEL: @_ZN5test01cEPNS_1CE
  void c(C *c) {
    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
    c->onebit = int_source();

    // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
    int_sink(c->onebit);
  }

  // These accesses should have alignment 2 under the Clang 6 ABI
  // because they're at offset 8 in an alignment-2 variable; the current
  // ABI instead uses alignment 4 (see the NOCOMPAT lines).
  // CHECK-LABEL: @_ZN5test01dEv
  void d() {
    // CHECK-V6COMPAT: [[C_P:%.*]] = alloca [[C:%.*]], align 2
    // CHECK-NOCOMPAT: [[C_P:%.*]] = alloca [[C:%.*]], align 4
    C c;

    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
    c.onebit = int_source();

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
    int_sink(c.onebit);
  }

  // These accesses should have alignment 8 because they're at offset 8
  // in an alignment-16 variable.
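  // (A 16-aligned base plus offset 8 is always a multiple of 8, but not
  // necessarily of 16.)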
  // CHECK-LABEL: @_ZN5test01eEv
  void e() {
    // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 16
    __attribute__((aligned(16))) C c;

    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 8
    c.onebit = int_source();

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
    int_sink(c.onebit);
  }
}

namespace test1 {
  struct Array {
    int elts[4];
  };

  struct A {
    __attribute__((aligned(16))) Array aArray;
  };

  struct B : virtual A {
    void *bPointer; // puts bArray at offset 16
    Array bArray;
  };
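  // Non-virtual layout of B: vptr at 0, bPointer at 8, bArray at 16
  // (field index 2 in the GEPs below).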

  struct C : virtual A { // must be viable as primary base
    // Non-empty, nv-size not a multiple of 16.
    void *cPointer1;
    void *cPointer2;
  };
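  // C's non-virtual size is 24 (vptr plus two pointers), which is what
  // places B at offset 24 within D below.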

  // Proof of concept that the non-virtual components of B do not have
  // to be 16-byte-aligned.
  struct D : C, B {};
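  // With B at offset 24, bArray lands at offset 40 in D, so even in a
  // 16-aligned D it is only guaranteed 8-byte alignment (see g below).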

  // For the following tests, we want to assign into a variable whose
  // alignment is high enough that it will absolutely not be the
  // constraint on the memcpy alignment.
  typedef __attribute__((aligned(64))) Array AlignedArray;
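  // Hence every memcpy below has destination alignment 64; the operand
  // under test is the source alignment.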

  // CHECK-LABEL: @_ZN5test11aERNS_1AE
  void a(A &a) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY:%.*]], align 64
    // CHECK: [[A_P:%.*]] = load [[A:%.*]]*, [[A]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    AlignedArray result = a.aArray;
  }

  // CHECK-LABEL: @_ZN5test11bERNS_1BE
  void b(B &b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[VPTR_P:%.*]] = bitcast [[B]]* [[B_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 8
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
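    // The slot at vptr - 24 holds the virtual-base offset for A in the
    // Itanium ABI vtable; adding it to B_P locates the A subobject.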
    // CHECK: [[T0:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    AlignedArray result = b.aArray;
  }

  // CHECK-LABEL: @_ZN5test11cERNS_1BE
  void c(B &b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    AlignedArray result = b.bArray;
  }

  // CHECK-LABEL: @_ZN5test11dEPNS_1BE
  void d(B *b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    AlignedArray result = b->bArray;
  }

  // CHECK-LABEL: @_ZN5test11eEv
  void e() {
    // CHECK: [[B_P:%.*]] = alloca [[B]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    B b;
    AlignedArray result = b.bArray;
  }

  // CHECK-LABEL: @_ZN5test11fEv
  void f() {
    // TODO: we should devirtualize this derived-to-base conversion.
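    // (d is a local variable, so its dynamic type is statically known
    // and the vbase-offset load below is unnecessary.)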
    // CHECK: [[D_P:%.*]] = alloca [[D:%.*]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[VPTR_P:%.*]] = bitcast [[D]]* [[D_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 16
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    D d;
    AlignedArray result = d.aArray;
  }

  // CHECK-LABEL: @_ZN5test11gEv
  void g() {
    // CHECK: [[D_P:%.*]] = alloca [[D]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 24
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    D d;
    AlignedArray result = d.bArray;
  }

  // CHECK-LABEL: @_ZN5test11hEPA_NS_1BE
  void h(B (*b)[]) {
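    // (*b) designates element 0 of the array; alignof(B) is 16, so its
    // bArray at offset 16 is 16-aligned.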
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [0 x [[B]]]*, [0 x [[B]]]**
    // CHECK: [[ELEMENT_P:%.*]] = getelementptr inbounds [0 x [[B]]], [0 x [[B]]]* [[B_P]], i64 0
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[ELEMENT_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    AlignedArray result = (*b)->bArray;
  }
}

// CHECK-LABEL: @_Z22incomplete_array_derefPA_i
// CHECK: load i32, i32* {{%.*}}, align 4
int incomplete_array_deref(int (*p)[]) { return (*p)[2]; }