// RUN: %clang_cc1 -no-opaque-pointers -emit-llvm -o %t %s
// RUN: not grep __builtin %t
// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple x86_64-darwin-apple | FileCheck %s

int printf(const char *, ...);

void p(char *str, int x) {
  printf("%s: %d\n", str, x);
}
void q(char *str, double x) {
  printf("%s: %f\n", str, x);
}
void r(char *str, void *ptr) {
  printf("%s: %p\n", str, ptr);
}

int random(void);
int finite(double);

int main(void) {
  int N = random();
#define P(n,args) p(#n #args, __builtin_##n args)
#define Q(n,args) q(#n #args, __builtin_##n args)
#define R(n,args) r(#n #args, __builtin_##n args)
#define V(n,args) p(#n #args, (__builtin_##n args, 0))
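  // P, Q, and R print an int, double, or pointer result respectively; V
  // discards the builtin's result (some of these return void) and prints a
  // dummy 0 via the comma operator so the call is still emitted.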
  P(types_compatible_p, (int, float));
  P(choose_expr, (0, 10, 20));
  P(constant_p, (sizeof(10)));
  P(expect, (N == 12, 0));
  V(prefetch, (&N));
  V(prefetch, (&N, 1));
  V(prefetch, (&N, 1, 0));

  // Numeric Constants

  Q(huge_val, ());
  Q(huge_valf, ());
  Q(huge_vall, ());
  Q(inf, ());
  Q(inff, ());
  Q(infl, ());

  P(fpclassify, (0, 1, 2, 3, 4, 1.0));
  P(fpclassify, (0, 1, 2, 3, 4, 1.0f));
  P(fpclassify, (0, 1, 2, 3, 4, 1.0l));

  Q(nan, (""));
  Q(nanf, (""));
  Q(nanl, (""));
  Q(nans, (""));
  Q(nan, ("10"));
  Q(nanf, ("10"));
  Q(nanl, ("10"));
  Q(nans, ("10"));

  P(isgreater, (1., 2.));
  P(isgreaterequal, (1., 2.));
  P(isless, (1., 2.));
  P(islessequal, (1., 2.));
  P(islessgreater, (1., 2.));
  P(isunordered, (1., 2.));

  P(isinf, (1.));
  P(isinf_sign, (1.));
  P(isnan, (1.));

  // Bitwise & Numeric Functions

  P(abs, (N));

  P(clz, (N));
  P(clzl, (N));
  P(clzll, (N));
  P(ctz, (N));
  P(ctzl, (N));
  P(ctzll, (N));
  P(ffs, (N));
  P(ffsl, (N));
  P(ffsll, (N));
  P(parity, (N));
  P(parityl, (N));
  P(parityll, (N));
  P(popcount, (N));
  P(popcountl, (N));
  P(popcountll, (N));
  Q(powi, (1.2f, N));
  Q(powif, (1.2f, N));
  Q(powil, (1.2f, N));

  // Lib functions
  int a, b, n = random(); // Avoid optimizing out.
  char s0[10], s1[] = "Hello";
  V(strcat, (s0, s1));
  V(strcmp, (s0, s1));
  V(strdup, (s0));
  V(strncat, (s0, s1, n));
  V(strndup, (s0, n));
  V(strchr, (s0, s1[0]));
  V(strrchr, (s0, s1[0]));
  V(strcpy, (s0, s1));
  V(strncpy, (s0, s1, n));
  V(sprintf, (s0, "%s", s1));
  V(snprintf, (s0, n, "%s", s1));

  // Object size checking
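  // __builtin_object_size's second argument selects the kind of estimate:
  // bit 1 asks for a minimum rather than a maximum, and bit 0 restricts the
  // estimate to the closest enclosing subobject. The *_chk variants take the
  // destination's object size as their final argument.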
  V(__memset_chk, (s0, 0, sizeof s0, n));
  V(__memcpy_chk, (s0, s1, sizeof s0, n));
  V(__memmove_chk, (s0, s1, sizeof s0, n));
  V(__mempcpy_chk, (s0, s1, sizeof s0, n));
  V(__strncpy_chk, (s0, s1, sizeof s0, n));
  V(__strcpy_chk, (s0, s1, n));
  s0[0] = 0;
  V(__strcat_chk, (s0, s1, n));
  P(object_size, (s0, 0));
  P(object_size, (s0, 1));
  P(object_size, (s0, 2));
  P(object_size, (s0, 3));

  // Whatever

  P(bswap16, (N));
  P(bswap32, (N));
  P(bswap64, (N));

  // CHECK: @llvm.bitreverse.i8
  // CHECK: @llvm.bitreverse.i16
  // CHECK: @llvm.bitreverse.i32
  // CHECK: @llvm.bitreverse.i64
  P(bitreverse8, (N));
  P(bitreverse16, (N));
  P(bitreverse32, (N));
  P(bitreverse64, (N));

  // FIXME
  // V(clear_cache, (&N, &N+1));
  V(trap, ());
  R(extract_return_addr, (&N));
  P(signbit, (1.0));

  R(launder, (&N));

  return 0;
}



void foo(void) {
  __builtin_strcat(0, 0);
}

// CHECK-LABEL: define{{.*}} void @bar(
void bar(void) {
  float f;
  double d;
  long double ld;

  // LLVM's hex representation of float constants is really unfortunate;
  // basically it does a float-to-double "conversion" and then prints the
  // hex form of that. That gives us weird artifacts like exponents
  // that aren't numerically similar to the original exponent and
  // significand bit-patterns that are offset by three bits (because
  // the exponent was expanded from 8 bits to 11).
  //
  // 0xAE98 == 1010111010011000
  // 0x15D3 == 1010111010011
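  //
  // E.g. the payload 0xAE98 given to __builtin_nanf below lands in the low
  // bits of the 23-bit float significand; when that significand is
  // left-aligned into the double's 52-bit field for printing, the same bits
  // read as 0x15D3 (0xAE98 >> 3), giving float 0x7FF815D300000000.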

  f = __builtin_huge_valf(); // CHECK: float 0x7FF0000000000000
  d = __builtin_huge_val(); // CHECK: double 0x7FF0000000000000
  ld = __builtin_huge_vall(); // CHECK: x86_fp80 0xK7FFF8000000000000000
  f = __builtin_nanf(""); // CHECK: float 0x7FF8000000000000
  d = __builtin_nan(""); // CHECK: double 0x7FF8000000000000
  ld = __builtin_nanl(""); // CHECK: x86_fp80 0xK7FFFC000000000000000
  f = __builtin_nanf("0xAE98"); // CHECK: float 0x7FF815D300000000
  d = __builtin_nan("0xAE98"); // CHECK: double 0x7FF800000000AE98
  ld = __builtin_nanl("0xAE98"); // CHECK: x86_fp80 0xK7FFFC00000000000AE98
  f = __builtin_nansf(""); // CHECK: float 0x7FF4000000000000
  d = __builtin_nans(""); // CHECK: double 0x7FF4000000000000
  ld = __builtin_nansl(""); // CHECK: x86_fp80 0xK7FFFA000000000000000
  f = __builtin_nansf("0xAE98"); // CHECK: float 0x7FF015D300000000
  d = __builtin_nans("0xAE98"); // CHECK: double 0x7FF000000000AE98
  ld = __builtin_nansl("0xAE98"); // CHECK: x86_fp80 0xK7FFF800000000000AE98

}
// CHECK: }

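// Check that a conditional between two void-returning __builtin_bzero calls
// lowers to two llvm.memset calls on separate paths, with no phi of the
// (nonexistent) results.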
// CHECK-LABEL: define{{.*}} void @test_conditional_bzero
void test_conditional_bzero(void) {
  char dst[20];
  int _sz = 20, len = 20;
  return (_sz
          ? ((_sz >= len)
             ? __builtin_bzero(dst, len)
             : foo())
          : __builtin_bzero(dst, len));
  // CHECK: call void @llvm.memset
  // CHECK: call void @llvm.memset
  // CHECK-NOT: phi
}

// CHECK-LABEL: define{{.*}} void @test_float_builtins
void test_float_builtins(__fp16 *H, float F, double D, long double LD) {
  volatile int res;
  res = __builtin_isinf(*H);
  // CHECK: call half @llvm.fabs.f16(half
  // CHECK: fcmp oeq half {{.*}}, 0xH7C00

  res = __builtin_isinf(F);
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: fcmp oeq float {{.*}}, 0x7FF0000000000000

  res = __builtin_isinf(D);
  // CHECK: call double @llvm.fabs.f64(double
  // CHECK: fcmp oeq double {{.*}}, 0x7FF0000000000000

  res = __builtin_isinf(LD);
  // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80
  // CHECK: fcmp oeq x86_fp80 {{.*}}, 0xK7FFF8000000000000000

  res = __builtin_isinf_sign(*H);
  // CHECK: %[[ABS:.*]] = call half @llvm.fabs.f16(half %[[ARG:.*]])
  // CHECK: %[[ISINF:.*]] = fcmp oeq half %[[ABS]], 0xH7C00
  // CHECK: %[[BITCAST:.*]] = bitcast half %[[ARG]] to i16
  // CHECK: %[[ISNEG:.*]] = icmp slt i16 %[[BITCAST]], 0
  // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isinf_sign(F);
  // CHECK: %[[ABS:.*]] = call float @llvm.fabs.f32(float %[[ARG:.*]])
  // CHECK: %[[ISINF:.*]] = fcmp oeq float %[[ABS]], 0x7FF0000000000000
  // CHECK: %[[BITCAST:.*]] = bitcast float %[[ARG]] to i32
  // CHECK: %[[ISNEG:.*]] = icmp slt i32 %[[BITCAST]], 0
  // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isinf_sign(D);
  // CHECK: %[[ABS:.*]] = call double @llvm.fabs.f64(double %[[ARG:.*]])
  // CHECK: %[[ISINF:.*]] = fcmp oeq double %[[ABS]], 0x7FF0000000000000
  // CHECK: %[[BITCAST:.*]] = bitcast double %[[ARG]] to i64
  // CHECK: %[[ISNEG:.*]] = icmp slt i64 %[[BITCAST]], 0
  // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isinf_sign(LD);
  // CHECK: %[[ABS:.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 %[[ARG:.*]])
  // CHECK: %[[ISINF:.*]] = fcmp oeq x86_fp80 %[[ABS]], 0xK7FFF8000000000000000
  // CHECK: %[[BITCAST:.*]] = bitcast x86_fp80 %[[ARG]] to i80
  // CHECK: %[[ISNEG:.*]] = icmp slt i80 %[[BITCAST]], 0
  // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isfinite(*H);
  // CHECK: call half @llvm.fabs.f16(half
  // CHECK: fcmp one half {{.*}}, 0xH7C00

  res = __builtin_isfinite(F);
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: fcmp one float {{.*}}, 0x7FF0000000000000

  res = finite(D);
  // CHECK: call double @llvm.fabs.f64(double
  // CHECK: fcmp one double {{.*}}, 0x7FF0000000000000

  res = __builtin_isnormal(*H);
  // CHECK: fcmp oeq half
  // CHECK: call half @llvm.fabs.f16(half
  // CHECK: fcmp ult half {{.*}}, 0xH7C00
  // CHECK: fcmp uge half {{.*}}, 0xH0400
  // CHECK: and i1
  // CHECK: and i1

  res = __builtin_isnormal(F);
  // CHECK: fcmp oeq float
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: fcmp ult float {{.*}}, 0x7FF0000000000000
  // CHECK: fcmp uge float {{.*}}, 0x3810000000000000
  // CHECK: and i1
  // CHECK: and i1

  res = __builtin_flt_rounds();
  // CHECK: call i32 @llvm.flt.rounds(
}

// CHECK-LABEL: define{{.*}} void @test_float_builtin_ops
void test_float_builtin_ops(float F, double D, long double LD) {
  volatile float resf;
  volatile double resd;
  volatile long double resld;
  volatile long int resli;
  volatile long long int reslli;

  resf = __builtin_fmodf(F,F);
  // CHECK: frem float

  resd = __builtin_fmod(D,D);
  // CHECK: frem double

  resld = __builtin_fmodl(LD,LD);
  // CHECK: frem x86_fp80

  resf = __builtin_fabsf(F);
  resd = __builtin_fabs(D);
  resld = __builtin_fabsl(LD);
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: call double @llvm.fabs.f64(double
  // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80

  resf = __builtin_canonicalizef(F);
  resd = __builtin_canonicalize(D);
  resld = __builtin_canonicalizel(LD);
  // CHECK: call float @llvm.canonicalize.f32(float
  // CHECK: call double @llvm.canonicalize.f64(double
  // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80

  resf = __builtin_fminf(F, F);
  // CHECK: call float @llvm.minnum.f32

  resd = __builtin_fmin(D, D);
  // CHECK: call double @llvm.minnum.f64

  resld = __builtin_fminl(LD, LD);
  // CHECK: call x86_fp80 @llvm.minnum.f80

  resf = __builtin_fmaxf(F, F);
  // CHECK: call float @llvm.maxnum.f32

  resd = __builtin_fmax(D, D);
  // CHECK: call double @llvm.maxnum.f64

  resld = __builtin_fmaxl(LD, LD);
  // CHECK: call x86_fp80 @llvm.maxnum.f80

  resf = __builtin_fabsf(F);
  // CHECK: call float @llvm.fabs.f32

  resd = __builtin_fabs(D);
  // CHECK: call double @llvm.fabs.f64

  resld = __builtin_fabsl(LD);
  // CHECK: call x86_fp80 @llvm.fabs.f80

  resf = __builtin_copysignf(F, F);
  // CHECK: call float @llvm.copysign.f32

  resd = __builtin_copysign(D, D);
  // CHECK: call double @llvm.copysign.f64

  resld = __builtin_copysignl(LD, LD);
  // CHECK: call x86_fp80 @llvm.copysign.f80


  resf = __builtin_ceilf(F);
  // CHECK: call float @llvm.ceil.f32

  resd = __builtin_ceil(D);
  // CHECK: call double @llvm.ceil.f64

  resld = __builtin_ceill(LD);
  // CHECK: call x86_fp80 @llvm.ceil.f80

  resf = __builtin_floorf(F);
  // CHECK: call float @llvm.floor.f32

  resd = __builtin_floor(D);
  // CHECK: call double @llvm.floor.f64

  resld = __builtin_floorl(LD);
  // CHECK: call x86_fp80 @llvm.floor.f80

  resf = __builtin_sqrtf(F);
  // CHECK: call float @llvm.sqrt.f32(

  resd = __builtin_sqrt(D);
  // CHECK: call double @llvm.sqrt.f64(

  resld = __builtin_sqrtl(LD);
  // CHECK: call x86_fp80 @llvm.sqrt.f80

  resf = __builtin_truncf(F);
  // CHECK: call float @llvm.trunc.f32

  resd = __builtin_trunc(D);
  // CHECK: call double @llvm.trunc.f64

  resld = __builtin_truncl(LD);
  // CHECK: call x86_fp80 @llvm.trunc.f80

  resf = __builtin_rintf(F);
  // CHECK: call float @llvm.rint.f32

  resd = __builtin_rint(D);
  // CHECK: call double @llvm.rint.f64

  resld = __builtin_rintl(LD);
  // CHECK: call x86_fp80 @llvm.rint.f80

  resf = __builtin_nearbyintf(F);
  // CHECK: call float @llvm.nearbyint.f32

  resd = __builtin_nearbyint(D);
  // CHECK: call double @llvm.nearbyint.f64

  resld = __builtin_nearbyintl(LD);
  // CHECK: call x86_fp80 @llvm.nearbyint.f80

  resf = __builtin_roundf(F);
  // CHECK: call float @llvm.round.f32

  resd = __builtin_round(D);
  // CHECK: call double @llvm.round.f64

  resld = __builtin_roundl(LD);
  // CHECK: call x86_fp80 @llvm.round.f80

  resli = __builtin_lroundf (F);
  // CHECK: call i64 @llvm.lround.i64.f32

  resli = __builtin_lround (D);
  // CHECK: call i64 @llvm.lround.i64.f64

  resli = __builtin_lroundl (LD);
  // CHECK: call i64 @llvm.lround.i64.f80

  resli = __builtin_lrintf (F);
  // CHECK: call i64 @llvm.lrint.i64.f32

  resli = __builtin_lrint (D);
  // CHECK: call i64 @llvm.lrint.i64.f64

  resli = __builtin_lrintl (LD);
  // CHECK: call i64 @llvm.lrint.i64.f80
}

// __builtin_longjmp isn't supported on all platforms, so only test it on X86.
#ifdef __x86_64__

// CHECK-LABEL: define{{.*}} void @test_builtin_longjmp
void test_builtin_longjmp(void **buffer) {
  // CHECK: [[BITCAST:%.*]] = bitcast
  // CHECK-NEXT: call void @llvm.eh.sjlj.longjmp(i8* [[BITCAST]])
  __builtin_longjmp(buffer, 1);
  // CHECK-NEXT: unreachable
}

#endif

// CHECK-LABEL: define{{.*}} void @test_memory_builtins
void test_memory_builtins(int n) {
  // CHECK: call i8* @malloc
  void * p = __builtin_malloc(n);
  // CHECK: call void @free
  __builtin_free(p);
  // CHECK: call i8* @calloc
  p = __builtin_calloc(1, n);
  // CHECK: call i8* @realloc
  p = __builtin_realloc(p, n);
  // CHECK: call void @free
  __builtin_free(p);
}

// CHECK-LABEL: define{{.*}} i64 @test_builtin_readcyclecounter
long long test_builtin_readcyclecounter(void) {
  // CHECK: call i64 @llvm.readcyclecounter()
  return __builtin_readcyclecounter();
}

/// __builtin_launder should be a NOP in C since there are no vtables.
// CHECK-LABEL: define{{.*}} void @test_builtin_launder
void test_builtin_launder(int *p) {
  // CHECK: [[TMP:%.*]] = load i32*,
  // CHECK-NOT: @llvm.launder
  // CHECK: store i32* [[TMP]],
  int *d = __builtin_launder(p);
}

// __warn_memset_zero_len should be NOP, see https://sourceware.org/bugzilla/show_bug.cgi?id=25399
// CHECK-LABEL: define{{.*}} void @test___warn_memset_zero_len
void test___warn_memset_zero_len(void) {
  // CHECK-NOT: @__warn_memset_zero_len
  __warn_memset_zero_len();
}

// Behavior of __builtin_os_log differs between platforms, so only test on X86
#ifdef __x86_64__

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log
// CHECK: (i8* noundef %[[BUF:.*]], i32 noundef %[[I:.*]], i8* noundef %[[DATA:.*]])
void test_builtin_os_log(void *buf, int i, const char *data) {
  volatile int len;
  // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[I_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
  // CHECK: store i32 %[[I]], i32* %[[I_ADDR]], align 4
  // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8

  // CHECK: store volatile i32 34, i32* %[[LEN]]
  len = __builtin_os_log_format_buffer_size("%d %{public}s %{private}.16P", i, data, data);

  // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]]
  // CHECK: %[[V2:.*]] = load i32, i32* %[[I_ADDR]]
  // CHECK: %[[V3:.*]] = load i8*, i8** %[[DATA_ADDR]]
  // CHECK: %[[V4:.*]] = ptrtoint i8* %[[V3]] to i64
  // CHECK: %[[V5:.*]] = load i8*, i8** %[[DATA_ADDR]]
  // CHECK: %[[V6:.*]] = ptrtoint i8* %[[V5]] to i64
  // CHECK: call void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49(i8* noundef %[[V1]], i32 noundef %[[V2]], i64 noundef %[[V4]], i32 noundef 16, i64 noundef %[[V6]])
  __builtin_os_log_format(buf, "%d %{public}s %{private}.16P", i, data, data);

  // privacy annotations aren't recognized when they are preceded or followed
  // by non-whitespace characters.

  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{xyz public}s", data);

  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{ public xyz}s", data);

  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{ public1}s", data);

  // Privacy annotations do not have to be in the first comma-delimited string.

  // CHECK: call void @__os_log_helper_1_2_1_8_34(
  __builtin_os_log_format(buf, "%{ xyz, public }s", "abc");

  // CHECK: call void @__os_log_helper_1_3_1_8_33(
  __builtin_os_log_format(buf, "%{ xyz, private }s", "abc");

  // CHECK: call void @__os_log_helper_1_3_1_8_37(
  __builtin_os_log_format(buf, "%{ xyz, sensitive }s", "abc");

  // The strictest privacy annotation in the string wins.

  // CHECK: call void @__os_log_helper_1_3_1_8_33(
  __builtin_os_log_format(buf, "%{ private, public, private, public}s", "abc");

  // CHECK: call void @__os_log_helper_1_3_1_8_37(
  __builtin_os_log_format(buf, "%{ private, sensitive, private, public}s",
                          "abc");

  // CHECK: store volatile i32 22, i32* %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("%{mask.xyz}s", "abc");

  // CHECK: call void @__os_log_helper_1_2_2_8_112_8_34(i8* noundef {{.*}}, i64 noundef 8026488
  __builtin_os_log_format(buf, "%{mask.xyz, public}s", "abc");

  // CHECK: call void @__os_log_helper_1_3_2_8_112_4_1(i8* noundef {{.*}}, i64 noundef 8026488
  __builtin_os_log_format(buf, "%{ mask.xyz, private }d", 11);

  // Mask type is silently ignored.
  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{ mask. xyz }s", "abc");

  // CHECK: call void @__os_log_helper_1_2_1_8_32(
  __builtin_os_log_format(buf, "%{ mask.xy z }s", "abc");
}

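// The helper's mangled name encodes the layout it writes: after a leading
// constant it lists the summary byte, the argument count, and then a
// (size, descriptor) pair for each argument, matching the stores below.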
// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49
// CHECK: (i8* noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]], i64 noundef %[[ARG1:.*]], i32 noundef %[[ARG2:.*]], i64 noundef %[[ARG3:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
// CHECK: %[[ARG2_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG3_ADDR:.*]] = alloca i64, align 8
// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
// CHECK: store i64 %[[ARG1]], i64* %[[ARG1_ADDR]], align 8
// CHECK: store i32 %[[ARG2]], i32* %[[ARG2_ADDR]], align 4
// CHECK: store i64 %[[ARG3]], i64* %[[ARG3_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
// CHECK: store i8 3, i8* %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
// CHECK: store i8 4, i8* %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
// CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
// CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
// CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 8
// CHECK: store i8 34, i8* %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 9
// CHECK: store i8 8, i8* %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 10
// CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i64*
// CHECK: %[[V1:.*]] = load i64, i64* %[[ARG1_ADDR]], align 8
// CHECK: store i64 %[[V1]], i64* %[[ARGDATACAST4]], align 1
// CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, i8* %[[BUF]], i64 18
// CHECK: store i8 17, i8* %[[ARGDESCRIPTOR5]], align 1
// CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, i8* %[[BUF]], i64 19
// CHECK: store i8 4, i8* %[[ARGSIZE6]], align 1
// CHECK: %[[ARGDATA7:.*]] = getelementptr i8, i8* %[[BUF]], i64 20
// CHECK: %[[ARGDATACAST8:.*]] = bitcast i8* %[[ARGDATA7]] to i32*
// CHECK: %[[V2:.*]] = load i32, i32* %[[ARG2_ADDR]], align 4
// CHECK: store i32 %[[V2]], i32* %[[ARGDATACAST8]], align 1
// CHECK: %[[ARGDESCRIPTOR9:.*]] = getelementptr i8, i8* %[[BUF]], i64 24
// CHECK: store i8 49, i8* %[[ARGDESCRIPTOR9]], align 1
// CHECK: %[[ARGSIZE10:.*]] = getelementptr i8, i8* %[[BUF]], i64 25
// CHECK: store i8 8, i8* %[[ARGSIZE10]], align 1
// CHECK: %[[ARGDATA11:.*]] = getelementptr i8, i8* %[[BUF]], i64 26
// CHECK: %[[ARGDATACAST12:.*]] = bitcast i8* %[[ARGDATA11]] to i64*
// CHECK: %[[V3:.*]] = load i64, i64* %[[ARG3_ADDR]], align 8
// CHECK: store i64 %[[V3]], i64* %[[ARGDATACAST12]], align 1

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_wide
// CHECK: (i8* noundef %[[BUF:.*]], i8* noundef %[[DATA:.*]], i32* noundef %[[STR:.*]])
typedef int wchar_t;
void test_builtin_os_log_wide(void *buf, const char *data, wchar_t *str) {
  volatile int len;

  // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[STR_ADDR:.*]] = alloca i32*, align 8
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
  // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
  // CHECK: store i32* %[[STR]], i32** %[[STR_ADDR]], align 8

  // CHECK: store volatile i32 12, i32* %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("%S", str);

  // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load i32*, i32** %[[STR_ADDR]], align 8
  // CHECK: %[[V3:.*]] = ptrtoint i32* %[[V2]] to i64
  // CHECK: call void @__os_log_helper_1_2_1_8_80(i8* noundef %[[V1]], i64 noundef %[[V3]])

  __builtin_os_log_format(buf, "%S", str);
}

// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_8_80
// CHECK: (i8* noundef %[[BUFFER:.*]], i64 noundef %[[ARG0:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
// CHECK: store i64 %[[ARG0]], i64* %[[ARG0_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
// CHECK: store i8 2, i8* %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
// CHECK: store i8 1, i8* %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
// CHECK: store i8 80, i8* %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
// CHECK: store i8 8, i8* %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i64*
// CHECK: %[[V0:.*]] = load i64, i64* %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[V0]], i64* %[[ARGDATACAST]], align 1

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_precision_width
// CHECK: (i8* noundef %[[BUF:.*]], i8* noundef %[[DATA:.*]], i32 noundef %[[PRECISION:.*]], i32 noundef %[[WIDTH:.*]])
void test_builtin_os_log_precision_width(void *buf, const char *data,
                                         int precision, int width) {
  volatile int len;
  // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[PRECISION_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[WIDTH_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
  // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
  // CHECK: store i32 %[[PRECISION]], i32* %[[PRECISION_ADDR]], align 4
  // CHECK: store i32 %[[WIDTH]], i32* %[[WIDTH_ADDR]], align 4

  // CHECK: store volatile i32 24, i32* %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("Hello %*.*s World", precision, width, data);

  // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load i32, i32* %[[PRECISION_ADDR]], align 4
  // CHECK: %[[V3:.*]] = load i32, i32* %[[WIDTH_ADDR]], align 4
  // CHECK: %[[V4:.*]] = load i8*, i8** %[[DATA_ADDR]], align 8
  // CHECK: %[[V5:.*]] = ptrtoint i8* %[[V4]] to i64
  // CHECK: call void @__os_log_helper_1_2_3_4_0_4_16_8_32(i8* noundef %[[V1]], i32 noundef %[[V2]], i32 noundef %[[V3]], i64 noundef %[[V5]])
  __builtin_os_log_format(buf, "Hello %*.*s World", precision, width, data);
}

// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_3_4_0_4_16_8_32
// CHECK: (i8* noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]], i32 noundef %[[ARG1:.*]], i64 noundef %[[ARG2:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG1_ADDR:.*]] = alloca i32, align 4
// CHECK: %[[ARG2_ADDR:.*]] = alloca i64, align 8
// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[ARG1]], i32* %[[ARG1_ADDR]], align 4
// CHECK: store i64 %[[ARG2]], i64* %[[ARG2_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
// CHECK: store i8 2, i8* %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
// CHECK: store i8 3, i8* %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
// CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
// CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
// CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 8
// CHECK: store i8 16, i8* %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 9
// CHECK: store i8 4, i8* %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 10
// CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i32*
// CHECK: %[[V1:.*]] = load i32, i32* %[[ARG1_ADDR]], align 4
// CHECK: store i32 %[[V1]], i32* %[[ARGDATACAST4]], align 1
// CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, i8* %[[BUF]], i64 14
// CHECK: store i8 32, i8* %[[ARGDESCRIPTOR5]], align 1
// CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, i8* %[[BUF]], i64 15
// CHECK: store i8 8, i8* %[[ARGSIZE6]], align 1
// CHECK: %[[ARGDATA7:.*]] = getelementptr i8, i8* %[[BUF]], i64 16
// CHECK: %[[ARGDATACAST8:.*]] = bitcast i8* %[[ARGDATA7]] to i64*
// CHECK: %[[V2:.*]] = load i64, i64* %[[ARG2_ADDR]], align 8
// CHECK: store i64 %[[V2]], i64* %[[ARGDATACAST8]], align 1

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_invalid
// CHECK: (i8* noundef %[[BUF:.*]], i32 noundef %[[DATA:.*]])
void test_builtin_os_log_invalid(void *buf, int data) {
  volatile int len;
  // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[DATA_ADDR:.*]] = alloca i32, align 4
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
  // CHECK: store i32 %[[DATA]], i32* %[[DATA_ADDR]], align 4

  // CHECK: store volatile i32 8, i32* %[[LEN]], align 4
  len = __builtin_os_log_format_buffer_size("invalid specifier %: %d even a trailing one%", data);

  // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load i32, i32* %[[DATA_ADDR]], align 4
  // CHECK: call void @__os_log_helper_1_0_1_4_0(i8* noundef %[[V1]], i32 noundef %[[V2]])

  __builtin_os_log_format(buf, "invalid specifier %: %d even a trailing one%", data);
}

// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_4_0
// CHECK: (i8* noundef %[[BUFFER:.*]], i32 noundef %[[ARG0:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
// CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
// CHECK: store i8 0, i8* %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
// CHECK: store i8 1, i8* %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
// CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
// CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
// CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
// CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_percent
// CHECK: (i8* noundef %[[BUF:.*]], i8* noundef %[[DATA1:.*]], i8* noundef %[[DATA2:.*]])
// Check that %%, which does not consume any argument, is handled correctly.
void test_builtin_os_log_percent(void *buf, const char *data1, const char *data2) {
  volatile int len;
  // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[DATA1_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[DATA2_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[LEN:.*]] = alloca i32, align 4
  // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
  // CHECK: store i8* %[[DATA1]], i8** %[[DATA1_ADDR]], align 8
  // CHECK: store i8* %[[DATA2]], i8** %[[DATA2_ADDR]], align 8
  // CHECK: store volatile i32 22, i32* %[[LEN]], align 4

  len = __builtin_os_log_format_buffer_size("%s %% %s", data1, data2);

  // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
  // CHECK: %[[V2:.*]] = load i8*, i8** %[[DATA1_ADDR]], align 8
  // CHECK: %[[V3:.*]] = ptrtoint i8* %[[V2]] to i64
  // CHECK: %[[V4:.*]] = load i8*, i8** %[[DATA2_ADDR]], align 8
  // CHECK: %[[V5:.*]] = ptrtoint i8* %[[V4]] to i64
  // CHECK: call void @__os_log_helper_1_2_2_8_32_8_32(i8* noundef %[[V1]], i64 noundef %[[V3]], i64 noundef %[[V5]])

  __builtin_os_log_format(buf, "%s %% %s", data1, data2);
}

// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_2_8_32_8_32
// CHECK: (i8* noundef %[[BUFFER:.*]], i64 noundef %[[ARG0:.*]], i64 noundef %[[ARG1:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
// CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
// CHECK: store i64 %[[ARG0]], i64* %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[ARG1]], i64* %[[ARG1_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
// CHECK: store i8 2, i8* %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
// CHECK: store i8 2, i8* %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
// CHECK: store i8 32, i8* %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
// CHECK: store i8 8, i8* %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i64*
// CHECK: %[[V0:.*]] = load i64, i64* %[[ARG0_ADDR]], align 8
// CHECK: store i64 %[[V0]], i64* %[[ARGDATACAST]], align 1
// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 12
// CHECK: store i8 32, i8* %[[ARGDESCRIPTOR1]], align 1
// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 13
// CHECK: store i8 8, i8* %[[ARGSIZE2]], align 1
// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 14
// CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i64*
// CHECK: %[[V1:.*]] = load i64, i64* %[[ARG1_ADDR]], align 8
// CHECK: store i64 %[[V1]], i64* %[[ARGDATACAST4]], align 1

// Check that the following two functions call the same helper function.
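// Both format strings lower to two scalar arguments of size 4 and size 8 with
// descriptor byte 0, so they share the single helper
// __os_log_helper_1_0_2_4_0_8_0 below.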

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_merge_helper0
// CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
void test_builtin_os_log_merge_helper0(void *buf, int i, double d) {
  __builtin_os_log_format(buf, "%d %f", i, d);
}

// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_2_4_0_8_0(

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_merge_helper1
// CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
void test_builtin_os_log_merge_helper1(void *buf, unsigned u, long long ll) {
  __builtin_os_log_format(buf, "%u %lld", u, ll);
}

// Check that this function doesn't write past the end of array 'buf'.
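// "%m" stores no argument data in the buffer, just the two header bytes plus
// one descriptor/size pair with size 0, so a fixed 4-byte buffer suffices and
// no stack save/restore is needed.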

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_errno
void test_builtin_os_log_errno(void) {
  // CHECK-NOT: @stacksave
  // CHECK: %[[BUF:.*]] = alloca [4 x i8], align 1
  // CHECK: %[[DECAY:.*]] = getelementptr inbounds [4 x i8], [4 x i8]* %[[BUF]], i64 0, i64 0
  // CHECK: call void @__os_log_helper_1_2_1_0_96(i8* noundef %[[DECAY]])
  // CHECK-NOT: @stackrestore

  char buf[__builtin_os_log_format_buffer_size("%m")];
  __builtin_os_log_format(buf, "%m");
}

// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_0_96
// CHECK: (i8* noundef %[[BUFFER:.*]])

// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
// CHECK: store i8 2, i8* %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
// CHECK: store i8 1, i8* %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
// CHECK: store i8 96, i8* %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
// CHECK: store i8 0, i8* %[[ARGSIZE]], align 1
// CHECK-NEXT: ret void

// CHECK-LABEL: define{{.*}} void @test_builtin_os_log_long_double
// CHECK: (i8* noundef %[[BUF:.*]], x86_fp80 noundef %[[LD:.*]])
void test_builtin_os_log_long_double(void *buf, long double ld) {
  // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
  // CHECK: %[[LD_ADDR:.*]] = alloca x86_fp80, align 16
  // CHECK: %[[COERCE:.*]] = alloca i128, align 16
  // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
  // CHECK: store x86_fp80 %[[LD]], x86_fp80* %[[LD_ADDR]], align 16
  // CHECK: %[[V0:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
  // CHECK: %[[V1:.*]] = load x86_fp80, x86_fp80* %[[LD_ADDR]], align 16
  // CHECK: %[[V2:.*]] = bitcast x86_fp80 %[[V1]] to i80
  // CHECK: %[[V3:.*]] = zext i80 %[[V2]] to i128
  // CHECK: store i128 %[[V3]], i128* %[[COERCE]], align 16
  // CHECK: %[[V4:.*]] = bitcast i128* %[[COERCE]] to { i64, i64 }*
  // CHECK: %[[V5:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V4]], i32 0, i32 0
  // CHECK: %[[V6:.*]] = load i64, i64* %[[V5]], align 16
  // CHECK: %[[V7:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V4]], i32 0, i32 1
  // CHECK: %[[V8:.*]] = load i64, i64* %[[V7]], align 8
  // CHECK: call void @__os_log_helper_1_0_1_16_0(i8* noundef %[[V0]], i64 noundef %[[V6]], i64 noundef %[[V8]])

  __builtin_os_log_format(buf, "%Lf", ld);
}

// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_16_0
// CHECK: (i8* noundef %[[BUFFER:.*]], i64 noundef %[[ARG0_COERCE0:.*]], i64 noundef %[[ARG0_COERCE1:.*]])

// CHECK: %[[ARG0:.*]] = alloca i128, align 16
// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
// CHECK: %[[ARG0_ADDR:.*]] = alloca i128, align 16
// CHECK: %[[V0:.*]] = bitcast i128* %[[ARG0]] to { i64, i64 }*
// CHECK: %[[V1:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V0]], i32 0, i32 0
// CHECK: store i64 %[[ARG0_COERCE0]], i64* %[[V1]], align 16
// CHECK: %[[V2:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V0]], i32 0, i32 1
// CHECK: store i64 %[[ARG0_COERCE1]], i64* %[[V2]], align 8
// CHECK: %[[ARG01:.*]] = load i128, i128* %[[ARG0]], align 16
// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
// CHECK: store i128 %[[ARG01]], i128* %[[ARG0_ADDR]], align 16
// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
// CHECK: store i8 0, i8* %[[SUMMARY]], align 1
// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
// CHECK: store i8 1, i8* %[[NUMARGS]], align 1
// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
// CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
// CHECK: store i8 16, i8* %[[ARGSIZE]], align 1
// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i128*
// CHECK: %[[V3:.*]] = load i128, i128* %[[ARG0_ADDR]], align 16
// CHECK: store i128 %[[V3]], i128* %[[ARGDATACAST]], align 1

#endif