; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -lower-constant-intrinsics -S < %s | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

declare i64 @llvm.objectsize.i64(i8*, i1, i1, i1) nounwind readonly
declare i64 @llvm.objectsize.i64.p1i8(i8 addrspace(1)*, i1, i1, i1) nounwind readonly
declare void @llvm.trap() nounwind
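; Per the LangRef, the intrinsic's arguments are (ptr, <min>, <nullunknown>, <dynamic>):
; <min> selects what to return when the size is unknown (0 rather than -1),
; <nullunknown> treats null as pointing to an object of unknown size, and
; <dynamic> permits the size to be computed at run time.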

; objectsize should fold to a constant, which causes the branch to fold to an
; unconditional branch.
define i32 @test1(i8* %ptr) nounwind ssp noredzone align 2 {
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[T:%.*]]
; CHECK:       T:
; CHECK-NEXT:    ret i32 4
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false, i1 false, i1 false)
  %1 = icmp ugt i64 %0, 3
  br i1 %1, label %T, label %trap

trap:                                             ; preds = %entry
  tail call void @llvm.trap() noreturn nounwind
  unreachable

T:
  ret i32 4
}

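; With <nullunknown> set, the size of a null pointer is unknown, and max mode
; (<min> = false) folds the unknown size to -1.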
define i64 @test_objectsize_null_flag(i8* %ptr) {
; CHECK-LABEL: @test_objectsize_null_flag(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(i8* null, i1 false, i1 true, i1 false)
  ret i64 %0
}

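; Same as above, but min mode (<min> = true) folds the unknown size to 0.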
define i64 @test_objectsize_null_flag_min(i8* %ptr) {
; CHECK-LABEL: @test_objectsize_null_flag_min(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(i8* null, i1 true, i1 true, i1 false)
  ret i64 %0
}

; Null pointers in non-zero address spaces are foldable because we evaluate
; them with non-exact modes in CodeGenPrepare.
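; Unlike address space 0, a non-zero address space may have a valid object at
; address zero, so these folds must not assume null points to nothing.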
define i64 @test_objectsize_null_flag_noas0() {
; CHECK-LABEL: @test_objectsize_null_flag_noas0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1i8(i8 addrspace(1)* null, i1 false, i1 true, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_flag_min_noas0() {
; CHECK-LABEL: @test_objectsize_null_flag_min_noas0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1i8(i8 addrspace(1)* null, i1 true, i1 true, i1 false)
  ret i64 %0
}

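; Even with <nullunknown> unset, null in a non-zero address space still folds
; to the unknown-size sentinels: -1 in max mode, 0 in min mode.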
define i64 @test_objectsize_null_known_flag_noas0() {
; CHECK-LABEL: @test_objectsize_null_known_flag_noas0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1i8(i8 addrspace(1)* null, i1 false, i1 false, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_known_flag_min_noas0() {
; CHECK-LABEL: @test_objectsize_null_known_flag_min_noas0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1i8(i8 addrspace(1)* null, i1 true, i1 false, i1 false)
  ret i64 %0
}

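; A byval argument is a private 42-byte copy in the callee, so its size is a
; known constant even in min mode.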
define i64 @test_objectsize_byval_arg([42 x i8]* byval([42 x i8]) %ptr) {
; CHECK-LABEL: @test_objectsize_byval_arg(
; CHECK-NEXT:    [[CAST:%.*]] = bitcast [42 x i8]* [[PTR:%.*]] to i8*
; CHECK-NEXT:    ret i64 42
;
  %cast = bitcast [42 x i8]* %ptr to i8*
  %size = tail call i64 @llvm.objectsize.i64(i8* %cast, i1 true, i1 false, i1 false)
  ret i64 %size
}

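; byref likewise guarantees a pointer to [42 x i8] storage, so the size folds
; the same way.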
define i64 @test_objectsize_byref_arg([42 x i8]* byref([42 x i8]) %ptr) {
; CHECK-LABEL: @test_objectsize_byref_arg(
; CHECK-NEXT:    [[CAST:%.*]] = bitcast [42 x i8]* [[PTR:%.*]] to i8*
; CHECK-NEXT:    ret i64 42
;
  %cast = bitcast [42 x i8]* %ptr to i8*
  %size = tail call i64 @llvm.objectsize.i64(i8* %cast, i1 true, i1 false, i1 false)
  ret i64 %size
}

; https://llvm.org/PR50023
; The alloca operand type may not match the pointer type's size.
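; With <dynamic> set, the pass emits a runtime computation instead of a
; constant: the allocation size minus the GEP offset, clamped to 0 on
; underflow, plus an assume that the result is not -1.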

define i64 @vla_pointer_size_mismatch(i42 %x) {
; CHECK-LABEL: @vla_pointer_size_mismatch(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i42 [[X:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 1, [[TMP1]]
; CHECK-NEXT:    [[A:%.*]] = alloca i8, i42 [[X]], align 1
; CHECK-NEXT:    [[G1:%.*]] = getelementptr i8, i8* [[A]], i8 17
; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[TMP2]], 17
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP2]], 17
; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], -1
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP6]])
; CHECK-NEXT:    ret i64 [[TMP5]]
;
  %A = alloca i8, i42 %x, align 1
  %G1 = getelementptr i8, i8* %A, i8 17
  %objsize = call i64 @llvm.objectsize.i64(i8* %G1, i1 false, i1 true, i1 true)
  ret i64 %objsize
}

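; allocsize(0) marks the first argument of @malloc as the number of bytes
; allocated, which lets objectsize fold the call below to a constant 16.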
declare i8* @malloc(i64) allocsize(0)

define i64 @test_objectsize_malloc() {
; CHECK-LABEL: @test_objectsize_malloc(
; CHECK-NEXT:    [[PTR:%.*]] = call i8* @malloc(i64 16)
; CHECK-NEXT:    ret i64 16
;
  %ptr = call i8* @malloc(i64 16)
  %objsize = call i64 @llvm.objectsize.i64(i8* %ptr, i1 false, i1 true, i1 true)
  ret i64 %objsize
}