; RUN: opt < %s -passes=bounds-checking -S | FileCheck %s
; Regression tests for the BoundsChecking instrumentation pass.  Each
; function below verifies that the pass inserts a trap guard for an
; out-of-bounds access — or correctly omits one when the access is provably
; in bounds, volatile, or of unknown extent — for objects coming from
; malloc/calloc/realloc, alloca, byval arguments, and global constants.
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; 8-byte constant string in the default address space (64-bit pointers).
@.str = private constant [8 x i8] c"abcdefg\00"   ; <[8 x i8]*>

; Same string in addrspace(1), which the datalayout gives 16-bit pointers
; (p1:16:16:16), so size/offset arithmetic for it must be done in i16.
@.str_as1 = private addrspace(1) constant [8 x i8] c"abcdefg\00"   ; <[8 x i8] addrspace(1)*>


; Allocation functions.  The allocsize attributes tell the pass which
; argument(s) determine the size of the returned object.
declare noalias i8* @malloc(i64) nounwind allocsize(0)
declare noalias i8* @calloc(i64, i64) nounwind allocsize(0,1)
declare noalias i8* @realloc(i8* nocapture allocptr, i64) nounwind allocsize(1)
12
; CHECK: @f1
; In-bounds store: 4 bytes at byte offset 8 of a 32-byte malloc'd object.
; The pass can prove this statically, so no guard may be emitted.
define void @f1() nounwind {
  %1 = tail call i8* @malloc(i64 32)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 2 ; byte offset 8
; CHECK-NOT: trap
  store i32 3, i32* %idx, align 4
  ret void
}
22
; CHECK: @f2
; Out-of-bounds store: byte offset 32 is exactly the size of the 32-byte
; allocation, so the 4-byte store is past the end and must be trapped.
define void @f2() nounwind {
  %1 = tail call i8* @malloc(i64 32)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8 ; byte offset 32 == object size
; CHECK: trap
  store i32 3, i32* %idx, align 4
  ret void
}
32
; CHECK: @f3
; Runtime-sized object: calloc(4, %x) has size 4 * %x, only known at run
; time.  The pass must materialize the size computation (mul), the
; remaining-bytes computation (sub of the byte offset 32), and the two
; unsigned comparisons (offset vs. size, remaining vs. access size 4)
; or'd together to gate the trap.
define void @f3(i64 %x) nounwind {
  %1 = tail call i8* @calloc(i64 4, i64 %x)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8 ; byte offset 32
; CHECK: mul i64 4, %
; CHECK: sub i64 {{.*}}, 32
; CHECK-NEXT: icmp ult i64 {{.*}}, 32
; CHECK-NEXT: icmp ult i64 {{.*}}, 4
; CHECK-NEXT: or i1
; CHECK: trap
  store i32 3, i32* %idx, align 4
  ret void
}
47
; CHECK: @store_volatile
; Same access pattern as @f3, but the store is volatile: the pass must
; leave volatile accesses uninstrumented.
define void @store_volatile(i64 %x) nounwind {
  %1 = tail call i8* @calloc(i64 4, i64 %x)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK-NOT: trap
  store volatile i32 3, i32* %idx, align 4
  ret void
}
57
; CHECK: @f4
; realloc(null, %x) yields an object of runtime size %x (allocsize(1));
; the 4-byte load at byte offset 32 cannot be proven in bounds, so a
; guard must be emitted.
define void @f4(i64 %x) nounwind {
  %1 = tail call i8* @realloc(i8* null, i64 %x) nounwind
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8 ; byte offset 32
; CHECK: trap
  %3 = load i32, i32* %idx, align 4
  ret void
}
67
; CHECK: @f5
; Global of known size (8 bytes) indexed by a runtime value %x: the
; offset is unknown, so a guard is required.
define void @f5(i64 %x) nounwind {
  %idx = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i64 0, i64 %x
; CHECK: trap
  %1 = load i8, i8* %idx, align 4
  ret void
}
75
; Same as @f5 but against the addrspace(1) global: pointers there are
; 16 bits wide, so the bounds arithmetic must be emitted in i16.
define void @f5_as1(i64 %x) nounwind {
; CHECK: @f5_as1
  %idx = getelementptr inbounds [8 x i8], [8 x i8] addrspace(1)* @.str_as1, i64 0, i64 %x
  ; CHECK: sub i16
  ; CHECK: icmp ult i16
; CHECK: trap
  %1 = load i8, i8 addrspace(1)* %idx, align 4
  ret void
}
85
; CHECK: @f6
; Fixed-size alloca: a 16-byte load of the whole 16-byte i128 slot is
; statically in bounds — no guard.
define void @f6(i64 %x) nounwind {
  %1 = alloca i128
; CHECK-NOT: trap
  %2 = load i128, i128* %1, align 4
  ret void
}
93
; CHECK: @f7
; Runtime-sized alloca: size is 16 * %x, so the pass must emit the size
; multiplication and a guard (%x could be 0).
define void @f7(i64 %x) nounwind {
  %1 = alloca i128, i64 %x
; CHECK: mul i64 16,
; CHECK: trap
  %2 = load i128, i128* %1, align 4
  ret void
}
102
; CHECK: @f8
; Select between two fixed-size 16-byte allocas: either way the 16-byte
; load is in bounds, so no guard may be emitted.
define void @f8() nounwind {
  %1 = alloca i128
  %2 = alloca i128
  %3 = select i1 undef, i128* %1, i128* %2
; CHECK-NOT: trap
  %4 = load i128, i128* %3, align 4
  ret void
}
112
; CHECK: @f9
; Select where one incoming pointer (%arg) has unknown extent: the pass
; cannot compute a bound for it, so no guard is emitted.
define void @f9(i128* %arg) nounwind {
  %1 = alloca i128
  %2 = select i1 undef, i128* %arg, i128* %1
; CHECK-NOT: trap
  %3 = load i128, i128* %2, align 4
  ret void
}
121
; CHECK: @f10
; Select between two runtime-sized allocas: the pass must merge the two
; size/offset candidates with selects mirroring the pointer select, then
; emit the guard.
define void @f10(i64 %x, i64 %y) nounwind {
  %1 = alloca i128, i64 %x
  %2 = alloca i128, i64 %y
  %3 = select i1 undef, i128* %1, i128* %2
; CHECK: select
; CHECK: select
; CHECK: trap
  %4 = load i128, i128* %3, align 4
  ret void
}
133
; CHECK: @f11
; byval argument of known size (16 bytes) accessed at byte offset 16 —
; statically one past the end.  The test only requires that a branch to
; the trap path is emitted (presumably the guard folds to an
; unconditional branch; only the `br label` is pinned here).
define void @f11(i128* byval(i128) %x) nounwind {
  %1 = bitcast i128* %x to i8*
  %2 = getelementptr inbounds i8, i8* %1, i64 16 ; offset == object size
; CHECK: br label
  %3 = load i8, i8* %2, align 4
  ret void
}
142
; CHECK: @f11_as1
; Same as @f11 but in addrspace(1) (16-bit pointers, i16 offset): the
; statically out-of-bounds byval access must still produce the branch.
define void @f11_as1(i128 addrspace(1)* byval(i128) %x) nounwind {
  %1 = bitcast i128 addrspace(1)* %x to i8 addrspace(1)*
  %2 = getelementptr inbounds i8, i8 addrspace(1)* %1, i16 16 ; offset == object size
; CHECK: br label
  %3 = load i8, i8 addrspace(1)* %2, align 4
  ret void
}
151
; CHECK: @f12
; Both the object size (calloc(1, %x) -> %x bytes) and the access offset
; (%y i64 elements -> %y * 8 bytes) are runtime values: the pass must
; emit the offset multiplication and the trap guard.
define i64 @f12(i64 %x, i64 %y) nounwind {
  %1 = tail call i8* @calloc(i64 1, i64 %x)
; CHECK: mul i64 %y, 8
; CHECK: trap
  %2 = bitcast i8* %1 to i64*
  %3 = getelementptr inbounds i64, i64* %2, i64 %y
  %4 = load i64, i64* %3, align 8
  ret i64 %4
}
162
; CHECK: @load_volatile
; Same access pattern as @f12, but the load is volatile: the pass must
; leave volatile accesses uninstrumented.
define i64 @load_volatile(i64 %x, i64 %y) nounwind {
  %1 = tail call i8* @calloc(i64 1, i64 %x)
; CHECK-NOT: trap
  %2 = bitcast i8* %1 to i64*
  %3 = getelementptr inbounds i64, i64* %2, i64 %y
  %4 = load volatile i64, i64* %3, align 8
  ret i64 %4
}
172
; PR17402
; Crash test: a GEP in unreachable code may use itself as its own base
; pointer.  The pass must not crash or try to rewrite it; the check line
; pins that the GEP survives untouched.
; CHECK-LABEL: @f13
define void @f13() nounwind {
entry:
  br label %alive

dead:
  ; Self-referential GEPs can occur in dead code.
  %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr, i64 1
  ; CHECK: %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr
  %l = load i32, i32* %incdec.ptr
  br label %alive

alive:
  ret void
}
189