1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -S -loop-predication -loop-predication-enable-iv-truncation=true < %s 2>&1 | FileCheck %s
3; RUN: opt -S -passes='require<scalar-evolution>,loop-mssa(loop-predication)' -verify-memoryssa < %s 2>&1 | FileCheck %s
4declare void @llvm.experimental.guard(i1, ...)
5
6declare i32 @length(i8*)
7
8declare i16 @short_length(i8*)
9; Consider range checks of type i16 and i32, while the IV is of type i64.
10; We can loop-predicate this because the IV range fits within both i16 and i32.
11define i64 @iv_wider_type_rc_two_narrow_types(i32 %offA, i16 %offB, i8* %arrA, i8* %arrB) {
12; CHECK-LABEL: @iv_wider_type_rc_two_narrow_types(
13; CHECK-NEXT:  entry:
14; CHECK-NEXT:    [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
15; CHECK-NEXT:    [[LENGTHB:%.*]] = call i16 @short_length(i8* [[ARRB:%.*]])
16; CHECK-NEXT:    [[TMP0:%.*]] = sub i16 [[LENGTHB]], [[OFFB:%.*]]
17; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i16 16, [[TMP0]]
18; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i16 [[OFFB]], [[LENGTHB]]
19; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP2]], [[TMP1]]
20; CHECK-NEXT:    [[TMP4:%.*]] = sub i32 [[LENGTHA]], [[OFFA:%.*]]
21; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i32 16, [[TMP4]]
22; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult i32 [[OFFA]], [[LENGTHA]]
23; CHECK-NEXT:    [[TMP7:%.*]] = and i1 [[TMP6]], [[TMP5]]
24; CHECK-NEXT:    [[TMP8:%.*]] = and i1 [[TMP3]], [[TMP7]]
25; CHECK-NEXT:    br label [[LOOP:%.*]]
26; CHECK:       loop:
27; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
28; CHECK-NEXT:    [[IV_TRUNC_32:%.*]] = trunc i64 [[IV]] to i32
29; CHECK-NEXT:    [[IV_TRUNC_16:%.*]] = trunc i64 [[IV]] to i16
30; CHECK-NEXT:    [[INDEXA:%.*]] = add i32 [[IV_TRUNC_32]], [[OFFA]]
31; CHECK-NEXT:    [[INDEXB:%.*]] = add i16 [[IV_TRUNC_16]], [[OFFB]]
32; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[TMP8]], i32 9) [ "deopt"() ]
33; CHECK-NEXT:    [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
34; CHECK-NEXT:    [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
35; CHECK-NEXT:    [[ELTA:%.*]] = load i8, i8* [[ADDRA]]
36; CHECK-NEXT:    [[INDEXB_EXT:%.*]] = zext i16 [[INDEXB]] to i64
37; CHECK-NEXT:    [[ADDRB:%.*]] = getelementptr inbounds i8, i8* [[ARRB]], i64 [[INDEXB_EXT]]
38; CHECK-NEXT:    store i8 [[ELTA]], i8* [[ADDRB]]
39; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
40; CHECK-NEXT:    [[LATCH_CHECK:%.*]] = icmp ult i64 [[IV_NEXT]], 16
41; CHECK-NEXT:    br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
42; CHECK:       exit:
43; CHECK-NEXT:    [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], [[LOOP]] ]
44; CHECK-NEXT:    ret i64 [[IV_LCSSA]]
45;
46entry:
; Entry block: both lengths are loop-invariant, which is what lets the pass
; hoist the widened range checks into entry (see CHECK lines 16-24 above).
47  %lengthA = call i32 @length(i8* %arrA)
48  %lengthB = call i16 @short_length(i8* %arrB)
49  br label %loop
50
51loop:
52  %iv = phi i64 [0, %entry ], [ %iv.next, %loop ]
; The i64 IV is truncated to each range-check width; the latch bounds the IV
; to [0, 16), so both truncations are lossless.
53  %iv.trunc.32 = trunc i64 %iv to i32
54  %iv.trunc.16 = trunc i64 %iv to i16
55  %indexA = add i32 %iv.trunc.32, %offA
56  %indexB = add i16 %iv.trunc.16, %offB
; Two per-iteration range checks (i32 and i16) feeding a single guard.
; The pass replaces the guard condition with the loop-invariant entry checks.
57  %rcA = icmp ult i32 %indexA, %lengthA
58  %rcB = icmp ult i16 %indexB, %lengthB
59  %wide.chk = and i1 %rcA, %rcB
60  call void (i1, ...) @llvm.experimental.guard(i1 %wide.chk, i32 9) [ "deopt"() ]
61  %indexA.ext = zext i32 %indexA to i64
62  %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
63  %eltA = load i8, i8* %addrA
64  %indexB.ext = zext i16 %indexB to i64
65  %addrB = getelementptr inbounds i8, i8* %arrB, i64 %indexB.ext
66  store i8 %eltA, i8* %addrB
67  %iv.next = add nuw nsw i64 %iv, 1
; Latch: compares the post-increment IV, so the loop runs for iv = 0..15.
68  %latch.check = icmp ult i64 %iv.next, 16
69  br i1 %latch.check, label %loop, label %exit
70
71exit:
72  ret i64 %iv
73}
74
75
76; Consider an IV of type long and an array access into an int array.
77; The IV is of type i64, while the range-check operands are of type i32 and i64.
78define i64 @iv_rc_different_types(i32 %offA, i32 %offB, i8* %arrA, i8* %arrB, i64 %max)
79; CHECK-LABEL: @iv_rc_different_types(
80; CHECK-NEXT:  entry:
81; CHECK-NEXT:    [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
82; CHECK-NEXT:    [[LENGTHB:%.*]] = call i32 @length(i8* [[ARRB:%.*]])
83; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[LENGTHB]], -1
84; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 [[TMP0]], [[OFFB:%.*]]
85; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i32 15, [[TMP1]]
86; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i32 [[OFFB]], [[LENGTHB]]
87; CHECK-NEXT:    [[TMP4:%.*]] = and i1 [[TMP3]], [[TMP2]]
88; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[MAX:%.*]], -1
89; CHECK-NEXT:    [[TMP6:%.*]] = icmp ule i64 15, [[TMP5]]
90; CHECK-NEXT:    [[TMP7:%.*]] = icmp ult i64 0, [[MAX]]
91; CHECK-NEXT:    [[TMP8:%.*]] = and i1 [[TMP7]], [[TMP6]]
92; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[LENGTHA]], -1
93; CHECK-NEXT:    [[TMP10:%.*]] = sub i32 [[TMP9]], [[OFFA:%.*]]
94; CHECK-NEXT:    [[TMP11:%.*]] = icmp ule i32 15, [[TMP10]]
95; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i32 [[OFFA]], [[LENGTHA]]
96; CHECK-NEXT:    [[TMP13:%.*]] = and i1 [[TMP12]], [[TMP11]]
97; CHECK-NEXT:    [[TMP14:%.*]] = and i1 [[TMP4]], [[TMP8]]
98; CHECK-NEXT:    [[TMP15:%.*]] = and i1 [[TMP14]], [[TMP13]]
99; CHECK-NEXT:    br label [[LOOP:%.*]]
100; CHECK:       loop:
101; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
102; CHECK-NEXT:    [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
103; CHECK-NEXT:    [[INDEXA:%.*]] = add i32 [[IV_TRUNC]], [[OFFA]]
104; CHECK-NEXT:    [[INDEXB:%.*]] = add i32 [[IV_TRUNC]], [[OFFB]]
105; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[TMP15]], i32 9) [ "deopt"() ]
106; CHECK-NEXT:    [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
107; CHECK-NEXT:    [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
108; CHECK-NEXT:    [[ELTA:%.*]] = load i8, i8* [[ADDRA]]
109; CHECK-NEXT:    [[INDEXB_EXT:%.*]] = zext i32 [[INDEXB]] to i64
110; CHECK-NEXT:    [[ADDRB:%.*]] = getelementptr inbounds i8, i8* [[ARRB]], i64 [[INDEXB_EXT]]
111; CHECK-NEXT:    [[ELTB:%.*]] = load i8, i8* [[ADDRB]]
112; CHECK-NEXT:    [[RESULT:%.*]] = xor i8 [[ELTA]], [[ELTB]]
113; CHECK-NEXT:    store i8 [[RESULT]], i8* [[ADDRA]]
114; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
115; CHECK-NEXT:    [[LATCH_CHECK:%.*]] = icmp ult i64 [[IV]], 15
116; CHECK-NEXT:    br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
117; CHECK:       exit:
118; CHECK-NEXT:    [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], [[LOOP]] ]
119; CHECK-NEXT:    ret i64 [[IV_LCSSA]]
120;
121{
122entry:
; Entry block: two loop-invariant i32 lengths; %max is an i64 bound, so the
; guard mixes i32 and i64 range checks (all hoisted per CHECK lines 83-98).
123  %lengthA = call i32 @length(i8* %arrA)
124  %lengthB = call i32 @length(i8* %arrB)
125  br label %loop
126
127loop:
128  %iv = phi i64 [0, %entry ], [ %iv.next, %loop ]
129  %iv.trunc = trunc i64 %iv to i32
130  %indexA = add i32 %iv.trunc, %offA
131  %indexB = add i32 %iv.trunc, %offB
; Three checks of mixed widths combined into one guard condition:
; two i32 array-bounds checks and one i64 check against %max.
132  %rcA = icmp ult i32 %indexA, %lengthA
133  %rcIV = icmp ult i64 %iv, %max
134  %wide.chk = and i1 %rcA, %rcIV
135  %rcB = icmp ult i32 %indexB, %lengthB
136  %wide.chk.final = and i1 %wide.chk, %rcB
137  call void (i1, ...) @llvm.experimental.guard(i1 %wide.chk.final, i32 9) [ "deopt"() ]
138  %indexA.ext = zext i32 %indexA to i64
139  %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
140  %eltA = load i8, i8* %addrA
141  %indexB.ext = zext i32 %indexB to i64
142  %addrB = getelementptr inbounds i8, i8* %arrB, i64 %indexB.ext
143  %eltB = load i8, i8* %addrB
144  %result = xor i8 %eltA, %eltB
145  store i8 %result, i8* %addrA
146  %iv.next = add nuw nsw i64 %iv, 1
; Latch: compares the PRE-increment IV against 15, so the guard executes for
; iv = 0..15; that is why the hoisted checks above test index 15 (CHECK 85/89/94).
147  %latch.check = icmp ult i64 %iv, 15
148  br i1 %latch.check, label %loop, label %exit
149
150exit:
151  ret i64 %iv
152}
153
154; Cannot narrow the IV to the range-check type, because we would lose information.
155; This is equivalent to: for (i64 i = 5; i >= 2; i++)
156; The loop only exits once the i64 IV signed-wraps past INT64_MAX.
157define i64 @iv_rc_different_type(i32 %offA, i8* %arrA) {
158; CHECK-LABEL: @iv_rc_different_type(
159; CHECK-NEXT:  entry:
160; CHECK-NEXT:    [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
161; CHECK-NEXT:    br label [[LOOP:%.*]]
162; CHECK:       loop:
163; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 5, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
164; CHECK-NEXT:    [[IV_TRUNC_32:%.*]] = trunc i64 [[IV]] to i32
165; CHECK-NEXT:    [[INDEXA:%.*]] = add i32 [[IV_TRUNC_32]], [[OFFA:%.*]]
166; CHECK-NEXT:    [[RCA:%.*]] = icmp ult i32 [[INDEXA]], [[LENGTHA]]
167; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[RCA]], i32 9) [ "deopt"() ]
168; CHECK-NEXT:    [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
169; CHECK-NEXT:    [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
170; CHECK-NEXT:    [[ELTA:%.*]] = load i8, i8* [[ADDRA]]
171; CHECK-NEXT:    [[RES:%.*]] = add i8 [[ELTA]], 2
172; CHECK-NEXT:    store i8 [[ELTA]], i8* [[ADDRA]]
173; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
174; CHECK-NEXT:    [[LATCH_CHECK:%.*]] = icmp sge i64 [[IV_NEXT]], 2
175; CHECK-NEXT:    br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
176; CHECK:       exit:
177; CHECK-NEXT:    [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], [[LOOP]] ]
178; CHECK-NEXT:    ret i64 [[IV_LCSSA]]
179;
180entry:
181  %lengthA = call i32 @length(i8* %arrA)
182  br label %loop
183
184loop:
185  %iv = phi i64 [ 5, %entry ], [ %iv.next, %loop ]
; Negative test: the CHECK lines above show %rcA is left as the guard
; condition — the pass does NOT hoist it, because the IV's i64 range is not
; provably representable after truncation to i32.
186  %iv.trunc.32 = trunc i64 %iv to i32
187  %indexA = add i32 %iv.trunc.32, %offA
188  %rcA = icmp ult i32 %indexA, %lengthA
189  call void (i1, ...) @llvm.experimental.guard(i1 %rcA, i32 9) [ "deopt"() ]
190  %indexA.ext = zext i32 %indexA to i64
191  %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
192  %eltA = load i8, i8* %addrA
; NOTE(review): %res is computed but never used — the store below writes
; %eltA. The autogenerated CHECK lines (171-172) pin this as-is.
193  %res = add i8 %eltA, 2
194  %eltA = load i8, i8* %addrA
195  %iv.next = add i64 %iv, 1
; Plain add (no nuw/nsw) plus a signed latch check: starting at 5, the
; condition holds for an enormous signed range, so the trip count is not a
; small constant like in the tests above.
196  %latch.check = icmp sge i64 %iv.next, 2
197  br i1 %latch.check, label %loop, label %exit
198
199exit:
200  ret i64 %iv
201}
202