; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-interleave=2 -loop-vectorize -verify-loop-info -simplifycfg -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck %s --check-prefix=UNROLL
; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-interleave=2 -loop-vectorize -verify-loop-info < %s | FileCheck %s --check-prefix=UNROLL-NOSIMPLIFY
; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=2 -force-vector-interleave=1 -loop-vectorize -verify-loop-info -simplifycfg -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck %s --check-prefix=VEC

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; Test predication of stores.
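; A rough C equivalent of the loop below (a sketch for readability; the trip
; count of 128 and the constants come straight from the IR):
;
;   for (long i = 0; i != 128; ++i)
;     if (f[i] > 100)
;       f[i] += 20;
;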
define i32 @test(i32* nocapture %f) #0 {
; UNROLL-LABEL: @test(
; UNROLL-NEXT:  entry:
; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL:       vector.body:
; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
; UNROLL-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 [[INDUCTION]]
; UNROLL-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDUCTION1]]
; UNROLL-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
; UNROLL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
; UNROLL-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP2]], 100
; UNROLL-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP3]], 100
; UNROLL-NEXT:    br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL:       pred.store.if:
; UNROLL-NEXT:    [[TMP6:%.*]] = add nsw i32 [[TMP2]], 20
; UNROLL-NEXT:    store i32 [[TMP6]], i32* [[TMP0]], align 4
; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL:       pred.store.continue:
; UNROLL-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
; UNROLL:       pred.store.if2:
; UNROLL-NEXT:    [[TMP7:%.*]] = add nsw i32 [[TMP3]], 20
; UNROLL-NEXT:    store i32 [[TMP7]], i32* [[TMP1]], align 4
; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE3]]
; UNROLL:       pred.store.continue3:
; UNROLL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; UNROLL-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; UNROLL:       middle.block:
; UNROLL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 128, 128
; UNROLL-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
; UNROLL:       for.body:
; UNROLL-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 128, [[MIDDLE_BLOCK]] ]
; UNROLL-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
; UNROLL-NEXT:    [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; UNROLL-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[TMP9]], 100
; UNROLL-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL:       if.then:
; UNROLL-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP9]], 20
; UNROLL-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
; UNROLL-NEXT:    br label [[FOR_INC]]
; UNROLL:       for.inc:
; UNROLL-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; UNROLL-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128
; UNROLL-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; UNROLL:       for.end:
; UNROLL-NEXT:    ret i32 0
;
; UNROLL-NOSIMPLIFY-LABEL: @test(
; UNROLL-NOSIMPLIFY-NEXT:  entry:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL-NOSIMPLIFY:       vector.ph:
; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       vector.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 [[INDUCTION]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDUCTION1]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP2]], 100
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP3]], 100
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY:       pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = add nsw i32 [[TMP2]], 20
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP6]], i32* [[TMP0]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL-NOSIMPLIFY:       pred.store.continue:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
; UNROLL-NOSIMPLIFY:       pred.store.if2:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = add nsw i32 [[TMP3]], 20
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP7]], i32* [[TMP1]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE3]]
; UNROLL-NOSIMPLIFY:       pred.store.continue3:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; UNROLL-NOSIMPLIFY:       middle.block:
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 128, 128
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; UNROLL-NOSIMPLIFY:       scalar.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 128, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       for.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[TMP9]], 100
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       if.then:
; UNROLL-NOSIMPLIFY-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP9]], 20
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       for.inc:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; UNROLL-NOSIMPLIFY:       for.end:
; UNROLL-NOSIMPLIFY-NEXT:    ret i32 0
;
; VEC-LABEL: @test(
; VEC-NEXT:  entry:
; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
; VEC:       vector.body:
; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; VEC-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; VEC-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 [[TMP0]]
; VEC-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; VEC-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <2 x i32>*
; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, <2 x i32>* [[TMP3]], align 4
; VEC-NEXT:    [[TMP4:%.*]] = icmp sgt <2 x i32> [[WIDE_LOAD]], <i32 100, i32 100>
; VEC-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
; VEC-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; VEC:       pred.store.if:
; VEC-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 0
; VEC-NEXT:    [[TMP7:%.*]] = add nsw i32 [[TMP6]], 20
; VEC-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[TMP0]]
; VEC-NEXT:    store i32 [[TMP7]], i32* [[TMP8]], align 4
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE]]
; VEC:       pred.store.continue:
; VEC-NEXT:    [[TMP9:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
; VEC-NEXT:    br i1 [[TMP9]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.if1:
; VEC-NEXT:    [[TMP10:%.*]] = add i64 [[INDEX]], 1
; VEC-NEXT:    [[TMP11:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 1
; VEC-NEXT:    [[TMP12:%.*]] = add nsw i32 [[TMP11]], 20
; VEC-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[TMP10]]
; VEC-NEXT:    store i32 [[TMP12]], i32* [[TMP13]], align 4
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.continue2:
; VEC-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; VEC-NEXT:    br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VEC:       middle.block:
; VEC-NEXT:    [[CMP_N:%.*]] = icmp eq i64 128, 128
; VEC-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
; VEC:       for.body:
; VEC-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 128, [[MIDDLE_BLOCK]] ]
; VEC-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
; VEC-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; VEC-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[TMP15]], 100
; VEC-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; VEC:       if.then:
; VEC-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP15]], 20
; VEC-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
; VEC-NEXT:    br label [[FOR_INC]]
; VEC:       for.inc:
; VEC-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128
; VEC-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; VEC:       for.end:
; VEC-NEXT:    ret i32 0
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
  %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %cmp1 = icmp sgt i32 %0, 100
  br i1 %cmp1, label %if.then, label %for.inc

if.then:
  %add = add nsw i32 %0, 20
  store i32 %add, i32* %arrayidx, align 4
  br label %for.inc

for.inc:
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 128
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret i32 0
}

; Track basic blocks when unrolling conditional blocks. This code used to assert
; because we did not update the phi nodes with the proper predecessor in the
; vectorized loop body.
; PR18724

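; A rough C equivalent of the loop below (a sketch; %v.1 seeds the induction
; variable, %v.2 seeds the reduction, and the backedge is taken while the
; truncated induction variable is still negative):
;
;   int iNewChunks = v_2;
;   for (long i = v_1; ; ++i) {
;     int t = (*ptr)[i];                           // load kept from the IR
;     if (cond_2) { (*ptr)[i] = 2; ++iNewChunks; }
;     if ((int)i >= 0) break;                      // %cmp13 tests the pre-increment value
;   }
;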
define void @bug18724(i1 %cond, [768 x i32]* %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; UNROLL-LABEL: @bug18724(
; UNROLL-NEXT:  entry:
; UNROLL-NEXT:    [[TMP0:%.*]] = xor i1 [[COND:%.*]], true
; UNROLL-NEXT:    call void @llvm.assume(i1 [[TMP0]])
; UNROLL-NEXT:    [[TMP1:%.*]] = trunc i64 [[V_1:%.*]] to i32
; UNROLL-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
; UNROLL-NEXT:    [[TMP2:%.*]] = sub i32 [[SMAX]], [[TMP1]]
; UNROLL-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; UNROLL-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; UNROLL-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 2
; UNROLL-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL:       vector.ph:
; UNROLL-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2
; UNROLL-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; UNROLL-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL:       vector.body:
; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
; UNROLL-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ [[V_2:%.*]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE4]] ]
; UNROLL-NEXT:    [[VEC_PHI2:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI5:%.*]], [[PRED_STORE_CONTINUE4]] ]
; UNROLL-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
; UNROLL-NEXT:    [[INDUCTION:%.*]] = add i64 [[OFFSET_IDX]], 0
; UNROLL-NEXT:    [[INDUCTION1:%.*]] = add i64 [[OFFSET_IDX]], 1
; UNROLL-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR:%.*]], i64 0, i64 [[INDUCTION]]
; UNROLL-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDUCTION1]]
; UNROLL-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 4
; UNROLL-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP6]], align 4
; UNROLL-NEXT:    br i1 [[COND_2:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE4]]
; UNROLL:       pred.store.if:
; UNROLL-NEXT:    store i32 2, i32* [[TMP5]], align 4
; UNROLL-NEXT:    store i32 2, i32* [[TMP6]], align 4
; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE4]]
; UNROLL:       pred.store.continue4:
; UNROLL-NEXT:    [[TMP9:%.*]] = add i32 [[VEC_PHI]], 1
; UNROLL-NEXT:    [[TMP10:%.*]] = add i32 [[VEC_PHI2]], 1
; UNROLL-NEXT:    [[TMP11:%.*]] = xor i1 [[COND_2]], true
; UNROLL-NEXT:    [[TMP12:%.*]] = xor i1 [[COND_2]], true
; UNROLL-NEXT:    [[PREDPHI]] = select i1 [[TMP11]], i32 [[VEC_PHI]], i32 [[TMP9]]
; UNROLL-NEXT:    [[PREDPHI5]] = select i1 [[TMP12]], i32 [[VEC_PHI2]], i32 [[TMP10]]
; UNROLL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NEXT:    br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; UNROLL:       middle.block:
; UNROLL-NEXT:    [[BIN_RDX:%.*]] = add i32 [[PREDPHI5]], [[PREDPHI]]
; UNROLL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; UNROLL-NEXT:    [[TMP14:%.*]] = xor i1 [[CMP_N]], true
; UNROLL-NEXT:    call void @llvm.assume(i1 [[TMP14]])
; UNROLL-NEXT:    br label [[SCALAR_PH]]
; UNROLL:       scalar.ph:
; UNROLL-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[ENTRY:%.*]] ]
; UNROLL-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[V_2]], [[ENTRY]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; UNROLL-NEXT:    br label [[FOR_BODY14:%.*]]
; UNROLL:       for.body14:
; UNROLL-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; UNROLL-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDVARS_IV3]]
; UNROLL-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
; UNROLL-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
; UNROLL:       if.then18:
; UNROLL-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
; UNROLL-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
; UNROLL-NEXT:    br label [[FOR_INC23]]
; UNROLL:       for.inc23:
; UNROLL-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
; UNROLL-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
; UNROLL-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
; UNROLL-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
; UNROLL-NEXT:    call void @llvm.assume(i1 [[CMP13]])
; UNROLL-NEXT:    br label [[FOR_BODY14]]
;
; UNROLL-NOSIMPLIFY-LABEL: @bug18724(
; UNROLL-NOSIMPLIFY-NEXT:  entry:
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY9:%.*]]
; UNROLL-NOSIMPLIFY:       for.body9:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND:%.*]], label [[FOR_INC26:%.*]], label [[FOR_BODY14_PREHEADER:%.*]]
; UNROLL-NOSIMPLIFY:       for.body14.preheader:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = trunc i64 [[V_1:%.*]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 0)
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = sub i32 [[SMAX]], [[TMP0]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL-NOSIMPLIFY:       vector.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; UNROLL-NOSIMPLIFY-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       vector.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ [[V_2:%.*]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE4]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI2:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI5:%.*]], [[PRED_STORE_CONTINUE4]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION:%.*]] = add i64 [[OFFSET_IDX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION1:%.*]] = add i64 [[OFFSET_IDX]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR:%.*]], i64 0, i64 [[INDUCTION]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDUCTION1]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP4]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY:       pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[TMP4]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL-NOSIMPLIFY:       pred.store.continue:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4]]
; UNROLL-NOSIMPLIFY:       pred.store.if3:
; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[TMP5]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE4]]
; UNROLL-NOSIMPLIFY:       pred.store.continue4:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = add i32 [[VEC_PHI]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP9:%.*]] = add i32 [[VEC_PHI2]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP10:%.*]] = xor i1 [[COND_2]], true
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP11:%.*]] = xor i1 [[COND_2]], true
; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI]] = select i1 [[TMP10]], i32 [[VEC_PHI]], i32 [[TMP8]]
; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI5]] = select i1 [[TMP11]], i32 [[VEC_PHI2]], i32 [[TMP9]]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; UNROLL-NOSIMPLIFY:       middle.block:
; UNROLL-NOSIMPLIFY-NEXT:    [[BIN_RDX:%.*]] = add i32 [[PREDPHI5]], [[PREDPHI]]
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_INC26_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; UNROLL-NOSIMPLIFY:       scalar.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[FOR_BODY14_PREHEADER]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[V_2]], [[FOR_BODY14_PREHEADER]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY14:%.*]]
; UNROLL-NOSIMPLIFY:       for.body14:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDVARS_IV3]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
; UNROLL-NOSIMPLIFY:       if.then18:
; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC23]]
; UNROLL-NOSIMPLIFY:       for.inc23:
; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP13]], label [[FOR_BODY14]], label [[FOR_INC26_LOOPEXIT]], !llvm.loop [[LOOP4:![0-9]+]]
; UNROLL-NOSIMPLIFY:       for.inc26.loopexit:
; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_2_LCSSA:%.*]] = phi i32 [ [[INEWCHUNKS_2]], [[FOR_INC23]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC26]]
; UNROLL-NOSIMPLIFY:       for.inc26:
; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_1_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY9]] ], [ [[INEWCHUNKS_2_LCSSA]], [[FOR_INC26_LOOPEXIT]] ]
; UNROLL-NOSIMPLIFY-NEXT:    unreachable
;
; VEC-LABEL: @bug18724(
; VEC-NEXT:  entry:
; VEC-NEXT:    [[TMP0:%.*]] = xor i1 [[COND:%.*]], true
; VEC-NEXT:    call void @llvm.assume(i1 [[TMP0]])
; VEC-NEXT:    [[TMP1:%.*]] = trunc i64 [[V_1:%.*]] to i32
; VEC-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
; VEC-NEXT:    [[TMP2:%.*]] = sub i32 [[SMAX]], [[TMP1]]
; VEC-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; VEC-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; VEC-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 2
; VEC-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; VEC:       vector.ph:
; VEC-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2
; VEC-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; VEC-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
; VEC-NEXT:    [[TMP5:%.*]] = insertelement <2 x i32> zeroinitializer, i32 [[V_2:%.*]], i32 0
; VEC-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND_2:%.*]], i32 0
; VEC-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
; VEC:       vector.body:
; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; VEC-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE2]] ]
; VEC-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
; VEC-NEXT:    [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 0
; VEC-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR:%.*]], i64 0, i64 [[TMP6]]
; VEC-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; VEC-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <2 x i32>*
; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, <2 x i32>* [[TMP9]], align 4
; VEC-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 0
; VEC-NEXT:    br i1 [[TMP10]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; VEC:       pred.store.if:
; VEC-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[TMP6]]
; VEC-NEXT:    store i32 2, i32* [[TMP11]], align 4
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE]]
; VEC:       pred.store.continue:
; VEC-NEXT:    [[TMP12:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 1
; VEC-NEXT:    br i1 [[TMP12]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.if1:
; VEC-NEXT:    [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 1
; VEC-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[TMP13]]
; VEC-NEXT:    store i32 2, i32* [[TMP14]], align 4
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.continue2:
; VEC-NEXT:    [[TMP15:%.*]] = add <2 x i32> [[VEC_PHI]], <i32 1, i32 1>
; VEC-NEXT:    [[TMP16:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], <i1 true, i1 true>
; VEC-NEXT:    [[PREDPHI]] = select <2 x i1> [[TMP16]], <2 x i32> [[VEC_PHI]], <2 x i32> [[TMP15]]
; VEC-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC-NEXT:    [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; VEC-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; VEC:       middle.block:
; VEC-NEXT:    [[TMP18:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[PREDPHI]])
; VEC-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; VEC-NEXT:    [[TMP19:%.*]] = xor i1 [[CMP_N]], true
; VEC-NEXT:    call void @llvm.assume(i1 [[TMP19]])
; VEC-NEXT:    br label [[SCALAR_PH]]
; VEC:       scalar.ph:
; VEC-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[ENTRY:%.*]] ]
; VEC-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[V_2]], [[ENTRY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
; VEC-NEXT:    br label [[FOR_BODY14:%.*]]
; VEC:       for.body14:
; VEC-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; VEC-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDVARS_IV3]]
; VEC-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
; VEC-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
; VEC:       if.then18:
; VEC-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
; VEC-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
; VEC-NEXT:    br label [[FOR_INC23]]
; VEC:       for.inc23:
; VEC-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
; VEC-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
; VEC-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
; VEC-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
; VEC-NEXT:    call void @llvm.assume(i1 [[CMP13]])
; VEC-NEXT:    br label [[FOR_BODY14]]
;
entry:
  br label %for.body9

for.body9:
  br i1 %cond, label %for.inc26, label %for.body14

for.body14:
  %indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ %v.1, %for.body9 ]
  %iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ %v.2, %for.body9 ]
  %arrayidx16 = getelementptr inbounds [768 x i32], [768 x i32]* %ptr, i64 0, i64 %indvars.iv3
  %tmp = load i32, i32* %arrayidx16, align 4
  br i1 %cond.2, label %if.then18, label %for.inc23

if.then18:
  store i32 2, i32* %arrayidx16, align 4
  %inc21 = add nsw i32 %iNewChunks.120, 1
  br label %for.inc23

for.inc23:
  %iNewChunks.2 = phi i32 [ %inc21, %if.then18 ], [ %iNewChunks.120, %for.body14 ]
  %indvars.iv.next4 = add nsw i64 %indvars.iv3, 1
  %tmp1 = trunc i64 %indvars.iv3 to i32
  %cmp13 = icmp slt i32 %tmp1, 0
  br i1 %cmp13, label %for.body14, label %for.inc26

for.inc26:
  %iNewChunks.1.lcssa = phi i32 [ undef, %for.body9 ], [ %iNewChunks.2, %for.inc23 ]
  unreachable
}

; In the test below, it's more profitable for the expression feeding the
; conditional store to remain scalar. Since we can only type-shrink vector
; types, we shouldn't try to represent the expression in a smaller type.
;
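; A rough C equivalent of the loop below (a sketch; in the IR, i starts at 0
; while j and the trip count are undef):
;
;   do {
;     uint8_t x = p[i];
;     if (c)
;       p[i] = (uint8_t)(uint32_t)x;  // the zext/trunc pair that should stay scalar
;     ++i;
;     --j;
;   } while (j != 0);
;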
define void @minimal_bit_widths(i1 %c) {
; UNROLL-LABEL: @minimal_bit_widths(
; UNROLL-NEXT:  entry:
; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL:       vector.body:
; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
; UNROLL-NEXT:    [[OFFSET_IDX:%.*]] = sub i64 undef, [[INDEX]]
; UNROLL-NEXT:    [[INDUCTION3:%.*]] = add i64 [[OFFSET_IDX]], 0
; UNROLL-NEXT:    [[INDUCTION4:%.*]] = add i64 [[OFFSET_IDX]], -1
; UNROLL-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE6]]
; UNROLL:       pred.store.if:
; UNROLL-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NEXT:    [[TMP0:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION]]
; UNROLL-NEXT:    [[TMP1:%.*]] = load i8, i8* [[TMP0]], align 1
; UNROLL-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; UNROLL-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; UNROLL-NEXT:    store i8 [[TMP3]], i8* [[TMP0]], align 1
; UNROLL-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NEXT:    [[TMP4:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION2]]
; UNROLL-NEXT:    [[TMP5:%.*]] = load i8, i8* [[TMP4]], align 1
; UNROLL-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; UNROLL-NEXT:    [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
; UNROLL-NEXT:    store i8 [[TMP7]], i8* [[TMP4]], align 1
; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE6]]
; UNROLL:       pred.store.continue6:
; UNROLL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], undef
; UNROLL-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; UNROLL:       middle.block:
; UNROLL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 undef, undef
; UNROLL-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
; UNROLL:       for.body:
; UNROLL-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ undef, [[MIDDLE_BLOCK]] ]
; UNROLL-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ undef, [[MIDDLE_BLOCK]] ]
; UNROLL-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
; UNROLL-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
; UNROLL-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL:       if.then:
; UNROLL-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; UNROLL-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
; UNROLL-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
; UNROLL-NEXT:    br label [[FOR_INC]]
; UNROLL:       for.inc:
; UNROLL-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
; UNROLL-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
; UNROLL-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
; UNROLL-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; UNROLL:       for.end:
; UNROLL-NEXT:    ret void
;
; UNROLL-NOSIMPLIFY-LABEL: @minimal_bit_widths(
; UNROLL-NOSIMPLIFY-NEXT:  entry:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL-NOSIMPLIFY:       vector.ph:
; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       vector.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX:%.*]] = sub i64 undef, [[INDEX]]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION3:%.*]] = add i64 [[OFFSET_IDX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION4:%.*]] = add i64 [[OFFSET_IDX]], -1
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY:       pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = load i8, i8* [[TMP0]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP3]], i8* [[TMP0]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL-NOSIMPLIFY:       pred.store.continue:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
; UNROLL-NOSIMPLIFY:       pred.store.if5:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION2]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = load i8, i8* [[TMP4]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP7]], i8* [[TMP4]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE6]]
; UNROLL-NOSIMPLIFY:       pred.store.continue6:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], undef
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; UNROLL-NOSIMPLIFY:       middle.block:
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 undef, undef
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; UNROLL-NOSIMPLIFY:       scalar.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ undef, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi i64 [ undef, [[MIDDLE_BLOCK]] ], [ undef, [[ENTRY]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       for.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       if.then:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       for.inc:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; UNROLL-NOSIMPLIFY:       for.end:
; UNROLL-NOSIMPLIFY-NEXT:    ret void
;
; VEC-LABEL: @minimal_bit_widths(
; VEC-NEXT:  entry:
; VEC-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C:%.*]], i32 0
; VEC-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
; VEC:       vector.body:
; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
; VEC-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; VEC-NEXT:    [[OFFSET_IDX:%.*]] = sub i64 undef, [[INDEX]]
; VEC-NEXT:    [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0
; VEC-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
; VEC-NEXT:    [[TMP3:%.*]] = getelementptr i8, i8* [[TMP2]], i32 0
; VEC-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to <2 x i8>*
; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i8>, <2 x i8>* [[TMP4]], align 1
; VEC-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 0
; VEC-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; VEC:       pred.store.if:
; VEC-NEXT:    [[TMP6:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 0
; VEC-NEXT:    [[TMP7:%.*]] = zext i8 [[TMP6]] to i32
; VEC-NEXT:    [[TMP8:%.*]] = trunc i32 [[TMP7]] to i8
; VEC-NEXT:    [[TMP9:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
; VEC-NEXT:    store i8 [[TMP8]], i8* [[TMP9]], align 1
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE]]
; VEC:       pred.store.continue:
; VEC-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 1
; VEC-NEXT:    br i1 [[TMP10]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
; VEC:       pred.store.if2:
; VEC-NEXT:    [[TMP11:%.*]] = add i64 [[INDEX]], 1
; VEC-NEXT:    [[TMP12:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 1
; VEC-NEXT:    [[TMP13:%.*]] = zext i8 [[TMP12]] to i32
; VEC-NEXT:    [[TMP14:%.*]] = trunc i32 [[TMP13]] to i8
; VEC-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* undef, i64 [[TMP11]]
; VEC-NEXT:    store i8 [[TMP14]], i8* [[TMP15]], align 1
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE3]]
; VEC:       pred.store.continue3:
; VEC-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], undef
; VEC-NEXT:    br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC:       middle.block:
; VEC-NEXT:    [[CMP_N:%.*]] = icmp eq i64 undef, undef
; VEC-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
; VEC:       for.body:
; VEC-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ undef, [[MIDDLE_BLOCK]] ]
; VEC-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ undef, [[MIDDLE_BLOCK]] ]
; VEC-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
; VEC-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
; VEC-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; VEC:       if.then:
; VEC-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; VEC-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
; VEC-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
; VEC-NEXT:    br label [[FOR_INC]]
; VEC:       for.inc:
; VEC-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
; VEC-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
; VEC-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
; VEC-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; VEC:       for.end:
; VEC-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %tmp0 = phi i64 [ %tmp6, %for.inc ], [ 0, %entry ]
  %tmp1 = phi i64 [ %tmp7, %for.inc ], [ undef, %entry ]
  %tmp2 = getelementptr i8, i8* undef, i64 %tmp0
  %tmp3 = load i8, i8* %tmp2, align 1
  br i1 %c, label %if.then, label %for.inc

if.then:
  %tmp4 = zext i8 %tmp3 to i32
  %tmp5 = trunc i32 %tmp4 to i8
  store i8 %tmp5, i8* %tmp2, align 1
  br label %for.inc

for.inc:
  %tmp6 = add nuw nsw i64 %tmp0, 1
  %tmp7 = add i64 %tmp1, -1
  %tmp8 = icmp eq i64 %tmp7, 0
  br i1 %tmp8, label %for.end, label %for.body

for.end:
  ret void
}

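; Same pattern as @minimal_bit_widths above, except that an unconditional store
; to the same address precedes the predicated block, i.e. roughly (a sketch):
;
;   do {
;     uint8_t x = p[i];
;     p[i] = 0;                       // aliasing store ahead of the branch
;     if (c)
;       p[i] = (uint8_t)(uint32_t)x;
;     ++i;
;     --j;
;   } while (j != 0);
;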
define void @minimal_bit_widths_with_aliasing_store(i1 %c, i8* %ptr) {
; UNROLL-LABEL: @minimal_bit_widths_with_aliasing_store(
; UNROLL-NEXT:  entry:
; UNROLL-NEXT:    br label [[FOR_BODY:%.*]]
; UNROLL:       for.body:
; UNROLL-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; UNROLL-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 0, [[ENTRY]] ]
; UNROLL-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i64 [[TMP0]]
; UNROLL-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
; UNROLL-NEXT:    store i8 0, i8* [[TMP2]], align 1
; UNROLL-NEXT:    br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL:       if.then:
; UNROLL-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; UNROLL-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
; UNROLL-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
; UNROLL-NEXT:    br label [[FOR_INC]]
; UNROLL:       for.inc:
; UNROLL-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
; UNROLL-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
; UNROLL-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
; UNROLL-NEXT:    br i1 [[TMP8]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; UNROLL:       for.end:
; UNROLL-NEXT:    ret void
;
; UNROLL-NOSIMPLIFY-LABEL: @minimal_bit_widths_with_aliasing_store(
; UNROLL-NOSIMPLIFY-NEXT:  entry:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 true, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL-NOSIMPLIFY:       vector.ph:
; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       vector.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX:%.*]] = sub i64 0, [[INDEX]]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION3:%.*]] = add i64 [[OFFSET_IDX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION4:%.*]] = add i64 [[OFFSET_IDX]], -1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i64 [[INDUCTION]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = getelementptr i8, i8* [[PTR]], i64 [[INDUCTION2]]
; UNROLL-NOSIMPLIFY-NEXT:    store i8 0, i8* [[TMP0]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    store i8 0, i8* [[TMP1]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY:       pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = load i8, i8* [[TMP0]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP4]], i8* [[TMP0]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL-NOSIMPLIFY:       pred.store.continue:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
; UNROLL-NOSIMPLIFY:       pred.store.if5:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = load i8, i8* [[TMP1]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP7]], i8* [[TMP1]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE6]]
; UNROLL-NOSIMPLIFY:       pred.store.continue6:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; UNROLL-NOSIMPLIFY:       middle.block:
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 0, 0
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; UNROLL-NOSIMPLIFY:       scalar.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       for.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* [[PTR]], i64 [[TMP0]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    store i8 0, i8* [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       if.then:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       for.inc:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; UNROLL-NOSIMPLIFY:       for.end:
; UNROLL-NOSIMPLIFY-NEXT:    ret void
;
; VEC-LABEL: @minimal_bit_widths_with_aliasing_store(
; VEC-NEXT:  entry:
; VEC-NEXT:    br label [[FOR_BODY:%.*]]
; VEC:       for.body:
; VEC-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; VEC-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 0, [[ENTRY]] ]
; VEC-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i64 [[TMP0]]
; VEC-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
; VEC-NEXT:    store i8 0, i8* [[TMP2]], align 1
; VEC-NEXT:    br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; VEC:       if.then:
; VEC-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; VEC-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
; VEC-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
; VEC-NEXT:    br label [[FOR_INC]]
; VEC:       for.inc:
; VEC-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
; VEC-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
; VEC-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
; VEC-NEXT:    br i1 [[TMP8]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VEC:       for.end:
; VEC-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %tmp0 = phi i64 [ %tmp6, %for.inc ], [ 0, %entry ]
  %tmp1 = phi i64 [ %tmp7, %for.inc ], [ 0, %entry ]
  %tmp2 = getelementptr i8, i8* %ptr, i64 %tmp0
  %tmp3 = load i8, i8* %tmp2, align 1
  store i8 0, i8* %tmp2
  br i1 %c, label %if.then, label %for.inc

if.then:
  %tmp4 = zext i8 %tmp3 to i32
  %tmp5 = trunc i32 %tmp4 to i8
  store i8 %tmp5, i8* %tmp2, align 1
  br label %for.inc

for.inc:
  %tmp6 = add nuw nsw i64 %tmp0, 1
  %tmp7 = add i64 %tmp1, -1
  %tmp8 = icmp eq i64 %tmp7, 0
  br i1 %tmp8, label %for.end, label %for.body

for.end:
  ret void
}