; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -instcombine -S | FileCheck %s --check-prefix=IND
; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -force-vector-width=2 -instcombine -S | FileCheck %s --check-prefix=UNROLL

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Make sure that we can handle multiple integer induction variables.
; CHECK-LABEL: @multi_int_induction(
; CHECK: vector.body:
; CHECK:   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK:   %[[VAR:.*]] = trunc i64 %index to i32
; CHECK:   %offset.idx = add i32 190, %[[VAR]]
define void @multi_int_induction(i32* %A, i32 %N) {
for.body.lr.ph:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
  %count.09 = phi i32 [ 190, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %count.09, i32* %arrayidx2, align 4
  %inc = add nsw i32 %count.09, 1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp ne i32 %lftr.wideiv, %N
  br i1 %exitcond, label %for.body, label %for.end

for.end:
  ret void
}

; Make sure we remove unneeded vectorization of induction variables.
; In order for instcombine to clean up the vectorized induction variables that
; we create in the loop vectorizer, we need to perform some form of redundancy
; elimination to get rid of multiple uses.

; IND-LABEL: scalar_use

; IND:     br label %vector.body
; IND:     vector.body:
; Vectorized induction variable.
; IND-NOT:   insertelement <2 x i64>
; IND-NOT:   shufflevector <2 x i64>
; IND:     br {{.*}}, label %vector.body

define void @scalar_use(float* %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %ind.sum = add i64 %iv, %offset
  %arr.idx = getelementptr inbounds float, float* %a, i64 %ind.sum
  %l1 = load float, float* %arr.idx, align 4
  %ind.sum2 = add i64 %iv, %offset2
  %arr.idx2 = getelementptr inbounds float, float* %a, i64 %ind.sum2
  %l2 = load float, float* %arr.idx2, align 4
  %m = fmul fast float %b, %l2
  %ad = fadd fast float %l1, %m
  store float %ad, float* %arr.idx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %loopexit, label %for.body

loopexit:
  ret void
}
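; A rough C equivalent of @scalar_use above (hypothetical source compiled with
; fast-math, assuming n > 0; shown for illustration only):
;
;   void scalar_use(float *a, float b, long offset, long offset2, long n) {
;     for (long i = 0; i < n; ++i)
;       a[i + offset] += b * a[i + offset2];
;   }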
; Make sure that the loop exit count computation does not overflow for i8 and
; i16. The exit count of these loops is i8/i16 max + 1. If we don't cast the
; induction variable to a bigger type, the exit count computation will overflow
; to 0.
; PR17532

; CHECK-LABEL: i8_loop
; CHECK: icmp eq i32 {{.*}}, 256
define i32 @i8_loop() nounwind readnone ssp uwtable {
  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i8 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i8 %b.0, -1
  %4 = icmp eq i8 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; CHECK-LABEL: i16_loop
; CHECK: icmp eq i32 {{.*}}, 65536

define i32 @i16_loop() nounwind readnone ssp uwtable {
  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i16 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i16 %b.0, -1
  %4 = icmp eq i16 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; This loop has a backedge-taken count of i32_max. We need to check for this
; condition and branch directly to the scalar loop.

; CHECK-LABEL: max_i32_backedgetaken
; CHECK:  br i1 true, label %scalar.ph, label %min.iters.checked

; CHECK: scalar.ph:
; CHECK:  %bc.resume.val = phi i32 [ 0, %middle.block ], [ 0, %0 ]
; CHECK:  %bc.merge.rdx = phi i32 [ 1, %0 ], [ 1, %min.iters.checked ], [ %5, %middle.block ]

define i32 @max_i32_backedgetaken() nounwind readnone ssp uwtable {

  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i32 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i32 %b.0, -1
  %4 = icmp eq i32 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; When generating the overflow check, we must make sure that the induction
; start value is defined before the branch to the scalar preheader.

; CHECK-LABEL: testoverflowcheck
; CHECK: entry
; CHECK: %[[LOAD:.*]] = load i8
; CHECK: br

; CHECK: scalar.ph
; CHECK: phi i8 [ %{{.*}}, %middle.block ], [ %[[LOAD]], %entry ]

@e = global i8 1, align 1
@d = common global i32 0, align 4
@c = common global i32 0, align 4
define i32 @testoverflowcheck() {
entry:
  %.pr.i = load i8, i8* @e, align 1
  %0 = load i32, i32* @d, align 4
  %c.promoted.i = load i32, i32* @c, align 4
  br label %cond.end.i

cond.end.i:
  %inc4.i = phi i8 [ %.pr.i, %entry ], [ %inc.i, %cond.end.i ]
  %and3.i = phi i32 [ %c.promoted.i, %entry ], [ %and.i, %cond.end.i ]
  %and.i = and i32 %0, %and3.i
  %inc.i = add i8 %inc4.i, 1
  %tobool.i = icmp eq i8 %inc.i, 0
  br i1 %tobool.i, label %loopexit, label %cond.end.i

loopexit:
  ret i32 %and.i
}
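; A rough C equivalent of @testoverflowcheck above (hypothetical source, for
; illustration only): the i8 counter starts at whatever was loaded from @e and
; the loop runs until it wraps back around to 0.
;
;   char e = 1;
;   int d, c;
;   int testoverflowcheck(void) {
;     unsigned char i = e;
;     int acc = c;
;     do
;       acc &= d;
;     while (++i != 0);
;     return acc;
;   }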
; The SCEV expression of %sphi is (zext i8 {%t,+,1}<%loop> to i32)
; In order to recognize %sphi as an induction PHI and vectorize this loop,
; we need to convert the SCEV expression into an AddRecExpr.
; The expression gets converted to {zext i8 %t to i32,+,1}.

; CHECK-LABEL: wrappingindvars1
; CHECK-LABEL: vector.scevcheck
; CHECK-LABEL: vector.ph
; CHECK: %[[START:.*]] = add <2 x i32> %{{.*}}, <i32 0, i32 1>
; CHECK-LABEL: vector.body
; CHECK: %[[PHI:.*]] = phi <2 x i32> [ %[[START]], %vector.ph ], [ %[[STEP:.*]], %vector.body ]
; CHECK: %[[STEP]] = add <2 x i32> %[[PHI]], <i32 2, i32 2>
define void @wrappingindvars1(i8 %t, i32 %len, i32 *%A) {
entry:
  %st = zext i8 %t to i16
  %ext = zext i8 %t to i32
  %ecmp = icmp ult i16 %st, 42
  br i1 %ecmp, label %loop, label %exit

loop:
  %idx = phi i8 [ %t, %entry ], [ %idx.inc, %loop ]
  %idx.b = phi i32 [ 0, %entry ], [ %idx.b.inc, %loop ]
  %sphi = phi i32 [ %ext, %entry ], [ %idx.inc.ext, %loop ]

  %ptr = getelementptr inbounds i32, i32* %A, i8 %idx
  store i32 %sphi, i32* %ptr

  %idx.inc = add i8 %idx, 1
  %idx.inc.ext = zext i8 %idx.inc to i32
  %idx.b.inc = add nuw nsw i32 %idx.b, 1

  %c = icmp ult i32 %idx.b, %len
  br i1 %c, label %loop, label %exit

exit:
  ret void
}

; The SCEV expression of %sphi is (4 * (zext i8 {%t,+,1}<%loop> to i32))
; In order to recognize %sphi as an induction PHI and vectorize this loop,
; we need to convert the SCEV expression into an AddRecExpr.
; The expression gets converted to ({4 * (zext %t to i32),+,4}).
; CHECK-LABEL: wrappingindvars2
; CHECK-LABEL: vector.scevcheck
; CHECK-LABEL: vector.ph
; CHECK: %[[START:.*]] = add <2 x i32> %{{.*}}, <i32 0, i32 4>
; CHECK-LABEL: vector.body
; CHECK: %[[PHI:.*]] = phi <2 x i32> [ %[[START]], %vector.ph ], [ %[[STEP:.*]], %vector.body ]
; CHECK: %[[STEP]] = add <2 x i32> %[[PHI]], <i32 8, i32 8>
define void @wrappingindvars2(i8 %t, i32 %len, i32 *%A) {
entry:
  %st = zext i8 %t to i16
  %ext = zext i8 %t to i32
  %ext.mul = mul i32 %ext, 4

  %ecmp = icmp ult i16 %st, 42
  br i1 %ecmp, label %loop, label %exit

loop:
  %idx = phi i8 [ %t, %entry ], [ %idx.inc, %loop ]
  %sphi = phi i32 [ %ext.mul, %entry ], [ %mul, %loop ]
  %idx.b = phi i32 [ 0, %entry ], [ %idx.b.inc, %loop ]

  %ptr = getelementptr inbounds i32, i32* %A, i8 %idx
  store i32 %sphi, i32* %ptr

  %idx.inc = add i8 %idx, 1
  %idx.inc.ext = zext i8 %idx.inc to i32
  %mul = mul i32 %idx.inc.ext, 4
  %idx.b.inc = add nuw nsw i32 %idx.b, 1

  %c = icmp ult i32 %idx.b, %len
  br i1 %c, label %loop, label %exit

exit:
  ret void
}
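; A loose C sketch of @wrappingindvars2 above (hypothetical source, for
; illustration only; @wrappingindvars1 is the same pattern without the multiply
; by 4). The narrow i8 index can wrap around, which is what the
; vector.scevcheck block guards against:
;
;   void wrappingindvars2(unsigned char t, int len, int *A) {
;     if (t < 42) {
;       unsigned char idx = t;
;       int i = 0;
;       do {
;         A[idx] = 4 * (unsigned)idx;
;         ++idx;
;       } while (i++ < len);
;     }
;   }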
; Check that we generate vectorized IVs in the pre-header
; instead of widening the scalar IV inside the loop, when
; we know how to do that.

; IND-LABEL: veciv
; IND: vector.body:
; IND: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; IND: %vec.ind = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %step.add, %vector.body ]
; IND: %step.add = add <2 x i32> %vec.ind, <i32 2, i32 2>
; IND: %index.next = add i32 %index, 2
; IND: %[[CMP:.*]] = icmp eq i32 %index.next
; IND: br i1 %[[CMP]]
; UNROLL-LABEL: veciv
; UNROLL: vector.body:
; UNROLL: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; UNROLL: %vec.ind = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %step.add1, %vector.body ]
; UNROLL: %step.add = add <2 x i32> %vec.ind, <i32 2, i32 2>
; UNROLL: %step.add1 = add <2 x i32> %vec.ind, <i32 4, i32 4>
; UNROLL: %index.next = add i32 %index, 4
; UNROLL: %[[CMP:.*]] = icmp eq i32 %index.next
; UNROLL: br i1 %[[CMP]]
define void @veciv(i32* nocapture %a, i32 %start, i32 %k) {
for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %indvars.iv
  store i32 %indvars.iv, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, %k
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}

; IND-LABEL: trunciv
; IND: vector.body:
; IND: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; IND: %[[VECIND:.*]] = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %[[STEPADD:.*]], %vector.body ]
; IND: %[[STEPADD]] = add <2 x i32> %[[VECIND]], <i32 2, i32 2>
; IND: %index.next = add i64 %index, 2
; IND: %[[CMP:.*]] = icmp eq i64 %index.next
; IND: br i1 %[[CMP]]
define void @trunciv(i32* nocapture %a, i32 %start, i64 %k) {
for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %trunc.iv = trunc i64 %indvars.iv to i32
  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %trunc.iv
  store i32 %trunc.iv, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %k
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}

; IND-LABEL: nonprimary
; IND-LABEL: vector.ph
; IND: %[[INSERT:.*]] = insertelement <2 x i32> undef, i32 %i, i32 0
; IND: %[[SPLAT:.*]] = shufflevector <2 x i32> %[[INSERT]], <2 x i32> undef, <2 x i32> zeroinitializer
; IND: %[[START:.*]] = add <2 x i32> %[[SPLAT]], <i32 0, i32 42>
; IND-LABEL: vector.body:
; IND: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; IND: %vec.ind = phi <2 x i32> [ %[[START]], %vector.ph ], [ %step.add, %vector.body ]
; IND: %step.add = add <2 x i32> %vec.ind, <i32 84, i32 84>
; IND: %index.next = add i32 %index, 2
; IND: %[[CMP:.*]] = icmp eq i32 %index.next
; IND: br i1 %[[CMP]]
; UNROLL-LABEL: nonprimary
; UNROLL-LABEL: vector.ph
; UNROLL: %[[INSERT:.*]] = insertelement <2 x i32> undef, i32 %i, i32 0
; UNROLL: %[[SPLAT:.*]] = shufflevector <2 x i32> %[[INSERT]], <2 x i32> undef, <2 x i32> zeroinitializer
; UNROLL: %[[START:.*]] = add <2 x i32> %[[SPLAT]], <i32 0, i32 42>
; UNROLL-LABEL: vector.body:
; UNROLL: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; UNROLL: %vec.ind = phi <2 x i32> [ %[[START]], %vector.ph ], [ %step.add1, %vector.body ]
; UNROLL: %step.add = add <2 x i32> %vec.ind, <i32 84, i32 84>
; UNROLL: %step.add1 = add <2 x i32> %vec.ind, <i32 168, i32 168>
; UNROLL: %index.next = add i32 %index, 4
; UNROLL: %[[CMP:.*]] = icmp eq i32 %index.next
; UNROLL: br i1 %[[CMP]]
define void @nonprimary(i32* nocapture %a, i32 %start, i32 %i, i32 %k) {
for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ %i, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %indvars.iv
  store i32 %indvars.iv, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i32 %indvars.iv, 42
  %exitcond = icmp eq i32 %indvars.iv.next, %k
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}
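; A rough C equivalent of @nonprimary above (hypothetical source, for
; illustration only): the induction variable starts at the argument %i rather
; than at a constant, which is why the CHECK lines expect the vector IV start
; value to be built with an insertelement/shufflevector splat in vector.ph.
;
;   void nonprimary(int *a, int start, int i, int k) {
;     do {
;       a[i] = i;
;       i += 42;
;     } while (i != k);
;   }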