; RUN: opt < %s  -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; PR15882: This test ensures that we do not produce wrapping arithmetic when
; creating constant reverse step vectors.
;
; int foo(int n, int *A) {
;   int sum;
;   for (int i=n; i > 0; i--)
;     sum += A[i*2];
;   return sum;
; }
;

;CHECK-LABEL: @foo(
;CHECK:  <i32 0, i32 -1, i32 -2, i32 -3>
;CHECK: ret
; Reverse-iterating reduction over A[i*2]; the vectorizer must emit the
; non-wrapping reverse step vector <0, -1, -2, -3> checked above.
define i32 @foo(i32 %n, i32* nocapture %A) {
  %1 = icmp sgt i32 %n, 0
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0
  %2 = sext i32 %n to i64
  br label %3

; <label>:3                                       ; preds = %.lr.ph, %3
  %indvars.iv = phi i64 [ %2, %.lr.ph ], [ %indvars.iv.next, %3 ]
  %sum.01 = phi i32 [ undef, %.lr.ph ], [ %9, %3 ]
  %4 = trunc i64 %indvars.iv to i32
  %5 = shl nsw i32 %4, 1
  %6 = sext i32 %5 to i64
  %7 = getelementptr inbounds i32, i32* %A, i64 %6
  %8 = load i32, i32* %7, align 4
  %9 = add nsw i32 %8, %sum.01
  %indvars.iv.next = add i64 %indvars.iv, -1
  %10 = trunc i64 %indvars.iv.next to i32
  %11 = icmp sgt i32 %10, 0
  br i1 %11, label %3, label %._crit_edge

._crit_edge:                                      ; preds = %3, %0
  %sum.0.lcssa = phi i32 [ undef, %0 ], [ %9, %3 ]
  ret i32 %sum.0.lcssa
}