; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; This is kernel11 from "LivermoreLoops". We can't vectorize it because we
; access both x[k] and x[k-1].
;
; void kernel11(double *x, double *y, int n) {
;   for ( int k=1 ; k<n ; k++ )
;     x[k] = x[k-1] + y[k];
; }

; CHECK-LABEL: @kernel11(
; CHECK-NOT: <4 x double>
; CHECK: ret
define i32 @kernel11(double* %x, double* %y, i32 %n) nounwind uwtable ssp {
  %1 = alloca double*, align 8
  %2 = alloca double*, align 8
  %3 = alloca i32, align 4
  %k = alloca i32, align 4
  store double* %x, double** %1, align 8
  store double* %y, double** %2, align 8
  store i32 %n, i32* %3, align 4
  store i32 1, i32* %k, align 4
  br label %4

; <label>:4                                       ; preds = %25, %0
  %5 = load i32, i32* %k, align 4
  %6 = load i32, i32* %3, align 4
  %7 = icmp slt i32 %5, %6
  br i1 %7, label %8, label %28

; <label>:8                                       ; preds = %4
  %9 = load i32, i32* %k, align 4
  %10 = sub nsw i32 %9, 1
  %11 = sext i32 %10 to i64
  %12 = load double*, double** %1, align 8
  %13 = getelementptr inbounds double, double* %12, i64 %11
  %14 = load double, double* %13, align 8
  %15 = load i32, i32* %k, align 4
  %16 = sext i32 %15 to i64
  %17 = load double*, double** %2, align 8
  %18 = getelementptr inbounds double, double* %17, i64 %16
  %19 = load double, double* %18, align 8
  %20 = fadd double %14, %19
  %21 = load i32, i32* %k, align 4
  %22 = sext i32 %21 to i64
  %23 = load double*, double** %1, align 8
  %24 = getelementptr inbounds double, double* %23, i64 %22
  store double %20, double* %24, align 8
  br label %25

; <label>:25                                      ; preds = %8
  %26 = load i32, i32* %k, align 4
  %27 = add nsw i32 %26, 1
  store i32 %27, i32* %k, align 4
  br label %4

; <label>:28                                      ; preds = %4
  ret i32 0
}


; A[i*7] is scalarized, and the different scalars can in theory wrap
; around and overwrite other scalar elements. However we can still
; vectorize because we can version the loop to avoid this case.
;
; void foo(int *a) {
;   for (int i=0; i<256; ++i) {
;     int x = a[i*7];
;     if (x>3)
;       x = x*x+x*4;
;     a[i*7] = x+3;
;   }
; }

; CHECK-LABEL: @func2(
; CHECK: <4 x i32>
; CHECK: ret
define i32 @func2(i32* nocapture %a) nounwind uwtable ssp {
  br label %1

; <label>:1                                       ; preds = %7, %0
  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %7 ]
  %2 = mul nsw i64 %indvars.iv, 7
  %3 = getelementptr inbounds i32, i32* %a, i64 %2
  %4 = load i32, i32* %3, align 4
  %5 = icmp sgt i32 %4, 3
  br i1 %5, label %6, label %7

; <label>:6                                       ; preds = %1
  %tmp = add i32 %4, 4
  %tmp1 = mul i32 %tmp, %4
  br label %7

; <label>:7                                       ; preds = %6, %1
  %x.0 = phi i32 [ %tmp1, %6 ], [ %4, %1 ]
  %8 = add nsw i32 %x.0, 3
  store i32 %8, i32* %3, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %9, label %1

; <label>:9                                       ; preds = %7
  ret i32 0
}