; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -S -loop-vectorize < %s -o - | FileCheck %s

; This case triggers aggressive interleaving on PowerPC, resulting in a large number of
; runtime memory checks. (The A2 always unrolls aggressively; with aggressive interleaving
; enabled, similar issues may occur on other targets.)
; Like the VF, the interleave count should also be restricted by the memory-check threshold
; (e.g., runtime-memory-check-threshold, default 8).

; CHECK-LABEL: @eddy_diff_caleddy_
; CHECK-NOT: vector.memcheck

define fastcc void @eddy_diff_caleddy_(i64* %wet_cl, i64 %0, i32 %ncol.cast.val) {
entry:
  %trip.count = add nuw i32 %ncol.cast.val, 1
  %wide.trip.count = zext i32 %ncol.cast.val to i64
  %1 = shl i64 %0, 1
  %2 = mul i64 %0, 3
  %3 = shl i64 %0, 2
  %4 = mul i64 %0, 5
  %5 = mul i64 %0, 6
  %6 = mul i64 %0, 7
  %7 = shl i64 %0, 3
  %8 = mul i64 %0, 9
  %9 = mul i64 %0, 10
  %10 = mul i64 %0, 11
  %11 = mul i64 %0, 12
  br label %loop.body

loop.body:
  %indvars.iv774 = phi i64 [ 0, %entry ], [ %indvars.iv.next775, %loop.body ]
  %12 = add nsw i64 %indvars.iv774, -5
  %13 = add i64 %12, %0
  %14 = getelementptr i64, i64* %wet_cl, i64 %13
  %15 = bitcast i64* %14 to double*
  store double 0.000000e+00, double* %15, align 8
  %16 = add i64 %12, %1
  %17 = getelementptr i64, i64* %wet_cl, i64 %16
  %18 = bitcast i64* %17 to double*
  store double 0.000000e+00, double* %18, align 8
  %19 = add i64 %12, %2
  %20 = getelementptr i64, i64* %wet_cl, i64 %19
  %21 = bitcast i64* %20 to double*
  store double 0.000000e+00, double* %21, align 8
  %22 = add i64 %12, %3
  %23 = getelementptr i64, i64* %wet_cl, i64 %22
  %24 = bitcast i64* %23 to double*
  store double 0.000000e+00, double* %24, align 8
  %25 = add i64 %12, %4
  %26 = getelementptr i64, i64* %wet_cl, i64 %25
  %27 = bitcast i64* %26 to double*
  store double 0.000000e+00, double* %27, align 8
  %28 = add i64 %12, %5
  %29 = getelementptr i64, i64* %wet_cl, i64 %28
  %30 = bitcast i64* %29 to double*
  store double 0.000000e+00, double* %30, align 8
  %31 = add i64 %12, %6
  %32 = getelementptr i64, i64* %wet_cl, i64 %31
  %33 = bitcast i64* %32 to double*
  store double 0.000000e+00, double* %33, align 8
  %34 = add i64 %12, %7
  %35 = getelementptr i64, i64* %wet_cl, i64 %34
  %36 = bitcast i64* %35 to double*
  store double 0.000000e+00, double* %36, align 8
  %37 = add i64 %12, %8
  %38 = getelementptr i64, i64* %wet_cl, i64 %37
  %39 = bitcast i64* %38 to double*
  store double 0.000000e+00, double* %39, align 8
  %40 = add i64 %12, %9
  %41 = getelementptr i64, i64* %wet_cl, i64 %40
  %42 = bitcast i64* %41 to double*
  store double 0.000000e+00, double* %42, align 8
  %43 = add i64 %12, %10
  %44 = getelementptr i64, i64* %wet_cl, i64 %43
  %45 = bitcast i64* %44 to double*
  store double 0.000000e+00, double* %45, align 8
  %46 = add i64 %12, %11
  %47 = getelementptr i64, i64* %wet_cl, i64 %46
  %48 = bitcast i64* %47 to double*
  store double 0.000000e+00, double* %48, align 8
  %indvars.iv.next775 = add nuw nsw i64 %indvars.iv774, 1
  %exitcond778.not = icmp eq i64 %indvars.iv.next775, %wide.trip.count
  br i1 %exitcond778.not, label %loop.end, label %loop.body

loop.end:
  ret void
}
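
; Illustrative note (a sketch, not part of this test's RUN lines): the limit referenced in
; the header comment is the hidden 'runtime-memory-check-threshold' option, so a non-default
; bound could be exercised with an invocation along these lines:
;   opt -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -S -loop-vectorize \
;       -runtime-memory-check-threshold=16 < %s | FileCheck %s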