; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-target-supports-scalable-vectors=true -scalable-vectorization=on -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Checks that a scalable-VF (vscale x 8), interleave-count-2 vectorization of a
; truncated i8 add-reduction keeps the reduction in <vscale x 8 x i32> inside
; the loop (masking with 255 via trunc/zext round-trips) and reduces in i8 in
; the middle block.
define i8 @reduction_add_trunc(i8* noalias nocapture %A) {
; CHECK-LABEL: @reduction_add_trunc(
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ insertelement (<vscale x 8 x i32> zeroinitializer, i32 255, i32 0), %vector.ph ], [ [[TMP34:%.*]], %vector.body ]
; CHECK-NEXT:    [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP36:%.*]], %vector.body ]
; CHECK:         [[TMP14:%.*]] = and <vscale x 8 x i32> [[VEC_PHI]], shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 255, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP15:%.*]] = and <vscale x 8 x i32> [[VEC_PHI1]], shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 255, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK:         [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>*
; CHECK:         [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>*
; CHECK-NEXT:    [[TMP26:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-NEXT:    [[TMP27:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-NEXT:    [[TMP28:%.*]] = add <vscale x 8 x i32> [[TMP14]], [[TMP26]]
; CHECK-NEXT:    [[TMP29:%.*]] = add <vscale x 8 x i32> [[TMP15]], [[TMP27]]
; CHECK-NEXT:    [[TMP30:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP31:%.*]] = mul i32 [[TMP30]], 16
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP31]]
; CHECK-NEXT:    [[TMP32:%.*]] = icmp eq i32 [[INDEX_NEXT]], {{%.*}}
; CHECK-NEXT:    [[TMP33:%.*]] = trunc <vscale x 8 x i32> [[TMP28]] to <vscale x 8 x i8>
; CHECK-NEXT:    [[TMP34]] = zext <vscale x 8 x i8> [[TMP33]] to <vscale x 8 x i32>
; CHECK-NEXT:    [[TMP35:%.*]] = trunc <vscale x 8 x i32> [[TMP29]] to <vscale x 8 x i8>
; CHECK-NEXT:    [[TMP36]] = zext <vscale x 8 x i8> [[TMP35]] to <vscale x 8 x i32>
; CHECK:       middle.block:
; CHECK-NEXT:    [[TMP37:%.*]] = trunc <vscale x 8 x i32> [[TMP34]] to <vscale x 8 x i8>
; CHECK-NEXT:    [[TMP38:%.*]] = trunc <vscale x 8 x i32> [[TMP36]] to <vscale x 8 x i8>
; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <vscale x 8 x i8> [[TMP38]], [[TMP37]]
; CHECK-NEXT:    [[TMP39:%.*]] = call i8 @llvm.vector.reduce.add.nxv8i8(<vscale x 8 x i8> [[BIN_RDX]])
; CHECK-NEXT:    [[TMP40:%.*]] = zext i8 [[TMP39]] to i32
;
entry:
  br label %loop

loop:                                             ; preds = %entry, %loop
  %indvars.iv = phi i32 [ %indvars.iv.next, %loop ], [ 0, %entry ]
  %sum.02p = phi i32 [ %l9, %loop ], [ 255, %entry ]
  %sum.02 = and i32 %sum.02p, 255
  %l2 = getelementptr inbounds i8, i8* %A, i32 %indvars.iv
  %l3 = load i8, i8* %l2, align 4
  %l3e = zext i8 %l3 to i32
  %l9 = add i32 %sum.02, %l3e
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 256
  br i1 %exitcond, label %exit, label %loop, !llvm.loop !0

exit:                                             ; preds = %loop
  %sum.0.lcssa = phi i32 [ %l9, %loop ]
  %ret = trunc i32 %sum.0.lcssa to i8
  ret i8 %ret
}

; Force scalable vectorization (VF = vscale x 8) with interleave count 2.
!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.vectorize.width", i32 8}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!3 = !{!"llvm.loop.interleave.count", i32 2}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}