; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -riscv-v-vector-bits-min=128 -scalable-vectorization=on -force-target-instruction-cost=1 -S < %s | FileCheck %s
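;
; The loop below has a trip count of 5, which is smaller than the smallest
; possible scalable VF here: with -riscv-v-vector-bits-min=128, vscale is at
; least 2, so VF = vscale x 8 covers at least 16 elements. The tail is folded
; with @llvm.get.active.lane.mask and the vector body is expected to execute
; exactly once, which is why the backedge in the checks below is the constant
; 'br i1 true'.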

target triple = "riscv64"

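; The scalar loop computes dst[i] += (src[i] << 1) for i in [0, 5).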
define void @trip5_i8(i8* noalias nocapture noundef %dst, i8* noalias nocapture noundef readonly %src) #0 {
; CHECK-LABEL: @trip5_i8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 8
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i64 -6, [[TMP1]]
; CHECK-NEXT:    br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 8
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 8
; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP6]], 1
; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i64 5, [[TMP7]]
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP8]], i64 5)
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, i8* [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to <vscale x 8 x i8>*
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0nxv8i8(<vscale x 8 x i8>* [[TMP11]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x i8> poison)
; CHECK-NEXT:    [[TMP12:%.*]] = shl <vscale x 8 x i8> [[WIDE_MASKED_LOAD]], shufflevector (<vscale x 8 x i8> insertelement (<vscale x 8 x i8> poison, i8 1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, i8* [[DST:%.*]], i64 [[TMP8]]
; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, i8* [[TMP13]], i32 0
; CHECK-NEXT:    [[TMP15:%.*]] = bitcast i8* [[TMP14]] to <vscale x 8 x i8>*
; CHECK-NEXT:    [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0nxv8i8(<vscale x 8 x i8>* [[TMP15]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x i8> poison)
; CHECK-NEXT:    [[TMP16:%.*]] = add <vscale x 8 x i8> [[TMP12]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP14]] to <vscale x 8 x i8>*
; CHECK-NEXT:    call void @llvm.masked.store.nxv8i8.p0nxv8i8(<vscale x 8 x i8> [[TMP16]], <vscale x 8 x i8>* [[TMP17]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP19:%.*]] = mul i64 [[TMP18]], 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP19]]
; CHECK-NEXT:    br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP20:%.*]] = load i8, i8* [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[MUL:%.*]] = shl i8 [[TMP20]], 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP21:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[MUL]], [[TMP21]]
; CHECK-NEXT:    store i8 [[ADD]], i8* [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 5
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8, i8* %src, i64 %i.08
  %0 = load i8, i8* %arrayidx, align 1
  %mul = shl i8 %0, 1
  %arrayidx1 = getelementptr inbounds i8, i8* %dst, i64 %i.08
  %1 = load i8, i8* %arrayidx1, align 1
  %add = add i8 %mul, %1
  store i8 %add, i8* %arrayidx1, align 1
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, 5
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

attributes #0 = { "target-features"="+v,+d" }