; RUN: opt -mtriple=thumbv8.1m.main-arm-eabihf -mattr=+mve.fp -loop-vectorize -tail-predication=enabled -S < %s | \
; RUN:  FileCheck %s

; Target datalayout for 32-bit Arm (MVE-capable M-profile).
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"

; Check that the loop hint predicate.enable can overrule the TTI hook. For
; this test case, the TTI hook rejects tail-predication:
;
;   ARMHWLoops: Trip count does not fit into 32bits
;   preferPredicateOverEpilogue: hardware-loop is not profitable.
;
define dso_local void @tail_folding(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) {
; CHECK-LABEL: tail_folding(
; CHECK:       vector.body:
; CHECK-NOT:   call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
; CHECK-NOT:   call void @llvm.masked.store.v4i32.p0v4i32(
; CHECK:       br i1 %{{.*}}, label %{{.*}}, label %vector.body
entry:
  br label %for.body

for.cond.cleanup:
  ret void

; Scalar loop: A[i] = B[i] + C[i] for i in [0, 430). The i64 induction
; variable is what makes ARMHWLoops reject tail-predication (see the
; comment at the top of the file); no loop metadata is attached here, so
; the TTI decision stands and no masked loads/stores should be emitted.
for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %1, %0
  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %add, i32* %arrayidx4, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 430
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; The same test case but now with predicate.enable = true should get
; tail-folded.
;
define dso_local void @predicate_loop_hint(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) {
; CHECK-LABEL: predicate_loop_hint(
; CHECK:       vector.body:
; CHECK:         %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK:         %[[ELEM0:.*]] = add i64 %index, 0
; CHECK:         %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %[[ELEM0]], i64 430)
; CHECK:         %[[WML1:.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32({{.*}}<4 x i1> %active.lane.mask
; CHECK:         %[[WML2:.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32({{.*}}<4 x i1> %active.lane.mask
; CHECK:         %[[ADD:.*]] = add nsw <4 x i32> %[[WML2]], %[[WML1]]
; CHECK:         call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %[[ADD]], {{.*}}<4 x i1> %active.lane.mask
; CHECK:         %index.next = add i64 %index, 4
; CHECK:         br i1 %{{.*}}, label %{{.*}}, label %vector.body
entry:
  br label %for.body

for.cond.cleanup:
  ret void

; Same A[i] = B[i] + C[i] loop as @tail_folding, but the backedge carries
; !llvm.loop !6 (predicate.enable = true), so the vectorizer must tail-fold
; using an active-lane mask and masked loads/stores.
for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %1, %0
  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %add, i32* %arrayidx4, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 430
  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !6
}

; CHECK:      !0 = distinct !{!0, !1}
; CHECK-NEXT: !1 = !{!"llvm.loop.isvectorized", i32 1}
; CHECK-NEXT: !2 = distinct !{!2, !3, !1}
; CHECK-NEXT: !3 = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK-NEXT: !4 = distinct !{!4, !1}
; CHECK-NEXT: !5 = distinct !{!5, !3, !1}

; Loop hints attached to @predicate_loop_hint's backedge: force the
; vectorizer on and request predication (tail-folding), overruling the TTI.
!6 = distinct !{!6, !7, !8}
!7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
!8 = !{!"llvm.loop.vectorize.enable", i1 true}