; RUN: opt -loop-vectorize -scalable-vectorization=off -force-vector-width=4 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s

; NOTE: These tests aren't really target-specific, but it's convenient to target AArch64
; so that TTI.isLegalMaskedLoad can return true.

target triple = "aarch64-linux-gnu"

; The original loop had an unconditional uniform load. Let's make sure
; we don't artificially create new predicated blocks for the load.
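; A roughly equivalent C form of the scalar loop below (an illustrative
; sketch only; the IR is the actual test input):
;
;   void uniform_load(int *restrict dst, const int *restrict src, long n) {
;     for (long i = 0; i < n; i++)
;       dst[i] = *src;
;   }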
define void @uniform_load(i32* noalias %dst, i32* noalias readonly %src, i64 %n) #0 {
; CHECK-LABEL: @uniform_load(
; CHECK:       vector.body:
; CHECK-NEXT:    [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i64> poison, i64 [[IDX]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[INDUCTION:%.*]] = add <4 x i64> [[TMP2]], <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[IDX]], 0
; CHECK-NEXT:    [[LOOP_PRED:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[TMP3]], i64 %n)
; CHECK-NEXT:    [[LOAD_VAL:%.*]] = load i32, i32* %src, align 4
; CHECK-NOT:     load i32, i32* %src, align 4
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[LOAD_VAL]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* %dst, i64 [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0
; CHECK-NEXT:    [[STORE_PTR:%.*]] = bitcast i32* [[TMP7]] to <4 x i32>*
; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP5]], <4 x i32>* [[STORE_PTR]], i32 4, <4 x i1> [[LOOP_PRED]])
; CHECK-NEXT:    [[IDX_NEXT]] = add i64 [[IDX]], 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[IDX_NEXT]], %n.vec
; CHECK-NEXT:    br i1 [[CMP]], label %middle.block, label %vector.body

entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %val = load i32, i32* %src, align 4
  %arrayidx = getelementptr inbounds i32, i32* %dst, i64 %indvars.iv
  store i32 %val, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The original loop had a conditional uniform load. In this case we actually
; do need to perform conditional loads and so we end up using a gather instead.
; However, we at least ensure the gather mask is the intersection (logical AND)
; of the loop predicate and the original condition.
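; A roughly equivalent C form of the scalar loop below (an illustrative
; sketch only; the IR is the actual test input):
;
;   void cond_uniform_load(int *dst, const int *src, const int *cond, long n) {
;     for (long i = 0; i < n; i++)
;       dst[i] = cond[i] != 0 ? *src : 0;
;   }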
define void @cond_uniform_load(i32* nocapture %dst, i32* nocapture readonly %src, i32* nocapture readonly %cond, i64 %n) #0 {
; CHECK-LABEL: @cond_uniform_load(
; CHECK:       vector.ph:
; CHECK:         [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* %src, i32 0
; CHECK-NEXT:    [[SRC_SPLAT:%.*]] = shufflevector <4 x i32*> [[TMP1]], <4 x i32*> poison, <4 x i32> zeroinitializer
; CHECK:       vector.body:
; CHECK-NEXT:    [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
; CHECK:         [[TMP1:%.*]] = insertelement <4 x i64> poison, i64 [[IDX]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[INDUCTION:%.*]] = add <4 x i64> [[TMP2]], <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[IDX]], 0
; CHECK-NEXT:    [[LOOP_PRED:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[TMP3]], i64 %n)
; CHECK:         [[COND_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{%.*}}, i32 4, <4 x i1> [[LOOP_PRED]], <4 x i32> poison)
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq <4 x i32> [[COND_LOAD]], zeroinitializer
; CHECK-NEXT:    [[TMP5:%.*]] = xor <4 x i1> [[TMP4]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    [[MASK:%.*]] = select <4 x i1> [[LOOP_PRED]], <4 x i1> [[TMP5]], <4 x i1> zeroinitializer
; CHECK-NEXT:    call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[SRC_SPLAT]], i32 4, <4 x i1> [[MASK]], <4 x i32> undef)
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %if.end
  %index = phi i64 [ %index.next, %if.end ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %cond, i64 %index
  %0 = load i32, i32* %arrayidx, align 4
  %tobool.not = icmp eq i32 %0, 0
  br i1 %tobool.not, label %if.end, label %if.then

if.then:                                          ; preds = %for.body
  %1 = load i32, i32* %src, align 4
  br label %if.end

if.end:                                           ; preds = %if.then, %for.body
  %val.0 = phi i32 [ %1, %if.then ], [ 0, %for.body ]
  %arrayidx1 = getelementptr inbounds i32, i32* %dst, i64 %index
  store i32 %val.0, i32* %arrayidx1, align 4
  %index.next = add nuw i64 %index, 1
  %exitcond.not = icmp eq i64 %index.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %if.end
  ret void
}

attributes #0 = { "target-features"="+neon,+sve,+v8.1a" vscale_range(2, 0) }