; RUN: opt -loop-vectorize -scalable-vectorization=off -force-vector-width=4 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s

; NOTE: These tests aren't really target-specific, but it's convenient to target AArch64
; so that TTI.isLegalMaskedLoad can return true.

target triple = "aarch64-linux-gnu"

; The original loop had an unconditional uniform load. Let's make sure
; we don't artificially create new predicated blocks for the load.
define void @uniform_load(i32* noalias %dst, i32* noalias readonly %src, i64 %n) #0 {
; CHECK-LABEL: @uniform_load(
; CHECK: vector.ph:
; CHECK: [[INIT_ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 %n)
; CHECK: vector.body:
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[INIT_ACTIVE_LANE_MASK]], %vector.ph ], [ [[NEXT_ACTIVE_LANE_MASK:%.*]], %vector.body ]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IDX]], 0
; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, i32* %src, align 4
; CHECK-NOT: load i32, i32* %src, align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[LOAD_VAL]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* %dst, i64 [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0
; CHECK-NEXT: [[STORE_PTR:%.*]] = bitcast i32* [[TMP7]] to <4 x i32>*
; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP5]], <4 x i32>* [[STORE_PTR]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[IDX_NEXT]] = add i64 [[IDX]], 4
; CHECK-NEXT: [[NEXT_ACTIVE_LANE_MASK]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[IDX_NEXT]], i64 %n)
; CHECK-NEXT: [[NOT_ACTIVE_LANE_MASK:%.*]] = xor <4 x i1> [[NEXT_ACTIVE_LANE_MASK]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[FIRST_LANE_SET:%.*]] = extractelement <4 x i1> [[NOT_ACTIVE_LANE_MASK]], i32 0
; CHECK-NEXT: br i1 [[FIRST_LANE_SET]], label %middle.block, label %vector.body

entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %val = load i32, i32* %src, align 4
  %arrayidx = getelementptr inbounds i32, i32* %dst, i64 %indvars.iv
  store i32 %val, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The original loop had a conditional uniform load. In this case we actually
; do need to perform conditional loads and so we end up using a gather instead.
; However, we at least ensure the mask is the overlap of the loop predicate
; and the original condition.
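; Concretely, the checks below expect the uniform pointer %src to be splatted
; into a <4 x i32*> vector and loaded with llvm.masked.gather, where the gather
; mask is the active lane mask combined (via select) with the inverted
; zero-compare of the loaded %cond values.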
define void @cond_uniform_load(i32* nocapture %dst, i32* nocapture readonly %src, i32* nocapture readonly %cond, i64 %n) #0 {
; CHECK-LABEL: @cond_uniform_load(
; CHECK: vector.ph:
; CHECK: [[INIT_ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 %n)
; CHECK: [[TMP1:%.*]] = insertelement <4 x i32*> poison, i32* %src, i32 0
; CHECK-NEXT: [[SRC_SPLAT:%.*]] = shufflevector <4 x i32*> [[TMP1]], <4 x i32*> poison, <4 x i32> zeroinitializer
; CHECK: vector.body:
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[INIT_ACTIVE_LANE_MASK]], %vector.ph ], [ [[NEXT_ACTIVE_LANE_MASK:%.*]], %vector.body ]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IDX]], 0
; CHECK: [[COND_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{%.*}}, i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i32> [[COND_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = xor <4 x i1> [[TMP4]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[MASK:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP5]], <4 x i1> zeroinitializer
; CHECK-NEXT: call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[SRC_SPLAT]], i32 4, <4 x i1> [[MASK]], <4 x i32> undef)
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %if.end
  %index = phi i64 [ %index.next, %if.end ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %cond, i64 %index
  %0 = load i32, i32* %arrayidx, align 4
  %tobool.not = icmp eq i32 %0, 0
  br i1 %tobool.not, label %if.end, label %if.then

if.then:                                          ; preds = %for.body
  %1 = load i32, i32* %src, align 4
  br label %if.end

if.end:                                           ; preds = %if.then, %for.body
  %val.0 = phi i32 [ %1, %if.then ], [ 0, %for.body ]
  %arrayidx1 = getelementptr inbounds i32, i32* %dst, i64 %index
  store i32 %val.0, i32* %arrayidx1, align 4
  %index.next = add nuw i64 %index, 1
  %exitcond.not = icmp eq i64 %index.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %if.end
  ret void
}

attributes #0 = { "target-features"="+neon,+sve,+v8.1a" vscale_range(2, 0) }