; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=armv8.1m.main -mattr=+mve -S -mve-tail-predication -tail-predication=enabled %s -o - | FileCheck %s

; Tests for the MVE tail-predication pass. Each input loop is a vectorized
; (VF = 4) inner reduction loop guarded by @llvm.get.active.lane.mask; the
; CHECK lines verify the pass rewrites that mask into @llvm.arm.mve.vctp32
; fed by a new element counter (phi of %N, decremented by 4 per iteration),
; while the hardware-loop intrinsics (start.loop.iterations /
; loop.decrement.reg) and the masked loads/reduction are left intact.

; Matrix-times-vector dot product with i16 elements sign-extended to i32.
; Outer loop over rows (for.cond1.preheader.us); inner tail-predicated
; vector.body accumulates into <4 x i32> %vec.phi, reduced in middle.block.
define void @mat_vec_sext_i16(i16** nocapture readonly %A, i16* nocapture readonly %B, i32* noalias nocapture %C, i32 %N) {
; CHECK-LABEL: @mat_vec_sext_i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP24:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP24]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]]
; CHECK: for.cond1.preheader.us.preheader:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 3
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -4
; CHECK-NEXT: [[TT:%.*]] = add i32 [[N_VEC]], -4
; CHECK-NEXT: [[TT1:%.*]] = lshr i32 [[TT]], 2
; CHECK-NEXT: [[TT2:%.*]] = add nuw nsw i32 [[TT1]], 1
; CHECK-NEXT: br label [[FOR_COND1_PREHEADER_US:%.*]]
; CHECK: for.cond1.preheader.us:
; CHECK-NEXT: [[I_025_US:%.*]] = phi i32 [ [[INC10_US:%.*]], [[MIDDLE_BLOCK:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16*, i16** [[A:%.*]], i32 [[I_025_US]]
; CHECK-NEXT: [[TT3:%.*]] = load i16*, i16** [[ARRAYIDX_US]], align 4
; CHECK-NEXT: [[ARRAYIDX8_US:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i32 [[I_025_US]]
; CHECK-NEXT: [[ARRAYIDX8_PROMOTED_US:%.*]] = load i32, i32* [[ARRAYIDX8_US]], align 4
; CHECK-NEXT: [[TT4:%.*]] = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX8_PROMOTED_US]], i32 0
; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TT2]])
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TT4]], [[FOR_COND1_PREHEADER_US]] ], [ [[TT14:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TT5:%.*]] = phi i32 [ [[START]], [[FOR_COND1_PREHEADER_US]] ], [ [[TT15:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TT6:%.*]] = getelementptr inbounds i16, i16* [[TT3]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]])
; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 4
; CHECK-NEXT: [[TT8:%.*]] = bitcast i16* [[TT6]] to <4 x i16>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* [[TT8]], i32 2, <4 x i1> [[TMP1]], <4 x i16> undef)
; CHECK-NEXT: [[TT9:%.*]] = sext <4 x i16> [[WIDE_MASKED_LOAD]] to <4 x i32>
; CHECK-NEXT: [[TT10:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TT11:%.*]] = bitcast i16* [[TT10]] to <4 x i16>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD30:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* [[TT11]], i32 2, <4 x i1> [[TMP1]], <4 x i16> undef)
; CHECK-NEXT: [[TT12:%.*]] = sext <4 x i16> [[WIDE_MASKED_LOAD30]] to <4 x i32>
; CHECK-NEXT: [[TT13:%.*]] = mul nsw <4 x i32> [[TT12]], [[TT9]]
; CHECK-NEXT: [[TT14]] = add nsw <4 x i32> [[TT13]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TT15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TT5]], i32 1)
; CHECK-NEXT: [[TT16:%.*]] = icmp ne i32 [[TT15]], 0
; CHECK-NEXT: br i1 [[TT16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK]]
; CHECK: middle.block:
; CHECK-NEXT: [[TT17:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TT14]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[TT18:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TT17]])
; CHECK-NEXT: store i32 [[TT18]], i32* [[ARRAYIDX8_US]], align 4
; CHECK-NEXT: [[INC10_US]] = add nuw i32 [[I_025_US]], 1
; CHECK-NEXT: [[EXITCOND27:%.*]] = icmp eq i32 [[INC10_US]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND27]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
  %cmp24 = icmp eq i32 %N, 0
  br i1 %cmp24, label %for.cond.cleanup, label %for.cond1.preheader.us.preheader

for.cond1.preheader.us.preheader:                 ; preds = %entry
  ; Trip count setup: round %N up to a multiple of 4, then %tt2 = ceil(N/4)
  ; iterations for the hardware loop.
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %tt = add i32 %n.vec, -4
  %tt1 = lshr i32 %tt, 2
  %tt2 = add nuw nsw i32 %tt1, 1
  br label %for.cond1.preheader.us

for.cond1.preheader.us:                           ; preds = %middle.block, %for.cond1.preheader.us.preheader
  ; Per-row setup: load row pointer A[i], seed the reduction vector with the
  ; promoted value of C[i] in lane 0.
  %i.025.us = phi i32 [ %inc10.us, %middle.block ], [ 0, %for.cond1.preheader.us.preheader ]
  %arrayidx.us = getelementptr inbounds i16*, i16** %A, i32 %i.025.us
  %tt3 = load i16*, i16** %arrayidx.us, align 4
  %arrayidx8.us = getelementptr inbounds i32, i32* %C, i32 %i.025.us
  %arrayidx8.promoted.us = load i32, i32* %arrayidx8.us, align 4
  %tt4 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %arrayidx8.promoted.us, i32 0
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tt2)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %for.cond1.preheader.us
  %index = phi i32 [ 0, %for.cond1.preheader.us ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ %tt4, %for.cond1.preheader.us ], [ %tt14, %vector.body ]
  %tt5 = phi i32 [ %start, %for.cond1.preheader.us ], [ %tt15, %vector.body ]
  %tt6 = getelementptr inbounds i16, i16* %tt3, i32 %index
  ; The pass is expected to replace this lane mask with @llvm.arm.mve.vctp32
  ; driven by a decrementing element count (see CHECK lines above).
  %tt7 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %tt8 = bitcast i16* %tt6 to <4 x i16>*
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %tt8, i32 2, <4 x i1> %tt7, <4 x i16> undef)
  %tt9 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %tt10 = getelementptr inbounds i16, i16* %B, i32 %index
  %tt11 = bitcast i16* %tt10 to <4 x i16>*
  %wide.masked.load30 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %tt11, i32 2, <4 x i1> %tt7, <4 x i16> undef)
  %tt12 = sext <4 x i16> %wide.masked.load30 to <4 x i32>
  %tt13 = mul nsw <4 x i32> %tt12, %tt9
  %tt14 = add nsw <4 x i32> %tt13, %vec.phi
  %index.next = add i32 %index, 4
  %tt15 = call i32 @llvm.loop.decrement.reg.i32(i32 %tt5, i32 1)
  %tt16 = icmp ne i32 %tt15, 0
  br i1 %tt16, label %vector.body, label %middle.block

middle.block:                                     ; preds = %vector.body
  ; Mask out inactive tail lanes, horizontally reduce, and store C[i].
  %tt17 = select <4 x i1> %tt7, <4 x i32> %tt14, <4 x i32> %vec.phi
  %tt18 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tt17)
  store i32 %tt18, i32* %arrayidx8.us, align 4
  %inc10.us = add nuw i32 %i.025.us, 1
  %exitcond27 = icmp eq i32 %inc10.us, %N
  br i1 %exitcond27, label %for.cond.cleanup, label %for.cond1.preheader.us

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  ret void
}

; Same matrix-times-vector reduction as above, but with native i32 elements
; (no sign-extension); structure and expected rewrite are otherwise identical.
define void @mat_vec_i32(i32** nocapture readonly %A, i32* nocapture readonly %B, i32* noalias nocapture %C, i32 %N) {
; CHECK-LABEL: @mat_vec_i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP23:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP23]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]]
; CHECK: for.cond1.preheader.us.preheader:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 3
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -4
; CHECK-NEXT: [[TT:%.*]] = add i32 [[N_VEC]], -4
; CHECK-NEXT: [[TT1:%.*]] = lshr i32 [[TT]], 2
; CHECK-NEXT: [[TT2:%.*]] = add nuw nsw i32 [[TT1]], 1
; CHECK-NEXT: br label [[FOR_COND1_PREHEADER_US:%.*]]
; CHECK: for.cond1.preheader.us:
; CHECK-NEXT: [[I_024_US:%.*]] = phi i32 [ [[INC9_US:%.*]], [[MIDDLE_BLOCK:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32*, i32** [[A:%.*]], i32 [[I_024_US]]
; CHECK-NEXT: [[TT3:%.*]] = load i32*, i32** [[ARRAYIDX_US]], align 4
; CHECK-NEXT: [[ARRAYIDX7_US:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i32 [[I_024_US]]
; CHECK-NEXT: [[ARRAYIDX7_PROMOTED_US:%.*]] = load i32, i32* [[ARRAYIDX7_US]], align 4
; CHECK-NEXT: [[TT4:%.*]] = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX7_PROMOTED_US]], i32 0
; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TT2]])
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TT4]], [[FOR_COND1_PREHEADER_US]] ], [ [[TT12:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TT5:%.*]] = phi i32 [ [[START]], [[FOR_COND1_PREHEADER_US]] ], [ [[TT13:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TT6:%.*]] = getelementptr inbounds i32, i32* [[TT3]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]])
; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 4
; CHECK-NEXT: [[TT8:%.*]] = bitcast i32* [[TT6]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TT8]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT: [[TT9:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TT10:%.*]] = bitcast i32* [[TT9]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD29:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TT10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT: [[TT11:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD29]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TT12]] = add nsw <4 x i32> [[VEC_PHI]], [[TT11]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TT13]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TT5]], i32 1)
; CHECK-NEXT: [[TT14:%.*]] = icmp ne i32 [[TT13]], 0
; CHECK-NEXT: br i1 [[TT14]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK]]
; CHECK: middle.block:
; CHECK-NEXT: [[TT15:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TT12]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[TT16:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TT15]])
; CHECK-NEXT: store i32 [[TT16]], i32* [[ARRAYIDX7_US]], align 4
; CHECK-NEXT: [[INC9_US]] = add nuw i32 [[I_024_US]], 1
; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[INC9_US]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND26]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
  %cmp23 = icmp eq i32 %N, 0
  br i1 %cmp23, label %for.cond.cleanup, label %for.cond1.preheader.us.preheader

for.cond1.preheader.us.preheader:                 ; preds = %entry
  ; Trip count setup: %tt2 = ceil(N/4) hardware-loop iterations.
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %tt = add i32 %n.vec, -4
  %tt1 = lshr i32 %tt, 2
  %tt2 = add nuw nsw i32 %tt1, 1
  br label %for.cond1.preheader.us

for.cond1.preheader.us:                           ; preds = %middle.block, %for.cond1.preheader.us.preheader
  ; Per-row setup: load row pointer A[i] and seed lane 0 of the accumulator
  ; with the promoted C[i].
  %i.024.us = phi i32 [ %inc9.us, %middle.block ], [ 0, %for.cond1.preheader.us.preheader ]
  %arrayidx.us = getelementptr inbounds i32*, i32** %A, i32 %i.024.us
  %tt3 = load i32*, i32** %arrayidx.us, align 4
  %arrayidx7.us = getelementptr inbounds i32, i32* %C, i32 %i.024.us
  %arrayidx7.promoted.us = load i32, i32* %arrayidx7.us, align 4
  %tt4 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %arrayidx7.promoted.us, i32 0
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tt2)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %for.cond1.preheader.us
  %index = phi i32 [ 0, %for.cond1.preheader.us ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ %tt4, %for.cond1.preheader.us ], [ %tt12, %vector.body ]
  %tt5 = phi i32 [ %start, %for.cond1.preheader.us ], [ %tt13, %vector.body ]
  %tt6 = getelementptr inbounds i32, i32* %tt3, i32 %index
  ; Expected to be rewritten to @llvm.arm.mve.vctp32 by the pass.
  %tt7 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %tt8 = bitcast i32* %tt6 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tt8, i32 4, <4 x i1> %tt7, <4 x i32> undef)
  %tt9 = getelementptr inbounds i32, i32* %B, i32 %index
  %tt10 = bitcast i32* %tt9 to <4 x i32>*
  %wide.masked.load29 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tt10, i32 4, <4 x i1> %tt7, <4 x i32> undef)
  %tt11 = mul nsw <4 x i32> %wide.masked.load29, %wide.masked.load
  %tt12 = add nsw <4 x i32> %vec.phi, %tt11
  %index.next = add i32 %index, 4
  %tt13 = call i32 @llvm.loop.decrement.reg.i32(i32 %tt5, i32 1)
  %tt14 = icmp ne i32 %tt13, 0
  br i1 %tt14, label %vector.body, label %middle.block

middle.block:                                     ; preds = %vector.body
  ; Mask out tail lanes, reduce, and store the row result to C[i].
  %tt15 = select <4 x i1> %tt7, <4 x i32> %tt12, <4 x i32> %vec.phi
  %tt16 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tt15)
  store i32 %tt16, i32* %arrayidx7.us, align 4
  %inc9.us = add nuw i32 %i.024.us, 1
  %exitcond26 = icmp eq i32 %inc9.us, %N
  br i1 %exitcond26, label %for.cond.cleanup, label %for.cond1.preheader.us

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  ret void
}

; Intrinsic declarations used by the test functions above.

; Function Attrs: argmemonly nounwind readonly willreturn
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #0

; Function Attrs: argmemonly nounwind readonly willreturn
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #0

; Function Attrs: nounwind readnone willreturn
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #1

; Function Attrs: noduplicate nounwind
declare i32 @llvm.start.loop.iterations.i32(i32) #2

; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32(i32, i32) #2

declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

attributes #0 = { argmemonly nounwind readonly willreturn }
attributes #1 = { nounwind readnone willreturn }
attributes #2 = { noduplicate nounwind }