; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -mtriple=x86_64-unknown-linux-gnu -S < %s | FileCheck %s

; The test checks that there is no assert caused by issue described in PR35432

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Only element [0][0] of @a is touched: it is load-promoted in
; %for.body8.lr.ph and stored back on the inner loop's exit edge.
@a = common local_unnamed_addr global [192 x [192 x i32]] zeroinitializer, align 16

; The CHECK block below is autogenerated; if the expected vectorizer output
; changes, regenerate it with utils/update_test_checks.py instead of editing
; the assertions by hand.
define i32 @main(i32* %ptr) {
; CHECK-LABEL: @main(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[S:%.*]] = alloca i16, align 2
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[I]] to i8*
; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[TMP0]])
; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i16* [[S]] to i8*
; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull [[TMP1]])
; CHECK-NEXT:    [[CALL:%.*]] = call i32 (i32*, ...) bitcast (i32 (...)* @goo to i32 (i32*, ...)*)(i32* nonnull [[I]])
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[I]], align 4
; CHECK-NEXT:    [[STOREMERGE6:%.*]] = trunc i32 [[TMP2]] to i16
; CHECK-NEXT:    store i16 [[STOREMERGE6]], i16* [[S]], align 2
; CHECK-NEXT:    [[CONV17:%.*]] = and i32 [[TMP2]], 65472
; CHECK-NEXT:    [[CMP8:%.*]] = icmp eq i32 [[CONV17]], 0
; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END12:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[STOREMERGE_IN9:%.*]] = phi i32 [ [[TMP2]], [[FOR_BODY_LR_PH]] ], [ [[ADD:%.*]], [[FOR_INC9:%.*]] ]
; CHECK-NEXT:    [[CONV52:%.*]] = and i32 [[STOREMERGE_IN9]], 255
; CHECK-NEXT:    [[CMP63:%.*]] = icmp ult i32 [[TMP2]], [[CONV52]]
; CHECK-NEXT:    br i1 [[CMP63]], label [[FOR_BODY8_LR_PH:%.*]], label [[FOR_INC9]]
; CHECK:       for.body8.lr.ph:
; CHECK-NEXT:    [[CONV3:%.*]] = trunc i32 [[STOREMERGE_IN9]] to i8
; CHECK-NEXT:    [[DOTPROMOTED:%.*]] = load i32, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
; CHECK-NEXT:    [[TMP3:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[TMP4]], 1
; CHECK-NEXT:    [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP2]], i32 [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = sub i32 [[TMP5]], [[UMIN1]]
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP6]], 32
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[TMP7:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT:    [[TMP8:%.*]] = zext i8 [[TMP7]] to i32
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP2]], i32 [[TMP8]])
; CHECK-NEXT:    [[TMP9:%.*]] = sub i32 [[TMP8]], [[UMIN]]
; CHECK-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 [[TMP10]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i8, i1 } [[MUL]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[TMP11:%.*]] = sub i8 [[TMP7]], [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP12:%.*]] = icmp ugt i8 [[TMP11]], [[TMP7]]
; CHECK-NEXT:    [[TMP13:%.*]] = or i1 [[TMP12]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP14:%.*]] = icmp ugt i32 [[TMP9]], 255
; CHECK-NEXT:    [[TMP15:%.*]] = or i1 [[TMP13]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = add i32 [[DOTPROMOTED]], 1
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP16]], [[TMP9]]
; CHECK-NEXT:    [[TMP18:%.*]] = icmp slt i32 [[TMP17]], [[TMP16]]
; CHECK-NEXT:    [[TMP19:%.*]] = or i1 [[TMP15]], [[TMP18]]
; CHECK-NEXT:    br i1 [[TMP19]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP6]], 8
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP6]], [[N_MOD_VF]]
; CHECK-NEXT:    [[IND_END:%.*]] = add i32 [[DOTPROMOTED]], [[N_VEC]]
; CHECK-NEXT:    [[CAST_CRD:%.*]] = trunc i32 [[N_VEC]] to i8
; CHECK-NEXT:    [[IND_END3:%.*]] = sub i8 [[CONV3]], [[CAST_CRD]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i32 [[DOTPROMOTED]], [[INDEX]]
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[OFFSET_IDX]], 4
; CHECK-NEXT:    [[TMP25:%.*]] = add i32 [[TMP20]], 1
; CHECK-NEXT:    [[TMP26:%.*]] = add i32 [[TMP21]], 1
; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP25]]
; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[TMP26]]
; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 0
; CHECK-NEXT:    [[TMP32:%.*]] = bitcast i32* [[TMP31]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* [[TMP32]], align 4
; CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 4
; CHECK-NEXT:    [[TMP34:%.*]] = bitcast i32* [[TMP33]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* [[TMP34]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT:    [[TMP37:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[TMP6]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[DOTPROMOTED]], [[FOR_BODY8_LR_PH]] ], [ [[DOTPROMOTED]], [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL2:%.*]] = phi i8 [ [[IND_END3]], [[MIDDLE_BLOCK]] ], [ [[CONV3]], [[FOR_BODY8_LR_PH]] ], [ [[CONV3]], [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY8:%.*]]
; CHECK:       for.body8:
; CHECK-NEXT:    [[INC5:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT:    [[C_04:%.*]] = phi i8 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[DEC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT:    [[INC]] = add i32 [[INC5]], 1
; CHECK-NEXT:    [[DEC]] = add i8 [[C_04]], -1
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[INC]]
; CHECK-NEXT:    store i32 0, i32* [[GEP]], align 4
; CHECK-NEXT:    [[CONV5:%.*]] = zext i8 [[DEC]] to i32
; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult i32 [[TMP2]], [[CONV5]]
; CHECK-NEXT:    br i1 [[CMP6]], label [[FOR_BODY8]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       for.cond4.for.inc9_crit_edge:
; CHECK-NEXT:    [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[FOR_BODY8]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    store i32 [[INC_LCSSA]], i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
; CHECK-NEXT:    br label [[FOR_INC9]]
; CHECK:       for.inc9:
; CHECK-NEXT:    [[CONV10:%.*]] = and i32 [[STOREMERGE_IN9]], 65535
; CHECK-NEXT:    [[ADD]] = add nuw nsw i32 [[CONV10]], 1
; CHECK-NEXT:    [[CONV1:%.*]] = and i32 [[ADD]], 65472
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CONV1]], 0
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END12_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end12_crit_edge:
; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_INC9]] ]
; CHECK-NEXT:    [[STOREMERGE:%.*]] = trunc i32 [[ADD_LCSSA]] to i16
; CHECK-NEXT:    store i16 [[STOREMERGE]], i16* [[S]], align 2
; CHECK-NEXT:    br label [[FOR_END12]]
; CHECK:       for.end12:
; CHECK-NEXT:    [[CALL13:%.*]] = call i32 (i16*, ...) bitcast (i32 (...)* @foo to i32 (i16*, ...)*)(i16* nonnull [[S]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull [[TMP1]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[TMP0]])
; CHECK-NEXT:    ret i32 0
;
entry:
  %i = alloca i32, align 4
  %s = alloca i16, align 2
  %0 = bitcast i32* %i to i8*
  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0) #3
  store i32 0, i32* %i, align 4
  %1 = bitcast i16* %s to i8*
  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %1) #3
  ; %2 is the value of %i after the opaque call to @goo; it bounds both loops
  ; below, so the vectorizer cannot know its range at compile time.
  %call = call i32 (i32*, ...) bitcast (i32 (...)* @goo to i32 (i32*, ...)*)(i32* nonnull %i) #3
  %2 = load i32, i32* %i, align 4
  %storemerge6 = trunc i32 %2 to i16
  store i16 %storemerge6, i16* %s, align 2
  %conv17 = and i32 %2, 65472
  %cmp8 = icmp eq i32 %conv17, 0
  br i1 %cmp8, label %for.body.lr.ph, label %for.end12

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

; Outer loop. Not vectorized; it only carries %storemerge.in9 into the inner
; loop's bound computation.
for.body:                                         ; preds = %for.body.lr.ph, %for.inc9
  %storemerge.in9 = phi i32 [ %2, %for.body.lr.ph ], [ %add, %for.inc9 ]
  %conv52 = and i32 %storemerge.in9, 255
  %cmp63 = icmp ult i32 %2, %conv52
  br i1 %cmp63, label %for.body8.lr.ph, label %for.inc9

; Inner-loop preheader: a[0][0] is promoted to the register %.promoted, and
; the outer induction value is truncated to i8 to seed the down-counter.
for.body8.lr.ph:                                  ; preds = %for.body
  %conv3 = trunc i32 %storemerge.in9 to i8
  %.promoted = load i32, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
  br label %for.body8

; Inner loop (the one the vectorizer transforms, per the CHECK lines above):
; an i8 induction %c.04 wraps downward via `add i8 ..., -1` while %inc counts
; up in i32. The exit test compares %2 against the zext'd i8 counter, which is
; the narrow-induction trip-count computation PR35432 is about.
for.body8:                                        ; preds = %for.body8.lr.ph, %for.body8
  %inc5 = phi i32 [ %.promoted, %for.body8.lr.ph ], [ %inc, %for.body8 ]
  %c.04 = phi i8 [ %conv3, %for.body8.lr.ph ], [ %dec, %for.body8 ]
  %inc = add i32 %inc5, 1
  %dec = add i8 %c.04, -1
  %gep = getelementptr inbounds i32, i32* %ptr, i32 %inc
  store i32 0, i32* %gep
  %conv5 = zext i8 %dec to i32
  %cmp6 = icmp ult i32 %2, %conv5
  br i1 %cmp6, label %for.body8, label %for.cond4.for.inc9_crit_edge

; Inner-loop exit: write the promoted counter back to a[0][0].
for.cond4.for.inc9_crit_edge:                     ; preds = %for.body8
  %inc.lcssa = phi i32 [ %inc, %for.body8 ]
  store i32 %inc.lcssa, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
  br label %for.inc9

for.inc9:                                         ; preds = %for.cond4.for.inc9_crit_edge, %for.body
  %conv10 = and i32 %storemerge.in9, 65535
  %add = add nuw nsw i32 %conv10, 1
  %conv1 = and i32 %add, 65472
  %cmp = icmp eq i32 %conv1, 0
  br i1 %cmp, label %for.body, label %for.cond.for.end12_crit_edge

for.cond.for.end12_crit_edge:                     ; preds = %for.inc9
  %add.lcssa = phi i32 [ %add, %for.inc9 ]
  %storemerge = trunc i32 %add.lcssa to i16
  store i16 %storemerge, i16* %s, align 2
  br label %for.end12

for.end12:                                        ; preds = %for.cond.for.end12_crit_edge, %entry
  %call13 = call i32 (i16*, ...) bitcast (i32 (...)* @foo to i32 (i16*, ...)*)(i16* nonnull %s) #3
  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %1) #3
  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0) #3
  ret i32 0
}

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1

; @goo and @foo are external and declared varargs; @main calls them through
; bitcasts of these declarations, so their effects are opaque to the optimizer.
declare i32 @goo(...) local_unnamed_addr #2

declare i32 @foo(...) local_unnamed_addr #2

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1