; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -mtriple=x86_64-unknown-linux-gnu -S < %s | FileCheck %s

; The test checks that there is no assert caused by the issue described in PR35432.

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

@a = common local_unnamed_addr global [192 x [192 x i32]] zeroinitializer, align 16

; Function Attrs: nounwind uwtable
define i32 @main() local_unnamed_addr #0 {
; CHECK-LABEL: @main(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[S:%.*]] = alloca i16, align 2
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[I]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[TMP0]])
; CHECK-NEXT: store i32 0, i32* [[I]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[S]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull [[TMP1]])
; CHECK-NEXT: [[CALL:%.*]] = call i32 (i32*, ...) bitcast (i32 (...)* @goo to i32 (i32*, ...)*)(i32* nonnull [[I]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4
; CHECK-NEXT: [[STOREMERGE6:%.*]] = trunc i32 [[TMP2]] to i16
; CHECK-NEXT: store i16 [[STOREMERGE6]], i16* [[S]], align 2
; CHECK-NEXT: [[CONV17:%.*]] = and i32 [[TMP2]], 65472
; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[CONV17]], 0
; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END12:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[STOREMERGE_IN9:%.*]] = phi i32 [ [[TMP2]], [[FOR_BODY_LR_PH]] ], [ [[ADD:%.*]], [[FOR_INC9:%.*]] ]
; CHECK-NEXT: [[CONV52:%.*]] = and i32 [[STOREMERGE_IN9]], 255
; CHECK-NEXT: [[CMP63:%.*]] = icmp ult i32 [[TMP2]], [[CONV52]]
; CHECK-NEXT: br i1 [[CMP63]], label [[FOR_BODY8_LR_PH:%.*]], label [[FOR_INC9]]
; CHECK: for.body8.lr.ph:
; CHECK-NEXT: [[CONV3:%.*]] = trunc i32 [[STOREMERGE_IN9]] to i8
; CHECK-NEXT: [[DOTPROMOTED:%.*]] = load i32, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
; CHECK-NEXT: [[TMP3:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], 1
; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP2]], i32 [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[UMIN1]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP6]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK: vector.scevcheck:
; CHECK-NEXT: [[TMP7:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT: [[TMP8:%.*]] = zext i8 [[TMP7]] to i32
; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP2]], i32 [[TMP8]])
; CHECK-NEXT: [[TMP9:%.*]] = sub i32 [[TMP8]], [[UMIN]]
; CHECK-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
; CHECK-NEXT: [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 [[TMP10]])
; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i8, i1 } [[MUL]], 0
; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT: [[TMP12:%.*]] = sub i8 [[TMP7]], [[MUL_RESULT]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp ugt i8 [[TMP12]], [[TMP7]]
; CHECK-NEXT: [[TMP17:%.*]] = or i1 [[TMP14]], [[MUL_OVERFLOW]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp ugt i32 [[TMP9]], 255
; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP17]], [[TMP16]]
; CHECK-NEXT: br i1 [[TMP18]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP6]], 8
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP6]], [[N_MOD_VF]]
; CHECK-NEXT: [[CAST_CRD:%.*]] = trunc i32 [[N_VEC]] to i8
; CHECK-NEXT: [[IND_END:%.*]] = sub i8 [[CONV3]], [[CAST_CRD]]
; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[DOTPROMOTED]], i32 0
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP20]], [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP21:%.*]] = trunc i32 [[INDEX]] to i8
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i8 [[CONV3]], [[TMP21]]
; CHECK-NEXT: [[TMP22:%.*]] = add i8 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP23:%.*]] = add i8 [[OFFSET_IDX]], -4
; CHECK-NEXT: [[TMP24]] = add <4 x i32> [[VEC_PHI]], <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP25]] = add <4 x i32> [[VEC_PHI2]], <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP26:%.*]] = add i8 [[TMP22]], -1
; CHECK-NEXT: [[TMP27:%.*]] = add i8 [[TMP23]], -1
; CHECK-NEXT: [[TMP28:%.*]] = zext i8 [[TMP26]] to i32
; CHECK-NEXT: [[TMP29:%.*]] = zext i8 [[TMP27]] to i32
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP25]], [[TMP24]]
; CHECK-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP6]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[CONV3]], [[FOR_BODY8_LR_PH]] ], [ [[CONV3]], [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[DOTPROMOTED]], [[FOR_BODY8_LR_PH]] ], [ [[DOTPROMOTED]], [[VECTOR_SCEVCHECK]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_BODY8:%.*]]
; CHECK: for.body8:
; CHECK-NEXT: [[INC5:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT: [[C_04:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[DEC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT: [[INC]] = add i32 [[INC5]], 1
; CHECK-NEXT: [[DEC]] = add i8 [[C_04]], -1
; CHECK-NEXT: [[CONV5:%.*]] = zext i8 [[DEC]] to i32
; CHECK-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP2]], [[CONV5]]
; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY8]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: for.cond4.for.inc9_crit_edge:
; CHECK-NEXT: [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[FOR_BODY8]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store i32 [[INC_LCSSA]], i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
; CHECK-NEXT: br label [[FOR_INC9]]
; CHECK: for.inc9:
; CHECK-NEXT: [[CONV10:%.*]] = and i32 [[STOREMERGE_IN9]], 65535
; CHECK-NEXT: [[ADD]] = add nuw nsw i32 [[CONV10]], 1
; CHECK-NEXT: [[CONV1:%.*]] = and i32 [[ADD]], 65472
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[CONV1]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END12_CRIT_EDGE:%.*]]
; CHECK: for.cond.for.end12_crit_edge:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_INC9]] ]
; CHECK-NEXT: [[STOREMERGE:%.*]] = trunc i32 [[ADD_LCSSA]] to i16
; CHECK-NEXT: store i16 [[STOREMERGE]], i16* [[S]], align 2
; CHECK-NEXT: br label [[FOR_END12]]
; CHECK: for.end12:
; CHECK-NEXT: [[CALL13:%.*]] = call i32 (i16*, ...) bitcast (i32 (...)* @foo to i32 (i16*, ...)*)(i16* nonnull [[S]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull [[TMP1]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[TMP0]])
; CHECK-NEXT: ret i32 0
;
entry:
  %i = alloca i32, align 4
  %s = alloca i16, align 2
  %0 = bitcast i32* %i to i8*
  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0) #3
  store i32 0, i32* %i, align 4
  %1 = bitcast i16* %s to i8*
  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %1) #3
  %call = call i32 (i32*, ...) bitcast (i32 (...)* @goo to i32 (i32*, ...)*)(i32* nonnull %i) #3
  %2 = load i32, i32* %i, align 4
  %storemerge6 = trunc i32 %2 to i16
  store i16 %storemerge6, i16* %s, align 2
  %conv17 = and i32 %2, 65472
  %cmp8 = icmp eq i32 %conv17, 0
  br i1 %cmp8, label %for.body.lr.ph, label %for.end12

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc9
  %storemerge.in9 = phi i32 [ %2, %for.body.lr.ph ], [ %add, %for.inc9 ]
  %conv52 = and i32 %storemerge.in9, 255
  %cmp63 = icmp ult i32 %2, %conv52
  br i1 %cmp63, label %for.body8.lr.ph, label %for.inc9

for.body8.lr.ph:                                  ; preds = %for.body
  %conv3 = trunc i32 %storemerge.in9 to i8
  %.promoted = load i32, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
  br label %for.body8

for.body8:                                        ; preds = %for.body8.lr.ph, %for.body8
  %inc5 = phi i32 [ %.promoted, %for.body8.lr.ph ], [ %inc, %for.body8 ]
  %c.04 = phi i8 [ %conv3, %for.body8.lr.ph ], [ %dec, %for.body8 ]
  %inc = add i32 %inc5, 1
  %dec = add i8 %c.04, -1
  %conv5 = zext i8 %dec to i32
  %cmp6 = icmp ult i32 %2, %conv5
  br i1 %cmp6, label %for.body8, label %for.cond4.for.inc9_crit_edge

for.cond4.for.inc9_crit_edge:                     ; preds = %for.body8
  %inc.lcssa = phi i32 [ %inc, %for.body8 ]
  store i32 %inc.lcssa, i32* getelementptr inbounds ([192 x [192 x i32]], [192 x [192 x i32]]* @a, i64 0, i64 0, i64 0), align 16
  br label %for.inc9

for.inc9:                                         ; preds = %for.cond4.for.inc9_crit_edge, %for.body
  %conv10 = and i32 %storemerge.in9, 65535
  %add = add nuw nsw i32 %conv10, 1
  %conv1 = and i32 %add, 65472
  %cmp = icmp eq i32 %conv1, 0
  br i1 %cmp, label %for.body, label %for.cond.for.end12_crit_edge

for.cond.for.end12_crit_edge:                     ; preds = %for.inc9
  %add.lcssa = phi i32 [ %add, %for.inc9 ]
  %storemerge = trunc i32 %add.lcssa to i16
  store i16 %storemerge, i16* %s, align 2
  br label %for.end12

for.end12:                                        ; preds = %for.cond.for.end12_crit_edge, %entry
  %call13 = call i32 (i16*, ...) bitcast (i32 (...)* @foo to i32 (i16*, ...)*)(i16* nonnull %s) #3
  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %1) #3
  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0) #3
  ret i32 0
}

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1

declare i32 @goo(...) local_unnamed_addr #2

declare i32 @foo(...) local_unnamed_addr #2

; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1