; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-versioning -S < %s | FileCheck %s -check-prefix=LV

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; For this loop:
;   unsigned index = 0;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index++;
;   }
;
; SCEV is unable to prove that A[2 * index] does not overflow.
;
; Analyzing the IR does not help us because the GEPs are not
; affine AddRecExprs. However, we can turn them into AddRecExprs
; using SCEV Predicates.
;
; Once we have an affine expression we need to add an additional NUSW
; predicate to check that the pointers don't wrap, since the GEPs are
; not inbounds.

; The expression for %mul_ext as analyzed by SCEV is
;    (zext i32 {0,+,2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the SCEV expression:
;    i64 {0,+,2}<%for.body>

define void @f1(i16* noalias %a,
; LV-LABEL: @f1(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT3:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit3:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; For this loop:
;   unsigned index = n;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index--;
;   }
;
; the SCEV expression for 2 * index is not an AddRecExpr
; (and implicitly not affine). However, we are able to make assumptions
; that will turn the expression into an affine one and continue the
; analysis.
;
; Once we have an affine expression we need to add an additional NUSW
; predicate to check that the pointers don't wrap, since the GEPs are
; not inbounds.
;
; This loop has a negative stride for A, and the nusw flag is required in
; order to properly extend the increment from i32 -4 to i64 -4.
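;
; As a rough C model of the for.body.lver.check block emitted below (helper
; names like mul_u32_ovfl and a_byte_span_wraps are illustrative, not the
; exact IR), the condition that diverts execution to the unversioned loop is
; approximately:
;
;   uint64_t btc   = N - 1;                      // backedge-taken count
;   uint32_t start = 2 * (uint32_t)N;            // first value of 2 * index
;   bool     ovfl;
;   uint32_t span  = mul_u32_ovfl(2, (uint32_t)btc, &ovfl);
;   bool failed = ovfl
;              || (start - span > start)         // the i32 IV would wrap
;              || (btc > UINT32_MAX)             // trip count exceeds i32
;              || a_byte_span_wraps(A, 4 * btc); // nusw pointer-range check
;
; When 'failed' is true, control runs the original loop (the *.lver.orig
; blocks) instead of the version where the predicates are assumed.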

; The expression for %mul_ext as analyzed by SCEV is
;     (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
;     i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>

define void @f2(i16* noalias %a,
; LV-LABEL: @f2(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
; LV-NEXT:    [[TMP12:%.*]] = trunc i64 [[N]] to i31
; LV-NEXT:    [[TMP13:%.*]] = zext i31 [[TMP12]] to i64
; LV-NEXT:    [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 1
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP14]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP15:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP17:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP15]]
; LV-NEXT:    [[TMP18:%.*]] = icmp ugt i8* [[TMP17]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP21:%.*]] = or i1 [[TMP18]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP22:%.*]] = or i1 [[TMP10]], [[TMP21]]
; LV-NEXT:    br i1 [[TMP22]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; We replicate the tests above, but this time sign extend 2 * index instead
; of zero extending it.
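;
; A minimal illustration of the signed wrap the nssw predicate must rule out
; (the constant is chosen for the example, it does not come from the test):
;
;   int32_t mul = 2 * 0x40000000;  // 'mul i32' without nsw wraps to INT32_MIN
;                                  // (in plain C this overflow would be UB)
;   int64_t idx = (int64_t)mul;    // sext then yields a huge negative index
;
; Accordingly, the checks below compare signed (icmp slt/sgt) where the
; zext-based tests above compared unsigned (icmp ult/ugt).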

; The expression for %mul_ext as analyzed by SCEV is
;     (sext i32 {0,+,2}<%for.body> to i64)
; We have added the nssw flag to turn this expression into the following SCEV:
;     i64 {0,+,2}<%for.body>

define void @f3(i16* noalias %a,
; LV-LABEL: @f3(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP2]], 0
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[TMP7]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP9]], [[TMP17]]
; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

define void @f4(i16* noalias %a,
; LV-LABEL: @f4(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
; LV-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT:    br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The following function is similar to the one above, but its GEP into
; pointer %A is inbounds. The index %mul, however, doesn't have the nsw
; flag. This means that the SCEV expression for %mul can wrap, and we need
; a SCEV predicate to continue the analysis.
;
; We can still analyze this by adding the required no-wrap SCEV predicates.
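;
; Sketch of the failure mode the predicates exclude (illustrative values,
; not part of the test): the inbounds GEP does not help because the i32
; multiply feeding it may already have wrapped:
;
;   uint32_t index = (uint32_t)N;            // counts down toward zero
;   int32_t  mul   = (int32_t)(2 * index);   // wraps once index >= 0x40000000
;
; so a no-wrap predicate on the index expression is still required before it
; can be treated as an affine AddRecExpr.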

define void @f5(i16* noalias %a,
; LV-LABEL: @f5(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
; LV-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT:    br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2

  %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
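
; For reference, every test above checks the same overall shape produced by
; loop versioning; in C-like pseudocode (a sketch, not literal IR):
;
;   if (any_predicate_check_failed)   // for.body.lver.check
;     original_loop();                // the *.lver.orig blocks
;   else
;     assumed_nowrap_loop();          // for.body, where the predicates hold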