; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -basic-aa -loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; PredicatedScalarEvolution decides it needs to insert a bounds check
; not based on memory access.

define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) {
; CHECK-LABEL: @f(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint i32* [[A:%.*]] to i64
; CHECK-NEXT:    br label [[FOR_BODY_LVER_CHECK:%.*]]
; CHECK:       for.body.lver.check:
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; CHECK-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 0
; CHECK-NEXT:    [[TMP6:%.*]] = select i1 false, i1 [[TMP4]], i1 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; CHECK-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; CHECK-NEXT:    [[MUL3:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
; CHECK-NEXT:    [[MUL_RESULT4:%.*]] = extractvalue { i64, i1 } [[MUL3]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW5:%.*]] = extractvalue { i64, i1 } [[MUL3]], 1
; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[A2]], [[MUL_RESULT4]]
; CHECK-NEXT:    [[TMP12:%.*]] = sub i64 [[A2]], [[MUL_RESULT4]]
; CHECK-NEXT:    [[TMP13:%.*]] = icmp ugt i64 [[TMP12]], [[A2]]
; CHECK-NEXT:    [[TMP14:%.*]] = icmp ult i64 [[TMP11]], [[A2]]
; CHECK-NEXT:    [[TMP15:%.*]] = select i1 false, i1 [[TMP13]], i1 [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW5]]
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP10]], [[TMP16]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
; CHECK:       for.body.ph.lver.orig:
; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; CHECK:       for.body.lver.orig:
; CHECK-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; CHECK-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; CHECK-NEXT:    [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; CHECK-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADE_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXE_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADE_LVER_ORIG]]
; CHECK-NEXT:    [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
; CHECK-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; CHECK:       for.body.ph.ldist1:
; CHECK-NEXT:    br label [[FOR_BODY_LDIST1:%.*]]
; CHECK:       for.body.ldist1:
; CHECK-NEXT:    [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[IND1_LDIST1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[INC1_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[MUL_LDIST1:%.*]] = mul i32 [[IND1_LDIST1]], 2
; CHECK-NEXT:    [[MUL_EXT_LDIST1:%.*]] = zext i32 [[MUL_LDIST1]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4, !alias.scope !0
; CHECK-NEXT:    [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
; CHECK-NEXT:    [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; CHECK-NEXT:    [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
; CHECK-NEXT:    [[INC1_LDIST1]] = add i32 [[IND1_LDIST1]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
; CHECK-NEXT:    store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4, !alias.scope !3
; CHECK-NEXT:    [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; CHECK:       for.body.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; CHECK-NEXT:    [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
; CHECK-NEXT:    [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADE]]
; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[MUL_EXT]]
; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    br label [[FOR_END:%.*]]
; CHECK:       for.end.loopexit6:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64


  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare void @use64(i64)
@global_a = common local_unnamed_addr global [8192 x i32] zeroinitializer, align 16

; Same as @f, except %a is a global plus a constant offset rather than a
; function argument; the CHECK lines show the pointer bounds in the runtime
; check folding to `add (i64 ptrtoint (... @global_a ...), i64 168)` constant
; expressions instead of an SSA ptrtoint value.
define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) {
; CHECK-LABEL: @f_with_offset(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_BASE:%.*]] = getelementptr [8192 x i32], [8192 x i32]* @global_a, i32 0, i32 0
; CHECK-NEXT:    [[A_INTPTR:%.*]] = ptrtoint i32* [[A_BASE]] to i64
; CHECK-NEXT:    call void @use64(i64 [[A_INTPTR]])
; CHECK-NEXT:    [[A:%.*]] = getelementptr i32, i32* [[A_BASE]], i32 42
; CHECK-NEXT:    br label [[FOR_BODY_LVER_CHECK:%.*]]
; CHECK:       for.body.lver.check:
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; CHECK-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 0
; CHECK-NEXT:    [[TMP6:%.*]] = select i1 false, i1 [[TMP4]], i1 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; CHECK-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; CHECK-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
; CHECK-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; CHECK-NEXT:    [[TMP11:%.*]] = add i64 add (i64 ptrtoint ([8192 x i32]* @global_a to i64), i64 168), [[MUL_RESULT3]]
; CHECK-NEXT:    [[TMP12:%.*]] = sub i64 add (i64 ptrtoint ([8192 x i32]* @global_a to i64), i64 168), [[MUL_RESULT3]]
; CHECK-NEXT:    [[TMP13:%.*]] = icmp ugt i64 [[TMP12]], add (i64 ptrtoint ([8192 x i32]* @global_a to i64), i64 168)
; CHECK-NEXT:    [[TMP14:%.*]] = icmp ult i64 [[TMP11]], add (i64 ptrtoint ([8192 x i32]* @global_a to i64), i64 168)
; CHECK-NEXT:    [[TMP15:%.*]] = select i1 false, i1 [[TMP13]], i1 [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP10]], [[TMP16]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
; CHECK:       for.body.ph.lver.orig:
; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; CHECK:       for.body.lver.orig:
; CHECK-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; CHECK-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; CHECK-NEXT:    [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; CHECK-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADE_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXE_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADE_LVER_ORIG]]
; CHECK-NEXT:    [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
; CHECK-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; CHECK:       for.body.ph.ldist1:
; CHECK-NEXT:    br label [[FOR_BODY_LDIST1:%.*]]
; CHECK:       for.body.ldist1:
; CHECK-NEXT:    [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[IND1_LDIST1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[INC1_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[MUL_LDIST1:%.*]] = mul i32 [[IND1_LDIST1]], 2
; CHECK-NEXT:    [[MUL_EXT_LDIST1:%.*]] = zext i32 [[MUL_LDIST1]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4, !alias.scope !5
; CHECK-NEXT:    [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
; CHECK-NEXT:    [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; CHECK-NEXT:    [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
; CHECK-NEXT:    [[INC1_LDIST1]] = add i32 [[IND1_LDIST1]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
; CHECK-NEXT:    store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4, !alias.scope !8
; CHECK-NEXT:    [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; CHECK:       for.body.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; CHECK-NEXT:    [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
; CHECK-NEXT:    [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADE]]
; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[MUL_EXT]]
; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT5:%.*]], label [[FOR_BODY]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    br label [[FOR_END:%.*]]
; CHECK:       for.end.loopexit5:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %a_base = getelementptr [8192 x i32], [8192 x i32]* @global_a, i32 0, i32 0
  %a_intptr = ptrtoint i32* %a_base to i64
  call void @use64(i64 %a_intptr)
  %a = getelementptr i32, i32* %a_base, i32 42
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64


  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; Can't add control dependency with convergent in loop body.
; The CHECK lines below contain no ".lver." or ".ldist1" blocks: the loop is
; left unversioned and undistributed.
define void @f_with_convergent(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) #1 {
; CHECK-LABEL: @f_with_convergent(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADA:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4
; CHECK-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADB:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4
; CHECK-NEXT:    [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]]
; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD]]
; CHECK-NEXT:    store i32 [[MULA]], i32* [[ARRAYIDXA_PLUS_4]], align 4
; CHECK-NEXT:    [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
; CHECK-NEXT:    [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
; CHECK-NEXT:    [[CONVERGENTD:%.*]] = call i32 @llvm.convergent(i32 [[LOADD]])
; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[CONVERGENTD]], [[LOADE]]
; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64


  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
  %loadE = load i32, i32* %arrayidxE, align 4

  %convergentD = call i32 @llvm.convergent(i32 %loadD)
  %mulC = mul i32 %convergentD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare i32 @llvm.convergent(i32) #0

attributes #0 = { nounwind readnone convergent }
attributes #1 = { nounwind convergent }