; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -loop-vectorize -tail-predication=enabled -dce -instcombine -S | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main-none-none-eabi"

define i32 @reduction_sum_single(i32* noalias nocapture %A) {
; CHECK-LABEL: @reduction_sum_single(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
; CHECK-NEXT: [[TMP3]] = add i32 [[TMP2]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
; CHECK: .lr.ph:
; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ poison, [[DOTLR_PH]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph: ; preds = %entry, %.lr.ph
  %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l7, %.lr.ph ], [ 0, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l7 = add i32 %sum.02, %l3
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge: ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l7, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

define i32 @reduction_sum(i32* noalias nocapture %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @reduction_sum(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP3]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[VEC_IND]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD1]])
; CHECK-NEXT: [[TMP10]] = add i32 [[TMP9]], [[TMP8]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
; CHECK: .lr.ph:
; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ poison, [[DOTLR_PH]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph: ; preds = %entry, %.lr.ph
  %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l9, %.lr.ph ], [ 0, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l4 = getelementptr inbounds i32, i32* %B, i32 %indvars.iv
  %l5 = load i32, i32* %l4, align 4
  %l7 = add i32 %sum.02, %indvars.iv
  %l8 = add i32 %l7, %l3
  %l9 = add i32 %l8, %l5
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge: ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

define i32 @reduction_prod(i32* noalias nocapture %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @reduction_prod(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ <i32 1, i32 1, i32 1, i32 1>, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP3]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i32> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP5:%.*]] = mul <4 x i32> [[TMP4]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP6]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP5]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP6]])
; CHECK-NEXT: br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
; CHECK: .lr.ph:
; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = phi i32 [ poison, [[DOTLR_PH]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph: ; preds = %entry, %.lr.ph
  %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %prod.02 = phi i32 [ %l9, %.lr.ph ], [ 1, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l4 = getelementptr inbounds i32, i32* %B, i32 %indvars.iv
  %l5 = load i32, i32* %l4, align 4
  %l8 = mul i32 %prod.02, %l3
  %l9 = mul i32 %l8, %l5
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge: ; preds = %.lr.ph
  %prod.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  ret i32 %prod.0.lcssa
}

define i32 @reduction_and(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_and(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ <i32 -1, i32 -1, i32 -1, i32 -1>, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP3]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP4:%.*]] = and <4 x i32> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP5:%.*]] = and <4 x i32> [[TMP4]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP6]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP5]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP6]])
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %and, %for.body ], [ -1, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %B, i32 %indvars.iv
  %l1 = load i32, i32* %arrayidx2, align 4
  %add = and i32 %result.08, %l0
  %and = and i32 %add, %l1
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %and, %for.body ]
  ret i32 %result.0.lcssa
}

define i32 @reduction_or(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_or(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP3]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP4]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6]] = or <4 x i32> [[VEC_PHI]], [[TMP5]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP6]])
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %or, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %B, i32 %indvars.iv
  %l1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %l1, %l0
  %or = or i32 %add, %result.08
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %or, %for.body ]
  ret i32 %result.0.lcssa
}

define i32 @reduction_xor(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_xor(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP3]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP4]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6]] = xor <4 x i32> [[VEC_PHI]], [[TMP5]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP6]])
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %xor, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %B, i32 %indvars.iv
  %l1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %l1, %l0
  %xor = xor i32 %add, %result.08
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %xor, %for.body ]
  ret i32 %result.0.lcssa
}

define float @reduction_fadd(float* nocapture %A, float* nocapture %B) {
; CHECK-LABEL: @reduction_fadd(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[TMP0]] to <4 x float>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> poison)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <4 x float>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[TMP3]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> poison)
; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <4 x float> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP5:%.*]] = fadd fast <4 x float> [[TMP4]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP6]] = select fast <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> [[TMP5]], <4 x float> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP8:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP6]])
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ poison, [[FOR_BODY]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi float [ %fadd, %for.body ], [ 0.0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %A, i32 %indvars.iv
  %l0 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %B, i32 %indvars.iv
  %l1 = load float, float* %arrayidx2, align 4
  %add = fadd fast float %result.08, %l0
  %fadd = fadd fast float %add, %l1
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %result.0.lcssa = phi float [ %fadd, %for.body ]
  ret float %result.0.lcssa
}

define float @reduction_fmul(float* nocapture %A, float* nocapture %B) {
; CHECK-LABEL: @reduction_fmul(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ <float 0.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 257)
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[TMP0]] to <4 x float>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> poison)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <4 x float>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[TMP3]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> poison)
; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <4 x float> [[VEC_PHI]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <4 x float> [[TMP4]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP6]] = select fast <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> [[TMP5]], <4 x float> [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP8:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP6]])
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ poison, [[FOR_BODY]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %A, i32 %indvars.iv
  %l0 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %B, i32 %indvars.iv
  %l1 = load float, float* %arrayidx2, align 4
  %add = fmul fast float %result.08, %l0
  %fmul = fmul fast float %add, %l1
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %result.0.lcssa = phi float [ %fmul, %for.body ]
  ret float %result.0.lcssa
}

define i32 @reduction_min(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_min(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ <i32 1000, i32 1000, i32 1000, i32 1000>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT: [[TMP2]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: br i1 false, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 256, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP4]], [[MIDDLE_BLOCK]] ], [ 1000, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[TMP5:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[L0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP5]] = call i32 @llvm.smin.i32(i32 [[RESULT_08]], i32 [[L0]])
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 257
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[TMP5]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %v0, %for.body ], [ 1000, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %c0 = icmp slt i32 %result.08, %l0
  %v0 = select i1 %c0, i32 %result.08, i32 %l0
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %v0, %for.body ]
  ret i32 %result.0.lcssa
}

define i32 @reduction_max(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_max(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ <i32 1000, i32 1000, i32 1000, i32 1000>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT: [[TMP2]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: br i1 false, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 256, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP4]], [[MIDDLE_BLOCK]] ], [ 1000, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[TMP5:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[L0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TMP5]] = call i32 @llvm.umax.i32(i32 [[RESULT_08]], i32 [[L0]])
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 257
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[TMP5]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %v0, %for.body ], [ 1000, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %c0 = icmp ugt i32 %result.08, %l0
  %v0 = select i1 %c0, i32 %result.08, i32 %l0
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %v0, %for.body ]
  ret i32 %result.0.lcssa
}

define float @reduction_fmax(float* nocapture %A, float* nocapture %B) {
; CHECK-LABEL: @reduction_fmax(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[RESULT_08:%.*]] = phi float [ [[V0:%.*]], [[FOR_BODY]] ], [ 1.000000e+03, [[ENTRY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[L0:%.*]] = load float, float* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[C0:%.*]] = fcmp ogt float [[RESULT_08]], [[L0]]
; CHECK-NEXT: [[V0]] = select i1 [[C0]], float [[RESULT_08]], float [[L0]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 257
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret float [[V0]]
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi float [ %v0, %for.body ], [ 1000.0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %A, i32 %indvars.iv
  %l0 = load float, float* %arrayidx, align 4
  %c0 = fcmp ogt float %result.08, %l0
  %v0 = select i1 %c0, float %result.08, float %l0
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 257
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %result.0.lcssa = phi float [ %v0, %for.body ]
  ret float %result.0.lcssa
}