; REQUIRES: asserts

; RUN: opt -loop-vectorize -debug-only=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -prefer-inloop-reductions -enable-interleaved-mem-accesses=true -enable-masked-interleaved-mem-accesses -disable-output %s 2>&1 | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Tests for printing VPlans.

define void @print_call_and_memory(i64 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
; CHECK-LABEL: Checking a loop in "print_call_and_memory"
; CHECK:      VPlan 'Initial VPlan for VF={4},UF>=1' {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT:   WIDEN-INDUCTION %iv = phi %iv.next, 0
; CHECK-NEXT:   CLONE ir<%arrayidx> = getelementptr ir<%y>, ir<%iv>
; CHECK-NEXT:   WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT:   WIDEN-CALL ir<%call> = call @llvm.sqrt.f32(ir<%lv>)
; CHECK-NEXT:   CLONE ir<%arrayidx2> = getelementptr ir<%x>, ir<%iv>
; CHECK-NEXT:   WIDEN store ir<%arrayidx2>, ir<%call>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: No successors
; CHECK-NEXT: }
;
entry:
  %cmp6 = icmp sgt i64 %n, 0
  br i1 %cmp6, label %for.body, label %for.end

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %y, i64 %iv
  %lv = load float, float* %arrayidx, align 4
  %call = tail call float @llvm.sqrt.f32(float %lv) nounwind readnone
  %arrayidx2 = getelementptr inbounds float, float* %x, i64 %iv
  store float %call, float* %arrayidx2, align 4
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

define void @print_widen_gep_and_select(i64 %n, float* noalias %y, float* noalias %x, float* %z) nounwind uwtable {
; CHECK-LABEL: Checking a loop in "print_widen_gep_and_select"
; CHECK:      VPlan 'Initial VPlan for VF={4},UF>=1' {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT:   WIDEN-INDUCTION %iv = phi %iv.next, 0
; CHECK-NEXT:   WIDEN-GEP Inv[Var] ir<%arrayidx> = getelementptr ir<%y>, ir<%iv>
; CHECK-NEXT:   WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT:   WIDEN ir<%cmp> = icmp ir<%arrayidx>, ir<%z>
; CHECK-NEXT:   WIDEN-SELECT ir<%sel> = select ir<%cmp>, ir<1.000000e+01>, ir<2.000000e+01>
; CHECK-NEXT:   WIDEN ir<%add> = fadd ir<%lv>, ir<%sel>
; CHECK-NEXT:   CLONE ir<%arrayidx2> = getelementptr ir<%x>, ir<%iv>
; CHECK-NEXT:   WIDEN store ir<%arrayidx2>, ir<%add>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: No successors
; CHECK-NEXT: }
;
entry:
  %cmp6 = icmp sgt i64 %n, 0
  br i1 %cmp6, label %for.body, label %for.end

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %y, i64 %iv
  %lv = load float, float* %arrayidx, align 4
  %cmp = icmp eq float* %arrayidx, %z
  %sel = select i1 %cmp, float 10.0, float 20.0
  %add = fadd float %lv, %sel
  %arrayidx2 = getelementptr inbounds float, float* %x, i64 %iv
  store float %add, float* %arrayidx2, align 4
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

define float @print_reduction(i64 %n, float* noalias %y) {
; CHECK-LABEL: Checking a loop in "print_reduction"
; CHECK:      VPlan 'Initial VPlan for VF={4},UF>=1' {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT:   WIDEN-INDUCTION %iv = phi %iv.next, 0
; CHECK-NEXT:   WIDEN-REDUCTION-PHI ir<%red> = phi ir<0.000000e+00>, ir<%red.next>
; CHECK-NEXT:   CLONE ir<%arrayidx> = getelementptr ir<%y>, ir<%iv>
; CHECK-NEXT:   WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT:   REDUCE ir<%red.next> = ir<%red> + fast reduce.fadd (ir<%lv>)
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: No successors
; CHECK-NEXT: }
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
  %red = phi float [ %red.next, %for.body ], [ 0.0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %y, i64 %iv
  %lv = load float, float* %arrayidx, align 4
  %red.next = fadd fast float %lv, %red
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret float %red.next
}

define void @print_replicate_predicated_phi(i64 %n, i64* %x) {
; CHECK-LABEL: Checking a loop in "print_replicate_predicated_phi"
; CHECK:      VPlan 'Initial VPlan for VF={4},UF>=1' {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT:   WIDEN-INDUCTION %i = phi 0, %i.next
; CHECK-NEXT:   WIDEN ir<%cmp> = icmp ir<%i>, ir<5>
; CHECK-NEXT: Successor(s): if.then
; CHECK-EMPTY:
; CHECK-NEXT: if.then:
; CHECK-NEXT: Successor(s): pred.udiv
; CHECK-EMPTY:
; CHECK-NEXT: <xVFxUF> pred.udiv: {
; CHECK-NEXT:   pred.udiv.entry:
; CHECK-NEXT:     BRANCH-ON-MASK ir<%cmp>
; CHECK-NEXT:   Successor(s): pred.udiv.if, pred.udiv.continue
; CHECK-NEXT:   CondBit: ir<%cmp>
; CHECK-EMPTY:
; CHECK-NEXT:   pred.udiv.if:
; CHECK-NEXT:     REPLICATE ir<%tmp4> = udiv ir<%n>, ir<%i> (S->V)
; CHECK-NEXT:   Successor(s): pred.udiv.continue
; CHECK-EMPTY:
; CHECK-NEXT:   pred.udiv.continue:
; CHECK-NEXT:     PHI-PREDICATED-INSTRUCTION vp<[[PRED:%.+]]> = ir<%tmp4>
; CHECK-NEXT:   No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): if.then.0
; CHECK-EMPTY:
; CHECK-NEXT: if.then.0:
; CHECK-NEXT: Successor(s): for.inc
; CHECK-EMPTY:
; CHECK-NEXT: for.inc:
; CHECK-NEXT:   EMIT vp<[[NOT:%.+]]> = not ir<%cmp>
; CHECK-NEXT:   BLEND %d = ir<0>/vp<[[NOT]]> vp<[[PRED]]>/ir<%cmp>
; CHECK-NEXT:   CLONE ir<%idx> = getelementptr ir<%x>, ir<%i>
; CHECK-NEXT:   WIDEN store ir<%idx>, ir<%d>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: No successors
; CHECK-NEXT: }
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.inc, %entry
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.inc ]
  %cmp = icmp ult i64 %i, 5
  br i1 %cmp, label %if.then, label %for.inc

if.then:                                          ; preds = %for.body
  %tmp4 = udiv i64 %n, %i
  br label %for.inc

for.inc:                                          ; preds = %if.then, %for.body
  %d = phi i64 [ 0, %for.body ], [ %tmp4, %if.then ]
  %idx = getelementptr i64, i64* %x, i64 %i
  store i64 %d, i64* %idx
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.inc
  ret void
}

@AB = common global [1024 x i32] zeroinitializer, align 4
@CD = common global [1024 x i32] zeroinitializer, align 4

define void @print_interleave_groups(i32 %C, i32 %D) {
; CHECK-LABEL: Checking a loop in "print_interleave_groups"
; CHECK:      VPlan 'Initial VPlan for VF={4},UF>=1' {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT:   WIDEN-INDUCTION %iv = phi 0, %iv.next
; CHECK-NEXT:   CLONE ir<%gep.AB.0> = getelementptr ir<@AB>, ir<0>, ir<%iv>
; CHECK-NEXT:   INTERLEAVE-GROUP with factor 4 at %AB.0, ir<%gep.AB.0>
; CHECK-NEXT:     ir<%AB.0> = load from index 0
; CHECK-NEXT:     ir<%AB.1> = load from index 1
; CHECK-NEXT:     ir<%AB.3> = load from index 3
; CHECK-NEXT:   CLONE ir<%iv.plus.1> = add ir<%iv>, ir<1>
; CHECK-NEXT:   CLONE ir<%gep.AB.1> = getelementptr ir<@AB>, ir<0>, ir<%iv.plus.1>
; CHECK-NEXT:   CLONE ir<%iv.plus.2> = add ir<%iv>, ir<2>
; CHECK-NEXT:   CLONE ir<%iv.plus.3> = add ir<%iv>, ir<3>
; CHECK-NEXT:   CLONE ir<%gep.AB.3> = getelementptr ir<@AB>, ir<0>, ir<%iv.plus.3>
; CHECK-NEXT:   WIDEN ir<%add> = add ir<%AB.0>, ir<%AB.1>
; CHECK-NEXT:   CLONE ir<%gep.CD.0> = getelementptr ir<@CD>, ir<0>, ir<%iv>
; CHECK-NEXT:   CLONE ir<%gep.CD.1> = getelementptr ir<@CD>, ir<0>, ir<%iv.plus.1>
; CHECK-NEXT:   CLONE ir<%gep.CD.2> = getelementptr ir<@CD>, ir<0>, ir<%iv.plus.2>
; CHECK-NEXT:   CLONE ir<%gep.CD.3> = getelementptr ir<@CD>, ir<0>, ir<%iv.plus.3>
; CHECK-NEXT:   INTERLEAVE-GROUP with factor 4 at <badref>, ir<%gep.CD.3>
; CHECK-NEXT:     store ir<%add> to index 0
; CHECK-NEXT:     store ir<1> to index 1
; CHECK-NEXT:     store ir<2> to index 2
; CHECK-NEXT:     store ir<%AB.3> to index 3
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: No successors
; CHECK-NEXT: }
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.AB.0 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %iv
  %AB.0 = load i32, i32* %gep.AB.0, align 4
  %iv.plus.1 = add i64 %iv, 1
  %gep.AB.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %iv.plus.1
  %AB.1 = load i32, i32* %gep.AB.1, align 4
  %iv.plus.2 = add i64 %iv, 2
  %iv.plus.3 = add i64 %iv, 3
  %gep.AB.3 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %iv.plus.3
  %AB.3 = load i32, i32* %gep.AB.3, align 4
  %add = add nsw i32 %AB.0, %AB.1
  %gep.CD.0 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %iv
  store i32 %add, i32* %gep.CD.0, align 4
  %gep.CD.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %iv.plus.1
  store i32 1, i32* %gep.CD.1, align 4
  %gep.CD.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %iv.plus.2
  store i32 2, i32* %gep.CD.2, align 4
  %gep.CD.3 = getelementptr inbounds [1024 x i32], [1024 x i32]* @CD, i64 0, i64 %iv.plus.3
  store i32 %AB.3, i32* %gep.CD.3, align 4
  %iv.next = add nuw nsw i64 %iv, 4
  %cmp = icmp slt i64 %iv.next, 1024
  br i1 %cmp, label %for.body, label %for.end

for.end:
  ret void
}

define float @print_fmuladd_strict(float* %a, float* %b, i64 %n) {
; CHECK-LABEL: Checking a loop in "print_fmuladd_strict"
; CHECK:      VPlan 'Initial VPlan for VF={4},UF>=1' {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT:   WIDEN-INDUCTION %iv = phi 0, %iv.next
; CHECK-NEXT:   WIDEN-REDUCTION-PHI ir<%sum.07> = phi ir<0.000000e+00>, ir<%muladd>
; CHECK-NEXT:   CLONE ir<%arrayidx> = getelementptr ir<%a>, ir<%iv>
; CHECK-NEXT:   WIDEN ir<%l.a> = load ir<%arrayidx>
; CHECK-NEXT:   CLONE ir<%arrayidx2> = getelementptr ir<%b>, ir<%iv>
; CHECK-NEXT:   WIDEN ir<%l.b> = load ir<%arrayidx2>
; CHECK-NEXT:   EMIT vp<[[FMUL:%.+]]> = fmul nnan ninf nsz ir<%l.a> ir<%l.b>
; CHECK-NEXT:   REDUCE ir<[[MULADD:%.+]]> = ir<%sum.07> + nnan ninf nsz reduce.fadd (vp<[[FMUL]]>)
; CHECK-NEXT: No successors
; CHECK-NEXT: }

entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %iv
  %l.a = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %b, i64 %iv
  %l.b = load float, float* %arrayidx2, align 4
  %muladd = tail call nnan ninf nsz float @llvm.fmuladd.f32(float %l.a, float %l.b, float %sum.07)
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret float %muladd
}

declare float @llvm.sqrt.f32(float) nounwind readnone
declare float @llvm.fmuladd.f32(float, float, float)