; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; PR15794
; Incorrect addition of llvm.mem.parallel_loop_access metadata is undefined
; behaviour. The vectorizer ignores the memory dependency checks and goes ahead
; and vectorizes this loop with uniform stores, which have an output dependency.

; void foo(int *a, int *b, int k, int m) {
;   for (int i = 0; i < m; i++) {
;     for (int j = 0; j < m; j++) {
;       a[i] = a[i + j + k] + 1; <<<
;     }
;     b[i] = b[i] + 3;
;   }
; }

; Function Attrs: nounwind uwtable
define void @foo(i32* nocapture %a, i32* nocapture %b, i32 %k, i32 %m) #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP27:%.*]] = icmp sgt i32 [[M:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP27]], label [[FOR_BODY3_LR_PH_US_PREHEADER:%.*]], label [[FOR_END15:%.*]]
; CHECK:       for.body3.lr.ph.us.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[M]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[K:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT:    br label [[FOR_BODY3_LR_PH_US:%.*]]
; CHECK:       for.end.us:
; CHECK-NEXT:    [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV33:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX9_US]], align 4, !llvm.mem.parallel_loop_access !0
; CHECK-NEXT:    [[ADD10_US:%.*]] = add nsw i32 [[TMP4]], 3
; CHECK-NEXT:    store i32 [[ADD10_US]], i32* [[ARRAYIDX9_US]], align 4, !llvm.mem.parallel_loop_access !0
; CHECK-NEXT:    [[INDVARS_IV_NEXT34:%.*]] = add i64 [[INDVARS_IV33]], 1
; CHECK-NEXT:    [[LFTR_WIDEIV35:%.*]] = trunc i64 [[INDVARS_IV_NEXT34]] to i32
; CHECK-NEXT:    [[EXITCOND36:%.*]] = icmp eq i32 [[LFTR_WIDEIV35]], [[M]]
; CHECK-NEXT:    br i1 [[EXITCOND36]], label [[FOR_END15_LOOPEXIT:%.*]], label [[FOR_BODY3_LR_PH_US]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       for.body3.us:
; CHECK-NEXT:    [[INDVARS_IV29:%.*]] = phi i64 [ [[BC_RESUME_VAL:%.*]], [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT30:%.*]], [[FOR_BODY3_US:%.*]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[INDVARS_IV29]] to i32
; CHECK-NEXT:    [[ADD4_US:%.*]] = add i32 [[ADD_US:%.*]], [[TMP5]]
; CHECK-NEXT:    [[IDXPROM_US:%.*]] = sext i32 [[ADD4_US]] to i64
; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4, !llvm.mem.parallel_loop_access !0
; CHECK-NEXT:    [[ADD5_US:%.*]] = add nsw i32 [[TMP6]], 1
; CHECK-NEXT:    store i32 [[ADD5_US]], i32* [[ARRAYIDX7_US:%.*]], align 4, !llvm.mem.parallel_loop_access !0
; CHECK-NEXT:    [[INDVARS_IV_NEXT30]] = add i64 [[INDVARS_IV29]], 1
; CHECK-NEXT:    [[LFTR_WIDEIV31:%.*]] = trunc i64 [[INDVARS_IV_NEXT30]] to i32
; CHECK-NEXT:    [[EXITCOND32:%.*]] = icmp eq i32 [[LFTR_WIDEIV31]], [[M]]
; CHECK-NEXT:    br i1 [[EXITCOND32]], label [[FOR_END_US:%.*]], label [[FOR_BODY3_US]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       for.body3.lr.ph.us:
; CHECK-NEXT:    [[INDVARS_IV33]] = phi i64 [ [[INDVARS_IV_NEXT34]], [[FOR_END_US]] ], [ 0, [[FOR_BODY3_LR_PH_US_PREHEADER]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[TMP1]], [[INDVARS_IV33]]
; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
; CHECK-NEXT:    [[TMP9:%.*]] = trunc i64 [[INDVARS_IV33]] to i32
; CHECK-NEXT:    [[ADD_US]] = add i32 [[TMP9]], [[K]]
; CHECK-NEXT:    [[ARRAYIDX7_US]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV33]]
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP0]]
; CHECK-NEXT:    [[TMP11:%.*]] = sub i32 [[TMP8]], [[TMP0]]
; CHECK-NEXT:    [[TMP12:%.*]] = icmp sgt i32 [[TMP11]], [[TMP8]]
; CHECK-NEXT:    [[TMP13:%.*]] = icmp slt i32 [[TMP10]], [[TMP8]]
; CHECK-NEXT:    [[TMP14:%.*]] = select i1 false, i1 [[TMP12]], i1 [[TMP13]]
; CHECK-NEXT:    [[TMP15:%.*]] = or i1 false, [[TMP14]]
; CHECK-NEXT:    br i1 [[TMP15]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP16]], 0
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[ADD_US]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = sext i32 [[TMP18]] to i64
; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 0
; CHECK-NEXT:    [[TMP22:%.*]] = bitcast i32* [[TMP21]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP22]], align 4
; CHECK-NEXT:    [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <4 x i32> [[TMP23]], i32 0
; CHECK-NEXT:    store i32 [[TMP24]], i32* [[ARRAYIDX7_US]], align 4, !llvm.mem.parallel_loop_access !0
; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <4 x i32> [[TMP23]], i32 1
; CHECK-NEXT:    store i32 [[TMP25]], i32* [[ARRAYIDX7_US]], align 4, !llvm.mem.parallel_loop_access !0
; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <4 x i32> [[TMP23]], i32 2
; CHECK-NEXT:    store i32 [[TMP26]], i32* [[ARRAYIDX7_US]], align 4, !llvm.mem.parallel_loop_access !0
; CHECK-NEXT:    [[TMP27:%.*]] = extractelement <4 x i32> [[TMP23]], i32 3
; CHECK-NEXT:    store i32 [[TMP27]], i32* [[ARRAYIDX7_US]], align 4, !llvm.mem.parallel_loop_access !0
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END_US]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY3_LR_PH_US]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY3_US]]
; CHECK:       for.end15.loopexit:
; CHECK-NEXT:    br label [[FOR_END15]]
; CHECK:       for.end15:
; CHECK-NEXT:    ret void
;
entry:
  %cmp27 = icmp sgt i32 %m, 0
  br i1 %cmp27, label %for.body3.lr.ph.us, label %for.end15

for.end.us:                                       ; preds = %for.body3.us
  %arrayidx9.us = getelementptr inbounds i32, i32* %b, i64 %indvars.iv33
  %0 = load i32, i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
  %add10.us = add nsw i32 %0, 3
  store i32 %add10.us, i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
  %indvars.iv.next34 = add i64 %indvars.iv33, 1
  %lftr.wideiv35 = trunc i64 %indvars.iv.next34 to i32
  %exitcond36 = icmp eq i32 %lftr.wideiv35, %m
  br i1 %exitcond36, label %for.end15, label %for.body3.lr.ph.us, !llvm.loop !5

for.body3.us:                                     ; preds = %for.body3.us, %for.body3.lr.ph.us
  %indvars.iv29 = phi i64 [ 0, %for.body3.lr.ph.us ], [ %indvars.iv.next30, %for.body3.us ]
  %1 = trunc i64 %indvars.iv29 to i32
  %add4.us = add i32 %add.us, %1
  %idxprom.us = sext i32 %add4.us to i64
  %arrayidx.us = getelementptr inbounds i32, i32* %a, i64 %idxprom.us
  %2 = load i32, i32* %arrayidx.us, align 4, !llvm.mem.parallel_loop_access !3
  %add5.us = add nsw i32 %2, 1
  store i32 %add5.us, i32* %arrayidx7.us, align 4, !llvm.mem.parallel_loop_access !3
  %indvars.iv.next30 = add i64 %indvars.iv29, 1
  %lftr.wideiv31 = trunc i64 %indvars.iv.next30 to i32
  %exitcond32 = icmp eq i32 %lftr.wideiv31, %m
  br i1 %exitcond32, label %for.end.us, label %for.body3.us, !llvm.loop !4

for.body3.lr.ph.us:                               ; preds = %for.end.us, %entry
  %indvars.iv33 = phi i64 [ %indvars.iv.next34, %for.end.us ], [ 0, %entry ]
  %3 = trunc i64 %indvars.iv33 to i32
  %add.us = add i32 %3, %k
  %arrayidx7.us = getelementptr inbounds i32, i32* %a, i64 %indvars.iv33
  br label %for.body3.us

for.end15:                                        ; preds = %for.end.us, %entry
  ret void
}

; Same test as above, but without the invalid parallel_loop_access metadata.

; Here the vectorizer performs the memory dependence checks and decides it is
; unsafe to vectorize.
define void @no-par-mem-metadata(i32* nocapture %a, i32* nocapture %b, i32 %k, i32 %m) #0 {
; CHECK-LABEL: @no-par-mem-metadata(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP27:%.*]] = icmp sgt i32 [[M:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP27]], label [[FOR_BODY3_LR_PH_US_PREHEADER:%.*]], label [[FOR_END15:%.*]]
; CHECK:       for.body3.lr.ph.us.preheader:
; CHECK-NEXT:    br label [[FOR_BODY3_LR_PH_US:%.*]]
; CHECK:       for.end.us:
; CHECK-NEXT:    [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV33:%.*]]
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX9_US]], align 4
; CHECK-NEXT:    [[ADD10_US:%.*]] = add nsw i32 [[TMP0]], 3
; CHECK-NEXT:    store i32 [[ADD10_US]], i32* [[ARRAYIDX9_US]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT34:%.*]] = add i64 [[INDVARS_IV33]], 1
; CHECK-NEXT:    [[LFTR_WIDEIV35:%.*]] = trunc i64 [[INDVARS_IV_NEXT34]] to i32
; CHECK-NEXT:    [[EXITCOND36:%.*]] = icmp eq i32 [[LFTR_WIDEIV35]], [[M]]
; CHECK-NEXT:    br i1 [[EXITCOND36]], label [[FOR_END15_LOOPEXIT:%.*]], label [[FOR_BODY3_LR_PH_US]], !llvm.loop [[LOOP2]]
; CHECK:       for.body3.us:
; CHECK-NEXT:    [[INDVARS_IV29:%.*]] = phi i64 [ 0, [[FOR_BODY3_LR_PH_US]] ], [ [[INDVARS_IV_NEXT30:%.*]], [[FOR_BODY3_US:%.*]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV29]] to i32
; CHECK-NEXT:    [[ADD4_US:%.*]] = add i32 [[ADD_US:%.*]], [[TMP1]]
; CHECK-NEXT:    [[IDXPROM_US:%.*]] = sext i32 [[ADD4_US]] to i64
; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IDXPROM_US]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4
; CHECK-NEXT:    [[ADD5_US:%.*]] = add nsw i32 [[TMP2]], 1
; CHECK-NEXT:    store i32 [[ADD5_US]], i32* [[ARRAYIDX7_US:%.*]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT30]] = add i64 [[INDVARS_IV29]], 1
; CHECK-NEXT:    [[LFTR_WIDEIV31:%.*]] = trunc i64 [[INDVARS_IV_NEXT30]] to i32
; CHECK-NEXT:    [[EXITCOND32:%.*]] = icmp eq i32 [[LFTR_WIDEIV31]], [[M]]
; CHECK-NEXT:    br i1 [[EXITCOND32]], label [[FOR_END_US:%.*]], label [[FOR_BODY3_US]], !llvm.loop [[LOOP1:![0-9]+]]
; CHECK:       for.body3.lr.ph.us:
; CHECK-NEXT:    [[INDVARS_IV33]] = phi i64 [ [[INDVARS_IV_NEXT34]], [[FOR_END_US]] ], [ 0, [[FOR_BODY3_LR_PH_US_PREHEADER]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[INDVARS_IV33]] to i32
; CHECK-NEXT:    [[ADD_US]] = add i32 [[TMP3]], [[K:%.*]]
; CHECK-NEXT:    [[ARRAYIDX7_US]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV33]]
; CHECK-NEXT:    br label [[FOR_BODY3_US]]
; CHECK:       for.end15.loopexit:
; CHECK-NEXT:    br label [[FOR_END15]]
; CHECK:       for.end15:
; CHECK-NEXT:    ret void
;
entry:
  %cmp27 = icmp sgt i32 %m, 0
  br i1 %cmp27, label %for.body3.lr.ph.us, label %for.end15

for.end.us:                                       ; preds = %for.body3.us
  %arrayidx9.us = getelementptr inbounds i32, i32* %b, i64 %indvars.iv33
  %0 = load i32, i32* %arrayidx9.us, align 4
  %add10.us = add nsw i32 %0, 3
  store i32 %add10.us, i32* %arrayidx9.us, align 4
  %indvars.iv.next34 = add i64 %indvars.iv33, 1
  %lftr.wideiv35 = trunc i64 %indvars.iv.next34 to i32
  %exitcond36 = icmp eq i32 %lftr.wideiv35, %m
  br i1 %exitcond36, label %for.end15, label %for.body3.lr.ph.us, !llvm.loop !5

for.body3.us:                                     ; preds = %for.body3.us, %for.body3.lr.ph.us
  %indvars.iv29 = phi i64 [ 0, %for.body3.lr.ph.us ], [ %indvars.iv.next30, %for.body3.us ]
  %1 = trunc i64 %indvars.iv29 to i32
  %add4.us = add i32 %add.us, %1
  %idxprom.us = sext i32 %add4.us to i64
  %arrayidx.us = getelementptr inbounds i32, i32* %a, i64 %idxprom.us
  %2 = load i32, i32* %arrayidx.us, align 4
  %add5.us = add nsw i32 %2, 1
  store i32 %add5.us, i32* %arrayidx7.us, align 4
  %indvars.iv.next30 = add i64 %indvars.iv29, 1
  %lftr.wideiv31 = trunc i64 %indvars.iv.next30 to i32
  %exitcond32 = icmp eq i32 %lftr.wideiv31, %m
  br i1 %exitcond32, label %for.end.us, label %for.body3.us, !llvm.loop !4

for.body3.lr.ph.us:                               ; preds = %for.end.us, %entry
  %indvars.iv33 = phi i64 [ %indvars.iv.next34, %for.end.us ], [ 0, %entry ]
  %3 = trunc i64 %indvars.iv33 to i32
  %add.us = add i32 %3, %k
  %arrayidx7.us = getelementptr inbounds i32, i32* %a, i64 %indvars.iv33
  br label %for.body3.us

for.end15:                                        ; preds = %for.end.us, %entry
  ret void
}

attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }

!3 = !{!4, !5}
!4 = !{!4}
!5 = !{!5}
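
; !4 and !5 are self-referential loop-identifier nodes for the inner and outer
; loops, and !3 lists both of them. The !llvm.mem.parallel_loop_access tags in
; @foo therefore claim that every annotated access is parallel with respect to
; both loops, which is incorrect here because the store to a[i] carries an
; output dependence (PR15794). @no-par-mem-metadata is the same IR without
; those tags, so the vectorizer's dependence checks reject it.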