; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -O2 -S -mattr=avx < %s | FileCheck %s
; RUN: opt -passes="default<O2>" -S -mattr=avx < %s | FileCheck %s

target triple = "x86_64--"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define i32 @ext_ext_or_reduction_v4i32(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @ext_ext_or_reduction_v4i32(
; CHECK-NEXT:    [[Z:%.*]] = and <4 x i32> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[Z]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %z = and <4 x i32> %x, %y
  %z0 = extractelement <4 x i32> %z, i32 0
  %z1 = extractelement <4 x i32> %z, i32 1
  %z01 = or i32 %z0, %z1
  %z2 = extractelement <4 x i32> %z, i32 2
  %z012 = or i32 %z01, %z2
  %z3 = extractelement <4 x i32> %z, i32 3
  %z0123 = or i32 %z3, %z012
  ret i32 %z0123
}

define i32 @ext_ext_partial_add_reduction_v4i32(<4 x i32> %x) {
; CHECK-LABEL: @ext_ext_partial_add_reduction_v4i32(
; CHECK-NEXT:    [[SHIFT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[SHIFT]], [[X]]
; CHECK-NEXT:    [[SHIFT1:%.*]] = shufflevector <4 x i32> [[X]], <4 x i32> poison, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP2:%.*]] = add <4 x i32> [[TMP1]], [[SHIFT1]]
; CHECK-NEXT:    [[X210:%.*]] = extractelement <4 x i32> [[TMP2]], i64 0
; CHECK-NEXT:    ret i32 [[X210]]
;
  %x0 = extractelement <4 x i32> %x, i32 0
  %x1 = extractelement <4 x i32> %x, i32 1
  %x10 = add i32 %x1, %x0
  %x2 = extractelement <4 x i32> %x, i32 2
  %x210 = add i32 %x2, %x10
  ret i32 %x210
}

define i32 @ext_ext_partial_add_reduction_and_extra_add_v4i32(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @ext_ext_partial_add_reduction_and_extra_add_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x i32> [[Y:%.*]], <4 x i32> [[X:%.*]], <4 x i32> <i32 0, i32 1, i32 2, i32 6>
; CHECK-NEXT:    [[TMP2:%.*]] = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %y0 = extractelement <4 x i32> %y, i32 0
  %y1 = extractelement <4 x i32> %y, i32 1
  %y10 = add i32 %y1, %y0
  %y2 = extractelement <4 x i32> %y, i32 2
  %y210 = add i32 %y2, %y10
  %x2 = extractelement <4 x i32> %x, i32 2
  %x2y210 = add i32 %x2, %y210
  ret i32 %x2y210
}

; PR43953 - https://bugs.llvm.org/show_bug.cgi?id=43953
; We want to end up with a single reduction on the next 4 tests.

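; A rough C equivalent of @TestVectorsEqual (a sketch reconstructed from the IR
; below, not copied from the PR): a sum of absolute element differences is
; accumulated over 4 components and compared against a tolerance.
;
;   int TestVectorsEqual(int *Vec0, int *Vec1, int Tolerance) {
;     int sum = 0;
;     for (int Component = 0; Component < 4; Component++) {
;       int diff = Vec0[Component] - Vec1[Component];
;       sum += (diff >= 0) ? diff : -diff;
;     }
;     return sum <= Tolerance ? 1 : 0;
;   }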
define i32 @TestVectorsEqual(i32* noalias %Vec0, i32* noalias %Vec1, i32 %Tolerance) {
; CHECK-LABEL: @TestVectorsEqual(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[VEC0:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[VEC1:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[TMP4]], i1 true)
; CHECK-NEXT:    [[TMP6:%.*]] = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
; CHECK-NEXT:    [[CMP5_NOT:%.*]] = icmp sle i32 [[TMP6]], [[TOLERANCE:%.*]]
; CHECK-NEXT:    [[COND6:%.*]] = zext i1 [[CMP5_NOT]] to i32
; CHECK-NEXT:    ret i32 [[COND6]]
;
entry:
  br label %for.cond

for.cond:
  %sum.0 = phi i32 [ 0, %entry ], [ %add, %for.inc ]
  %Component.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %Component.0, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %Component.0 to i64
  %arrayidx = getelementptr inbounds i32, i32* %Vec0, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %idxprom1 = sext i32 %Component.0 to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %Vec1, i64 %idxprom1
  %1 = load i32, i32* %arrayidx2, align 4
  %sub = sub nsw i32 %0, %1
  %cmp3 = icmp sge i32 %sub, 0
  br i1 %cmp3, label %cond.true, label %cond.false

cond.true:
  br label %cond.end

cond.false:
  %sub4 = sub nsw i32 0, %sub
  br label %cond.end

cond.end:
  %cond = phi i32 [ %sub, %cond.true ], [ %sub4, %cond.false ]
  %add = add nsw i32 %sum.0, %cond
  br label %for.inc

for.inc:
  %inc = add nsw i32 %Component.0, 1
  br label %for.cond

for.end:
  %cmp5 = icmp sle i32 %sum.0, %Tolerance
  %2 = zext i1 %cmp5 to i64
  %cond6 = select i1 %cmp5, i32 1, i32 0
  ret i32 %cond6
}

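; @TestVectorsEqual_alt is the same loop without the absolute value (roughly
; "sum += Vec0[i] - Vec1[i]" in C, as a sketch); it should still collapse to a
; single vector.reduce.add feeding the unsigned compare against the tolerance.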
define i32 @TestVectorsEqual_alt(i32* noalias %Vec0, i32* noalias %Vec1, i32 %Tolerance) {
; CHECK-LABEL: @TestVectorsEqual_alt(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[VEC0:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[VEC1:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = sub <4 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT:    [[CMP3_NOT:%.*]] = icmp ule i32 [[TMP5]], [[TOLERANCE:%.*]]
; CHECK-NEXT:    [[COND:%.*]] = zext i1 [[CMP3_NOT]] to i32
; CHECK-NEXT:    ret i32 [[COND]]
;
entry:
  br label %for.cond

for.cond:
  %sum.0 = phi i32 [ 0, %entry ], [ %add, %for.inc ]
  %Component.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %Component.0, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %Component.0 to i64
  %arrayidx = getelementptr inbounds i32, i32* %Vec0, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %idxprom1 = sext i32 %Component.0 to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %Vec1, i64 %idxprom1
  %1 = load i32, i32* %arrayidx2, align 4
  %sub = sub i32 %0, %1
  %add = add i32 %sum.0, %sub
  br label %for.inc

for.inc:
  %inc = add nsw i32 %Component.0, 1
  br label %for.cond

for.end:
  %cmp3 = icmp ule i32 %sum.0, %Tolerance
  %2 = zext i1 %cmp3 to i64
  %cond = select i1 %cmp3, i32 1, i32 0
  ret i32 %cond
}

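; A rough C equivalent of @TestVectorsEqualFP (again a sketch from the IR, with
; fast-math assumed on the float operations): the absolute value of each element
; difference is accumulated and compared against the tolerance.
;
;   int TestVectorsEqualFP(float *Vec0, float *Vec1, float Tolerance) {
;     float sum = 0.0f;
;     for (int Component = 0; Component < 4; Component++) {
;       float diff = Vec0[Component] - Vec1[Component];
;       sum += (diff >= 0.0f) ? diff : -diff;
;     }
;     return sum <= Tolerance ? 1 : 0;
;   }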
define i32 @TestVectorsEqualFP(float* noalias %Vec0, float* noalias %Vec1, float %Tolerance) {
; CHECK-LABEL: @TestVectorsEqualFP(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[VEC0:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[VEC1:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = fsub fast <4 x float> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = tail call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
; CHECK-NEXT:    [[CMP4:%.*]] = fcmp fast ole float [[TMP6]], [[TOLERANCE:%.*]]
; CHECK-NEXT:    [[COND5:%.*]] = zext i1 [[CMP4]] to i32
; CHECK-NEXT:    ret i32 [[COND5]]
;
entry:
  br label %for.cond

for.cond:
  %sum.0 = phi float [ 0.000000e+00, %entry ], [ %add, %for.inc ]
  %Component.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %Component.0, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %Component.0 to i64
  %arrayidx = getelementptr inbounds float, float* %Vec0, i64 %idxprom
  %0 = load float, float* %arrayidx, align 4
  %idxprom1 = sext i32 %Component.0 to i64
  %arrayidx2 = getelementptr inbounds float, float* %Vec1, i64 %idxprom1
  %1 = load float, float* %arrayidx2, align 4
  %sub = fsub fast float %0, %1
  %cmp3 = fcmp fast oge float %sub, 0.000000e+00
  br i1 %cmp3, label %cond.true, label %cond.false

cond.true:
  br label %cond.end

cond.false:
  %fneg = fneg fast float %sub
  br label %cond.end

cond.end:
  %cond = phi fast float [ %sub, %cond.true ], [ %fneg, %cond.false ]
  %add = fadd fast float %sum.0, %cond
  br label %for.inc

for.inc:
  %inc = add nsw i32 %Component.0, 1
  br label %for.cond

for.end:
  %cmp4 = fcmp fast ole float %sum.0, %Tolerance
  %2 = zext i1 %cmp4 to i64
  %cond5 = select i1 %cmp4, i32 1, i32 0
  ret i32 %cond5
}

define i32 @TestVectorsEqualFP_alt(float* noalias %Vec0, float* noalias %Vec1, float %Tolerance) {
; CHECK-LABEL: @TestVectorsEqualFP_alt(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[VEC0:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[VEC1:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = fsub fast <4 x float> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = tail call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP4]])
; CHECK-NEXT:    [[CMP3:%.*]] = fcmp fast ole float [[TMP5]], [[TOLERANCE:%.*]]
; CHECK-NEXT:    [[COND:%.*]] = zext i1 [[CMP3]] to i32
; CHECK-NEXT:    ret i32 [[COND]]
;
entry:
  br label %for.cond

for.cond:
  %sum.0 = phi float [ 0.000000e+00, %entry ], [ %add, %for.inc ]
  %Component.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %Component.0, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %Component.0 to i64
  %arrayidx = getelementptr inbounds float, float* %Vec0, i64 %idxprom
  %0 = load float, float* %arrayidx, align 4
  %idxprom1 = sext i32 %Component.0 to i64
  %arrayidx2 = getelementptr inbounds float, float* %Vec1, i64 %idxprom1
  %1 = load float, float* %arrayidx2, align 4
  %sub = fsub fast float %0, %1
  %add = fadd fast float %sum.0, %sub
  br label %for.inc

for.inc:
  %inc = add nsw i32 %Component.0, 1
  br label %for.cond

for.end:
  %cmp3 = fcmp fast ole float %sum.0, %Tolerance
  %2 = zext i1 %cmp3 to i64
  %cond = select i1 %cmp3, i32 1, i32 0
  ret i32 %cond
}

; PR43745 - https://bugs.llvm.org/show_bug.cgi?id=43745
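
; A rough C equivalent of @cmp_lt_gt (a sketch reconstructed from the IR, not
; taken from the PR); the constant 0x3EB0C6F7A0B5ED8D is approximately 1e-6,
; and the two quotients resemble quadratic-formula roots:
;
;   bool cmp_lt_gt(double a, double b, double c) {
;     double t0 = (-b + c) / (2.0 * a);
;     double t1 = (-b - c) / (2.0 * a);
;     if ((t0 < 1e-6 && t1 < 1e-6) || (t0 > 1.0 && t1 > 1.0))
;       return false;
;     return true;
;   }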

; FIXME: this should be vectorized
define i1 @cmp_lt_gt(double %a, double %b, double %c) {
; CHECK-LABEL: @cmp_lt_gt(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[FNEG:%.*]] = fneg double [[B:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[A:%.*]], 2.000000e+00
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[C:%.*]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[FNEG]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> poison, double [[C]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[B]], i64 1
; CHECK-NEXT:    [[TMP4:%.*]] = fsub <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> poison, double [[MUL]], i64 0
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP7:%.*]] = fdiv <2 x double> [[TMP4]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x double> [[TMP7]], i64 1
; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt double [[TMP8]], 0x3EB0C6F7A0B5ED8D
; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x double> [[TMP7]], i64 0
; CHECK-NEXT:    [[CMP4:%.*]] = fcmp olt double [[TMP9]], 0x3EB0C6F7A0B5ED8D
; CHECK-NEXT:    [[OR_COND:%.*]] = select i1 [[CMP]], i1 [[CMP4]], i1 false
; CHECK-NEXT:    br i1 [[OR_COND]], label [[CLEANUP:%.*]], label [[LOR_LHS_FALSE:%.*]]
; CHECK:       lor.lhs.false:
; CHECK-NEXT:    [[TMP10:%.*]] = fcmp ule <2 x double> [[TMP7]], <double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x i1> [[TMP10]], i64 0
; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x i1> [[TMP10]], i64 1
; CHECK-NEXT:    [[OR_COND1:%.*]] = select i1 [[TMP12]], i1 true, i1 [[TMP11]]
; CHECK-NEXT:    br label [[CLEANUP]]
; CHECK:       cleanup:
; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[OR_COND1]], [[LOR_LHS_FALSE]] ]
; CHECK-NEXT:    ret i1 [[RETVAL_0]]
;
entry:
  %fneg = fneg double %b
  %add = fadd double %fneg, %c
  %mul = fmul double 2.0, %a
  %div = fdiv double %add, %mul
  %fneg1 = fneg double %b
  %sub = fsub double %fneg1, %c
  %mul2 = fmul double 2.0, %a
  %div3 = fdiv double %sub, %mul2
  %cmp = fcmp olt double %div, 0x3EB0C6F7A0B5ED8D
  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false

land.lhs.true:
  %cmp4 = fcmp olt double %div3, 0x3EB0C6F7A0B5ED8D
  br i1 %cmp4, label %if.then, label %lor.lhs.false

lor.lhs.false:
  %cmp5 = fcmp ogt double %div, 1.0
  br i1 %cmp5, label %land.lhs.true6, label %if.end

land.lhs.true6:
  %cmp7 = fcmp ogt double %div3, 1.0
  br i1 %cmp7, label %if.then, label %if.end

if.then:
  br label %cleanup

if.end:
  br label %cleanup

cleanup:
  %retval.0 = phi i1 [ false, %if.then ], [ true, %if.end ]
  ret i1 %retval.0
}