; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; At this point we can't vectorize only parts of the tree.

define i32 @test(double* nocapture %A, i8* nocapture %B) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[B:%.*]] to <2 x i8>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = add <2 x i8> [[TMP1]], <i8 3, i8 3>
; CHECK-NEXT:    [[TMP3:%.*]] = sitofp <2 x i8> [[TMP2]] to <2 x double>
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP3]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = fadd <2 x double> [[TMP4]], <double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT:    [[TMP6:%.*]] = fmul <2 x double> [[TMP5]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], <double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT:    [[TMP8:%.*]] = fmul <2 x double> [[TMP7]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = fadd <2 x double> [[TMP8]], <double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT:    [[TMP10:%.*]] = fmul <2 x double> [[TMP9]], [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = fadd <2 x double> [[TMP10]], <double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT:    [[TMP12:%.*]] = fmul <2 x double> [[TMP11]], [[TMP11]]
; CHECK-NEXT:    [[TMP13:%.*]] = fadd <2 x double> [[TMP12]], <double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT:    [[TMP14:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP13]], <2 x double>* [[TMP14]], align 8
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load i8, i8* %B, align 1
  %arrayidx1 = getelementptr inbounds i8, i8* %B, i64 1
  %1 = load i8, i8* %arrayidx1, align 1
  %add = add i8 %0, 3
  %add4 = add i8 %1, 3
  %conv6 = sitofp i8 %add to double
  %conv7 = sitofp i8 %add4 to double
  %mul = fmul double %conv6, %conv6
  %add8 = fadd double %mul, 1.000000e+00
  %mul9 = fmul double %conv7, %conv7
  %add10 = fadd double %mul9, 1.000000e+00
  %mul11 = fmul double %add8, %add8
  %add12 = fadd double %mul11, 1.000000e+00
  %mul13 = fmul double %add10, %add10
  %add14 = fadd double %mul13, 1.000000e+00
  %mul15 = fmul double %add12, %add12
  %add16 = fadd double %mul15, 1.000000e+00
  %mul17 = fmul double %add14, %add14
  %add18 = fadd double %mul17, 1.000000e+00
  %mul19 = fmul double %add16, %add16
  %add20 = fadd double %mul19, 1.000000e+00
  %mul21 = fmul double %add18, %add18
  %add22 = fadd double %mul21, 1.000000e+00
  %mul23 = fmul double %add20, %add20
  %add24 = fadd double %mul23, 1.000000e+00
  %mul25 = fmul double %add22, %add22
  %add26 = fadd double %mul25, 1.000000e+00
  store double %add24, double* %A, align 8
  %arrayidx28 = getelementptr inbounds double, double* %A, i64 1
  store double %add26, double* %arrayidx28, align 8
  ret i32 undef
}
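
; For readability, a rough scalar C equivalent of @test follows. This is a
; sketch reconstructed from the IR for illustration only; the test does not
; ship its C source, so the names and the rolled-up loop are assumptions.
; The IR above is the fully unrolled form of the loop, and `signed char` is
; used to match the `sitofp` (signed) conversions.
;
;   int test(double *A, signed char *B) {
;     double d0 = (double)(signed char)(B[0] + 3);
;     double d1 = (double)(signed char)(B[1] + 3);
;     for (int i = 0; i < 5; ++i) {   /* unrolled into the fmul/fadd chain */
;       d0 = d0 * d0 + 1.0;           /* lane 0 of the <2 x double> ops */
;       d1 = d1 * d1 + 1.0;           /* lane 1 of the <2 x double> ops */
;     }
;     A[0] = d0;                      /* packed into one <2 x double> store */
;     A[1] = d1;
;     return 0;                       /* the IR returns undef */
;   }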