; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; Keep trying to vectorize the basic block even after we have already found a vectorized store.
define void @test1(double* %a, double* %b, double* %c, double* %d) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[B]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C:%.*]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[C]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[A]] to <4 x i32>*
; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* [[TMP6]], align 8
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast double* [[B]] to <4 x i32>*
; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[TMP8]], align 8
; CHECK-NEXT:    [[TMP10:%.*]] = mul <4 x i32> [[TMP7]], [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast double* [[D:%.*]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP10]], <4 x i32>* [[TMP11]], align 8
; CHECK-NEXT:    ret void
;
entry:
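  ; Scalar double loads, multiplies, and stores; per the CHECK lines above, the
  ; SLP vectorizer is expected to combine these into <2 x double> operations.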
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store double %mul5, double* %arrayidx5, align 8
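  ; The code below is already vectorized (<4 x i32>); finding its vector store
  ; must not stop the pass from vectorizing the scalar stores above.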
  %0 = bitcast double* %a to <4 x i32>*
  %1 = load <4 x i32>, <4 x i32>* %0, align 8
  %2 = bitcast double* %b to <4 x i32>*
  %3 = load <4 x i32>, <4 x i32>* %2, align 8
  %4 = mul <4 x i32> %1, %3
  %5 = bitcast double* %d to <4 x i32>*
  store <4 x i32> %4, <4 x i32>* %5, align 8
  ret void
}