; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

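; Each loop iteration computes four sums diff[8*i + j] + diff[8*i + 4 + j] (j = 0..3),
; stores them into the first four elements of row i of the local 8x8 matrix %m2, and
; adds them into a running scalar total. The CHECK lines verify that the SLP vectorizer
; rewrites this into <4 x i32> loads, a vector add and store, and an
; @llvm.vector.reduce.add horizontal reduction feeding the accumulator.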
define i32 @foo(i32* nocapture readonly %diff) #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[M2:%.*]] = alloca [8 x [8 x i32]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast [8 x [8 x i32]]* [[M2]] to i8*
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[A_088:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[INDVARS_IV]], 3
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[DIFF:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP2]]
; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[ARRAYIDX]] to <4 x i32>*
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i32* [[ARRAYIDX2]] to <4 x i32>*
; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* [[TMP5]], align 4
; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <4 x i32> [[TMP6]], [[TMP4]]
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[ARRAYIDX6]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 16
; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP7]])
; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP9]], [[A_088]]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 0
; CHECK-NEXT:    call void @ff([8 x i32]* [[ARRAYDECAY]])
; CHECK-NEXT:    ret i32 [[OP_RDX]]
;
entry:
  %m2 = alloca [8 x [8 x i32]], align 16
  %0 = bitcast [8 x [8 x i32]]* %m2 to i8*
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %a.088 = phi i32 [ 0, %entry ], [ %add52, %for.body ]
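  ; Lane 0: m2[i][0] = diff[8*i + 0] + diff[8*i + 4], added to the running sum carried in %a.088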
  %1 = shl i64 %indvars.iv, 3
  %arrayidx = getelementptr inbounds i32, i32* %diff, i64 %1
  %2 = load i32, i32* %arrayidx, align 4
  %3 = or i64 %1, 4
  %arrayidx2 = getelementptr inbounds i32, i32* %diff, i64 %3
  %4 = load i32, i32* %arrayidx2, align 4
  %add3 = add nsw i32 %4, %2
  %arrayidx6 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
  store i32 %add3, i32* %arrayidx6, align 16
  %add10 = add nsw i32 %add3, %a.088
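  ; Lane 1: m2[i][1] = diff[8*i + 1] + diff[8*i + 5]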
  %5 = or i64 %1, 1
  %arrayidx13 = getelementptr inbounds i32, i32* %diff, i64 %5
  %6 = load i32, i32* %arrayidx13, align 4
  %7 = or i64 %1, 5
  %arrayidx16 = getelementptr inbounds i32, i32* %diff, i64 %7
  %8 = load i32, i32* %arrayidx16, align 4
  %add17 = add nsw i32 %8, %6
  %arrayidx20 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
  store i32 %add17, i32* %arrayidx20, align 4
  %add24 = add nsw i32 %add10, %add17
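  ; Lane 2: m2[i][2] = diff[8*i + 2] + diff[8*i + 6]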
  %9 = or i64 %1, 2
  %arrayidx27 = getelementptr inbounds i32, i32* %diff, i64 %9
  %10 = load i32, i32* %arrayidx27, align 4
  %11 = or i64 %1, 6
  %arrayidx30 = getelementptr inbounds i32, i32* %diff, i64 %11
  %12 = load i32, i32* %arrayidx30, align 4
  %add31 = add nsw i32 %12, %10
  %arrayidx34 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
  store i32 %add31, i32* %arrayidx34, align 8
  %add38 = add nsw i32 %add24, %add31
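  ; Lane 3: m2[i][3] = diff[8*i + 3] + diff[8*i + 7]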
  %13 = or i64 %1, 3
  %arrayidx41 = getelementptr inbounds i32, i32* %diff, i64 %13
  %14 = load i32, i32* %arrayidx41, align 4
  %15 = or i64 %1, 7
  %arrayidx44 = getelementptr inbounds i32, i32* %diff, i64 %15
  %16 = load i32, i32* %arrayidx44, align 4
  %add45 = add nsw i32 %16, %14
  %arrayidx48 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
  store i32 %add45, i32* %arrayidx48, align 4
  %add52 = add nsw i32 %add38, %add45
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 8
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  %arraydecay = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 0
  call void @ff([8 x i32]* %arraydecay) #1
  ret i32 %add52
}
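
; %m2 escapes through the call to @ff, so the per-row stores in the loop are not dead
; and must be kept alongside the reduction.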

declare void @ff([8 x i32]*) #2