; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
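; Check that the four interleaved scalar load/add/store chains in the loop
; body are vectorized into a single <4 x i32> load, add, and store, and that
; the running scalar sum is rewritten as an @llvm.vector.reduce.add reduction.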

define i32 @foo(i32* nocapture readonly %diff) #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[M2:%.*]] = alloca [8 x [8 x i32]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast [8 x [8 x i32]]* [[M2]] to i8*
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[A_088:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[OP_EXTRA:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[INDVARS_IV]], 3
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[DIFF:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP2]]
; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = or i64 [[TMP1]], 1
; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP4:%.*]] = or i64 [[TMP1]], 5
; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[TMP1]], 2
; CHECK-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP6:%.*]] = or i64 [[TMP1]], 6
; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP7:%.*]] = or i64 [[TMP1]], 3
; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[ARRAYIDX]] to <4 x i32>*
; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i32>, <4 x i32>* [[TMP8]], align 4
; CHECK-NEXT:    [[TMP10:%.*]] = or i64 [[TMP1]], 7
; CHECK-NEXT:    [[ARRAYIDX44:%.*]] = getelementptr inbounds i32, i32* [[DIFF]], i64 [[TMP10]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[ARRAYIDX2]] to <4 x i32>*
; CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, <4 x i32>* [[TMP11]], align 4
; CHECK-NEXT:    [[TMP13:%.*]] = add nsw <4 x i32> [[TMP12]], [[TMP9]]
; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 1
; CHECK-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 2
; CHECK-NEXT:    [[ARRAYIDX48:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 [[INDVARS_IV]], i64 3
; CHECK-NEXT:    [[TMP14:%.*]] = bitcast i32* [[ARRAYIDX6]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP13]], <4 x i32>* [[TMP14]], align 16
; CHECK-NEXT:    [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP13]])
; CHECK-NEXT:    [[OP_EXTRA]] = add nsw i32 [[TMP15]], [[A_088]]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* [[M2]], i64 0, i64 0
; CHECK-NEXT:    call void @ff([8 x i32]* [[ARRAYDECAY]])
; CHECK-NEXT:    ret i32 [[OP_EXTRA]]
;
entry:
  %m2 = alloca [8 x [8 x i32]], align 16
  %0 = bitcast [8 x [8 x i32]]* %m2 to i8*
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %a.088 = phi i32 [ 0, %entry ], [ %add52, %for.body ]
  %1 = shl i64 %indvars.iv, 3
  %arrayidx = getelementptr inbounds i32, i32* %diff, i64 %1
  %2 = load i32, i32* %arrayidx, align 4
  %3 = or i64 %1, 4
  %arrayidx2 = getelementptr inbounds i32, i32* %diff, i64 %3
  %4 = load i32, i32* %arrayidx2, align 4
  %add3 = add nsw i32 %4, %2
  %arrayidx6 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
  store i32 %add3, i32* %arrayidx6, align 16
  %add10 = add nsw i32 %add3, %a.088
  %5 = or i64 %1, 1
  %arrayidx13 = getelementptr inbounds i32, i32* %diff, i64 %5
  %6 = load i32, i32* %arrayidx13, align 4
  %7 = or i64 %1, 5
  %arrayidx16 = getelementptr inbounds i32, i32* %diff, i64 %7
  %8 = load i32, i32* %arrayidx16, align 4
  %add17 = add nsw i32 %8, %6
  %arrayidx20 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
  store i32 %add17, i32* %arrayidx20, align 4
  %add24 = add nsw i32 %add10, %add17
  %9 = or i64 %1, 2
  %arrayidx27 = getelementptr inbounds i32, i32* %diff, i64 %9
  %10 = load i32, i32* %arrayidx27, align 4
  %11 = or i64 %1, 6
  %arrayidx30 = getelementptr inbounds i32, i32* %diff, i64 %11
  %12 = load i32, i32* %arrayidx30, align 4
  %add31 = add nsw i32 %12, %10
  %arrayidx34 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
  store i32 %add31, i32* %arrayidx34, align 8
  %add38 = add nsw i32 %add24, %add31
  %13 = or i64 %1, 3
  %arrayidx41 = getelementptr inbounds i32, i32* %diff, i64 %13
  %14 = load i32, i32* %arrayidx41, align 4
  %15 = or i64 %1, 7
  %arrayidx44 = getelementptr inbounds i32, i32* %diff, i64 %15
  %16 = load i32, i32* %arrayidx44, align 4
  %add45 = add nsw i32 %16, %14
  %arrayidx48 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
  store i32 %add45, i32* %arrayidx48, align 4
  %add52 = add nsw i32 %add38, %add45
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 8
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  %arraydecay = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 0
  call void @ff([8 x i32]* %arraydecay) #1
  ret i32 %add52
}

declare void @ff([8 x i32]*) #2