; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -instcombine -S | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"

;
define void @vector_gep_stored(i32** %a, i32 *%b, i64 %n) {
; CHECK-LABEL: @vector_gep_stored(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[SMAX]], 9223372036854775804
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], <4 x i64> [[VEC_IND]]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32*, i32** [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32** [[TMP1]] to <4 x i32*>*
; CHECK-NEXT:    store <4 x i32*> [[TMP0]], <4 x i32*>* [[TMP2]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[VAR0:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT:    [[VAR1:%.*]] = getelementptr inbounds i32*, i32** [[A]], i64 [[I]]
; CHECK-NEXT:    store i32* [[VAR0]], i32** [[VAR1]], align 8
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT:    [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %var0 = getelementptr inbounds i32, i32* %b, i64 %i
  %var1 = getelementptr inbounds i32*, i32** %a, i64 %i
  store i32* %var0, i32** %var1, align 8
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}

;
define void @uniform_vector_gep_stored(i32** %a, i32 *%b, i64 %n) {
; CHECK-LABEL: @uniform_vector_gep_stored(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[SMAX]], 9223372036854775804
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32*> poison, i32* [[TMP0]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32*> [[DOTSPLATINSERT]], <4 x i32*> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32*, i32** [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32** [[TMP1]] to <4 x i32*>*
; CHECK-NEXT:    store <4 x i32*> [[DOTSPLAT]], <4 x i32*>* [[TMP2]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[VAR0:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 1
; CHECK-NEXT:    [[VAR1:%.*]] = getelementptr inbounds i32*, i32** [[A]], i64 [[I]]
; CHECK-NEXT:    store i32* [[VAR0]], i32** [[VAR1]], align 8
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT:    [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %var0 = getelementptr inbounds i32, i32* %b, i64 1
  %var1 = getelementptr inbounds i32*, i32** %a, i64 %i
  store i32* %var0, i32** %var1, align 8
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}