; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -S %s | FileCheck --check-prefix=VF1 %s
; RUN: opt -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -S %s | FileCheck --check-prefix=VF2 %s

@f = external dso_local global i32, align 4

; Test a loop with an integer induction variable whose next value is computed
; as the ptrtoint of a pointer induction variable, both when interleaving
; (VF=1, UF=2) and when vectorizing (VF=2, UF=1).
define void @int_iv_based_on_pointer_iv(i8* %A) {
; VF1-LABEL: @int_iv_based_on_pointer_iv(
; VF1:       vector.body:
; VF1-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; VF1-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
; VF1-NEXT:    [[INDUCTION:%.*]] = add i64 [[OFFSET_IDX]], 0
; VF1-NEXT:    [[INDUCTION3:%.*]] = add i64 [[OFFSET_IDX]], 4
; VF1-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 0
; VF1-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i32, i32* null, i64 [[TMP3]]
; VF1-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 1
; VF1-NEXT:    [[NEXT_GEP4:%.*]] = getelementptr i32, i32* null, i64 [[TMP4]]
; VF1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[NEXT_GEP]], i64 1
; VF1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[NEXT_GEP4]], i64 1
; VF1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 [[INDUCTION]]
; VF1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 [[INDUCTION3]]
; VF1-NEXT:    store i8 0, i8* [[TMP7]], align 1
; VF1-NEXT:    store i8 0, i8* [[TMP8]], align 1
; VF1-NEXT:    [[TMP9:%.*]] = ptrtoint i32* [[TMP5]] to i64
; VF1-NEXT:    [[TMP10:%.*]] = ptrtoint i32* [[TMP6]] to i64
; VF1-NEXT:    [[TMP11:%.*]] = sub i64 ptrtoint (i32* @f to i64), [[TMP9]]
; VF1-NEXT:    [[TMP12:%.*]] = sub i64 ptrtoint (i32* @f to i64), [[TMP10]]
; VF1-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF1-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]],
; VF1-NEXT:    br i1 [[TMP13]], label %middle.block, label %vector.body
;
; VF2-LABEL: @int_iv_based_on_pointer_iv(
; VF2:       vector.body:
; VF2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; VF2-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
; VF2-NEXT:    [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 0
; VF2-NEXT:    [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 4
; VF2-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 0
; VF2-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i32, i32* null, i64 [[TMP5]]
; VF2-NEXT:    [[TMP6:%.*]] = add i64 [[INDEX]], 1
; VF2-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr i32, i32* null, i64 [[TMP6]]
; VF2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[NEXT_GEP]], i64 1
; VF2-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[NEXT_GEP3]], i64 1
; VF2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 [[TMP3]]
; VF2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 [[TMP4]]
; VF2-NEXT:    store i8 0, i8* [[TMP9]], align 1
; VF2-NEXT:    store i8 0, i8* [[TMP10]], align 1
; VF2-NEXT:    [[TMP11:%.*]] = ptrtoint i32* [[TMP7]] to i64
; VF2-NEXT:    [[TMP12:%.*]] = ptrtoint i32* [[TMP8]] to i64
; VF2-NEXT:    [[TMP13:%.*]] = sub i64 ptrtoint (i32* @f to i64), [[TMP11]]
; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]],
; VF2-NEXT:    br i1 [[TMP14]], label %middle.block, label %vector.body
;
entry:
  br label %loop

loop:
  %iv.int = phi i64 [ 0, %entry ], [ %iv.int.next, %loop ]
  %iv.ptr = phi i32* [ null, %entry ], [ %iv.ptr.next, %loop ]
  %iv.ptr.next = getelementptr inbounds i32, i32* %iv.ptr, i64 1
  %gep.A = getelementptr inbounds i8, i8* %A, i64 %iv.int
  store i8 0, i8* %gep.A
  %iv.int.next = ptrtoint i32* %iv.ptr.next to i64
  %sub.ptr.sub = sub i64 ptrtoint (i32* @f to i64), %iv.int.next
  %cmp = icmp sgt i64 %sub.ptr.sub, 0
  br i1 %cmp, label %loop, label %exit

exit:
  ret void
}