; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=aarch64 -loop-vectorize --force-vector-interleave=1 -S | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; The test checks that scalarized code is not generated for SVE.
; It creates a scenario where the gep instruction is used outside
; the loop, preventing the gep (and consequently the loop induction
; update variable) from being classified as 'uniform'.
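;
; A rough C-level sketch of the pattern being tested (hypothetical source,
; for illustration only; it is not the actual origin of this IR):
;
;   void test_no_scalarization(long *a, int idx, int n) {
;     long *p;
;     int i = idx;
;     do {
;       p = &a[i];                 // the gep
;       double d = *(double *)p;   // loaded as double via a bitcast
;       (void)d;
;       i++;
;     } while (i < n);
;     *p = 1;                      // last gep value is used outside the loop
;   }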

define void @test_no_scalarization(i64* %a, i32 %idx, i32 %n) #0 {
; CHECK-LABEL: @test_no_scalarization(
; CHECK-NEXT:  L.entry:
; CHECK-NEXT:    [[TMP0:%.*]] = add nsw i32 [[IDX:%.*]], 1
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 [[TMP0]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 [[SMAX]], [[IDX]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i32 [[TMP2]], 2
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], [[TMP3]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP5:%.*]] = mul i32 [[TMP4]], 2
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], [[TMP5]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]]
; CHECK-NEXT:    [[IND_END:%.*]] = add i32 [[IDX]], [[N_VEC]]
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[IDX]], i32 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
; CHECK-NEXT:    [[TMP7:%.*]] = add <vscale x 2 x i32> [[TMP6]], zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = mul <vscale x 2 x i32> [[TMP7]], shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i32> [[DOTSPLAT]], [[TMP8]]
; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP10:%.*]] = mul i32 [[TMP9]], 2
; CHECK-NEXT:    [[TMP11:%.*]] = mul i32 1, [[TMP10]]
; CHECK-NEXT:    [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP11]], i32 0
; CHECK-NEXT:    [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i64, i64* [[A:%.*]], <vscale x 2 x i32> [[VEC_IND]]
; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <vscale x 2 x i64*> [[TMP12]], i32 0
; CHECK-NEXT:    [[TMP14:%.*]] = bitcast i64* [[TMP13]] to double*
; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr double, double* [[TMP14]], i32 0
; CHECK-NEXT:    [[TMP16:%.*]] = bitcast double* [[TMP15]] to <vscale x 2 x double>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP16]], align 8
; CHECK-NEXT:    [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP17]], 2
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP18]]
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[DOTSPLAT2]]
; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[TMP20:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP21:%.*]] = mul i32 [[TMP20]], 2
; CHECK-NEXT:    [[TMP22:%.*]] = sub i32 [[TMP21]], 1
; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <vscale x 2 x i64*> [[TMP12]], i32 [[TMP22]]
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[L_EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[IDX]], [[L_ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[L_LOOPBODY:%.*]]
; CHECK:       L.LoopBody:
; CHECK-NEXT:    [[INDVAR:%.*]] = phi i32 [ [[INDVAR_NEXT:%.*]], [[L_LOOPBODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[INDVAR_NEXT]] = add nsw i32 [[INDVAR]], 1
; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i64, i64* [[A]], i32 [[INDVAR]]
; CHECK-NEXT:    [[TMP25:%.*]] = bitcast i64* [[TMP24]] to double*
; CHECK-NEXT:    [[TMP26:%.*]] = load double, double* [[TMP25]], align 8
; CHECK-NEXT:    [[TMP27:%.*]] = icmp slt i32 [[INDVAR_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[TMP27]], label [[L_LOOPBODY]], label [[L_EXIT]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       L.exit:
; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi i64* [ [[TMP24]], [[L_LOOPBODY]] ], [ [[TMP23]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    store i64 1, i64* [[DOTLCSSA]], align 8
; CHECK-NEXT:    ret void
;
L.entry:
  br label %L.LoopBody

L.LoopBody:                                       ; preds = %L.LoopBody, %L.entry
  %indvar = phi i32 [ %indvar.next, %L.LoopBody ], [ %idx, %L.entry ]
  %indvar.next = add nsw i32 %indvar, 1
  %0 = getelementptr i64, i64* %a, i32 %indvar
  %1 = bitcast i64* %0 to double*
  %2 = load double, double* %1, align 8
  %3 = icmp slt i32 %indvar.next, %n
  br i1 %3, label %L.LoopBody, label %L.exit

L.exit:                                       ; preds = %L.LoopBody
  store i64 1, i64* %0, align 8
  ret void
}

attributes #0 = { nofree norecurse nosync nounwind "target-features"="+sve" }