; REQUIRES: asserts

; RUN: opt -passes='loop-vectorize' -mtriple=x86_64-unknown-linux -S -debug %s 2>&1 | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

target triple = "x86_64-unknown-linux"

declare double @llvm.pow.f64(double, double)

; Test case where the memory runtime checks and vector body are more expensive
; than running the scalar loop.
define void @test(double* nocapture %A, double* nocapture %B, double* nocapture %C, double* nocapture %D, double* nocapture %E) {

; The five pointer arguments may alias, so vectorization needs pairwise
; runtime overlap checks. The cost model should sum one unit per compare
; and per and/or that combines the checks (the GEP/bitcast address
; computations are free), for a total of 35.
; CHECK: Calculating cost of runtime checks:
; CHECK-NEXT:  0  for   {{.+}} = getelementptr double, double* %A, i64 16
; CHECK-NEXT:  0  for   {{.+}} = bitcast double*
; CHECK-NEXT:  0  for   {{.+}} = getelementptr double, double* %B, i64 16
; CHECK-NEXT:  0  for   {{.+}} = bitcast double*
; CHECK-NEXT:  0  for   {{.+}} = getelementptr double, double* %E, i64 16
; CHECK-NEXT:  0  for   {{.+}} = bitcast double*
; CHECK-NEXT:  0  for   {{.+}} = getelementptr double, double* %C, i64 16
; CHECK-NEXT:  0  for   {{.+}} = bitcast double*
; CHECK-NEXT:  0  for   {{.+}} = getelementptr double, double* %D, i64 16
; CHECK-NEXT:  0  for   {{.+}} = bitcast double*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = or i1
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = or i1
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = or i1
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = or i1
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = or i1
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = or i1
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = or i1
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = icmp ult i8*
; CHECK-NEXT:  1  for   {{.+}} = and i1
; CHECK-NEXT:  1  for   {{.+}} = or i1
; CHECK-NEXT: Total cost of runtime checks: 35

; With a known trip count of 16 and a minimum profitable VF requiring a trip
; count of 70, the vectorizer must reject the loop entirely: the emitted IR
; keeps the scalar loop and contains no memcheck or vector blocks.
; CHECK: LV: Vectorization is not beneficial: expected trip count < minimum profitable VF (16 < 70)
;
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT:  br label %for.body
; CHECK-NOT: vector.memcheck
; CHECK-NOT: vector.body
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.A = getelementptr inbounds double, double* %A, i64 %iv
  %l.A = load double, double* %gep.A, align 4
  store double 0.0, double* %gep.A, align 4
  %p.1 = call double @llvm.pow.f64(double %l.A, double 2.0)

  %gep.B = getelementptr inbounds double, double* %B, i64 %iv
  %l.B = load double, double* %gep.B, align 4
  %p.2 = call double @llvm.pow.f64(double %l.B, double %p.1)
  store double 0.0, double* %gep.B, align 4

  %gep.C = getelementptr inbounds double, double* %C, i64 %iv
  %l.C = load double, double* %gep.C, align 4
  %p.3 = call double @llvm.pow.f64(double %p.1, double %l.C)

  %gep.D = getelementptr inbounds double, double* %D, i64 %iv
  %l.D = load double, double* %gep.D
  %p.4 = call double @llvm.pow.f64(double %p.3, double %l.D)
  %p.5 = call double @llvm.pow.f64(double %p.4, double %p.3)
  %mul = fmul double 2.0, %p.5
  %mul.2 = fmul double %mul, 2.0
  %mul.3 = fmul double %mul, %mul.2
  %gep.E = getelementptr inbounds double, double* %E, i64 %iv
  store double %mul.3, double* %gep.E, align 4
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 16
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}