; Test loop tuning.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 \
; RUN:  | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-Z13

; Test that strength reduction is applied to addresses with a scale factor,
; but that indexed addressing can still be used.
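; Roughly equivalent C, as an illustrative sketch only (the names are not
; part of the test):
;
;   void f1(int *dest, int a) {
;     for (long i = 0; i != 100; i++)
;       dest[i] = a;
;   }
;
; The CHECK-NOT below verifies that no sllg is emitted to scale the index;
; the store is instead expected to use a base+index address of the form
; 0(%rX,%rY).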
define void @f1(i32 *%dest, i32 %a) {
; CHECK-LABEL: f1:
; CHECK-NOT: sllg
; CHECK: st %r3, 0({{%r[1-5],%r[1-5]}})
; CHECK: br %r14
entry:
  br label %loop

loop:
  %index = phi i64 [ 0, %entry ], [ %next, %loop ]
  %ptr = getelementptr i32, i32 *%dest, i64 %index
  store i32 %a, i32 *%ptr
  %next = add i64 %index, 1
  %cmp = icmp ne i64 %next, 100
  br i1 %cmp, label %loop, label %exit

exit:
  ret void
}

; Test a loop that should be converted into decrement-and-branch (dbr) form
; and then use BRCT.
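; Roughly equivalent C, as an illustrative sketch only:
;
;   void f2(volatile int *src, volatile int *dest) {
;     for (int i = 0; i != 100; i++) {
;       int val = *src;
;       if (val != 0)
;         *dest = val + 1;
;     }
;   }
;
; BRCT (branch relative on count) decrements its register and branches while
; the result is nonzero, so the counter is expected to be materialized as
; 100 with lhi and counted down rather than up.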
define void @f2(i32 *%src, i32 *%dest) {
; CHECK-LABEL: f2:
; CHECK: lhi [[REG:%r[0-5]]], 100
; CHECK: [[LABEL:\.[^:]*]]:{{.*}} %loop
; CHECK: brct [[REG]], [[LABEL]]
; CHECK: br %r14
entry:
  br label %loop

loop:
  %count = phi i32 [ 0, %entry ], [ %next, %loop.next ]
  %next = add i32 %count, 1
  %val = load volatile i32, i32 *%src
  %cmp = icmp eq i32 %val, 0
  br i1 %cmp, label %loop.next, label %loop.store

loop.store:
  %add = add i32 %val, 1
  store volatile i32 %add, i32 *%dest
  br label %loop.next

loop.next:
  %cont = icmp ne i32 %next, 100
  br i1 %cont, label %loop, label %exit

exit:
  ret void
}

; Like f2, but for BRCTG.
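; The loop body is the same as in f2; only the counter type changes to i64,
; so the 64-bit BRCTG is expected instead of the 32-bit BRCT.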
define void @f3(i64 *%src, i64 *%dest) {
; CHECK-LABEL: f3:
; CHECK: lghi [[REG:%r[0-5]]], 100
; CHECK: [[LABEL:\.[^:]*]]:{{.*}} %loop
; CHECK: brctg [[REG]], [[LABEL]]
; CHECK: br %r14
entry:
  br label %loop

loop:
  %count = phi i64 [ 0, %entry ], [ %next, %loop.next ]
  %next = add i64 %count, 1
  %val = load volatile i64, i64 *%src
  %cmp = icmp eq i64 %val, 0
  br i1 %cmp, label %loop.next, label %loop.store

loop.store:
  %add = add i64 %val, 1
  store volatile i64 %add, i64 *%dest
  br label %loop.next

loop.next:
  %cont = icmp ne i64 %next, 100
  br i1 %cont, label %loop, label %exit

exit:
  ret void
}

; Test a loop with a 64-bit decremented counter in which the 32-bit
; low part of the counter is used after the decrement.  This is an example
; of a subregister use being the only thing that blocks a conversion to BRCTG.
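; Roughly equivalent C, as an illustrative sketch only:
;
;   void f4(volatile int *src, volatile int *dest,
;           volatile unsigned long *dest2, unsigned long count) {
;     do {
;       *dest2 = count;
;       int val = *src;
;       if (val != 0)
;         *dest = val + 1;
;       count -= 1;
;       *dest2 = ((unsigned long)(unsigned)val << 32) | (count & 0xffffffff);
;     } while (count != 0);
;   }
;
; Because the low 32 bits of the decremented counter are still live in the
; OR, the decrement cannot be folded into BRCTG; an explicit aghi/lr/jne
; sequence is expected instead, as the CHECK lines below verify.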
define void @f4(i32 *%src, i32 *%dest, i64 *%dest2, i64 %count) {
; CHECK-LABEL: f4:
; CHECK: aghi [[REG:%r[0-5]]], -1
; CHECK: lr [[REG2:%r[0-5]]], [[REG]]
; CHECK: stg [[REG2]],
; CHECK: jne {{\..*}}
; CHECK: br %r14
entry:
  br label %loop

loop:
  %left = phi i64 [ %count, %entry ], [ %next, %loop.next ]
  store volatile i64 %left, i64 *%dest2
  %val = load volatile i32, i32 *%src
  %cmp = icmp eq i32 %val, 0
  br i1 %cmp, label %loop.next, label %loop.store

loop.store:
  %add = add i32 %val, 1
  store volatile i32 %add, i32 *%dest
  br label %loop.next

loop.next:
  %next = add i64 %left, -1
  %ext = zext i32 %val to i64
  %shl = shl i64 %ext, 32
  %and = and i64 %next, 4294967295
  %or = or i64 %shl, %and
  store volatile i64 %or, i64 *%dest2
  %cont = icmp ne i64 %next, 0
  br i1 %cont, label %loop, label %exit

exit:
  ret void
}

; Test that negative offsets are avoided for floating-point loads.
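; Roughly equivalent C, as an illustrative sketch only (the struct and
; variable names are not part of the test):
;
;   struct s_float { float a, b, c; };
;   void f5(struct s_float *a, const struct s_float *b, unsigned S) {
;     for (unsigned i = 0; i < S; i++)
;       a[i].a = b[i].a + b[i].b + b[i].c;
;   }
;
; The CHECK-Z13-NOT line verifies that no negative displacement such as
; -4(%rX) is formed, presumably because the FP load forms preferred on z13
; cannot encode one.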
%s.float = type { float, float, float }
define void @f5(%s.float* nocapture %a,
                %s.float* nocapture readonly %b,
                i32 zeroext %S) {
; CHECK-Z13-LABEL: f5:
; CHECK-Z13-NOT: -{{[0-9]+}}(%r

entry:
  %cmp9 = icmp eq i32 %S, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                 ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:          ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                   ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                           ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %a1 = getelementptr inbounds %s.float, %s.float* %b, i64 %indvars.iv, i32 0
  %tmp = load float, float* %a1, align 4
  %b4 = getelementptr inbounds %s.float, %s.float* %b, i64 %indvars.iv, i32 1
  %tmp1 = load float, float* %b4, align 4
  %add = fadd float %tmp, %tmp1
  %c = getelementptr inbounds %s.float, %s.float* %b, i64 %indvars.iv, i32 2
  %tmp2 = load float, float* %c, align 4
  %add7 = fadd float %add, %tmp2
  %a10 = getelementptr inbounds %s.float, %s.float* %a, i64 %indvars.iv, i32 0
  store float %add7, float* %a10, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %S
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}

; Test that negative offsets are avoided for loads of double.
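; The structure and illustrative C equivalent match f5, with double fields
; in place of float; the same negative-displacement check applies.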
%s.double = type { double, double, double }
define void @f6(%s.double* nocapture %a,
                %s.double* nocapture readonly %b,
                i32 zeroext %S) {
; CHECK-Z13-LABEL: f6:
; CHECK-Z13-NOT: -{{[0-9]+}}(%r
entry:
  %cmp9 = icmp eq i32 %S, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                  ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:           ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                    ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                            ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %a1 = getelementptr inbounds %s.double, %s.double* %b, i64 %indvars.iv, i32 0
  %tmp = load double, double* %a1, align 4
  %b4 = getelementptr inbounds %s.double, %s.double* %b, i64 %indvars.iv, i32 1
  %tmp1 = load double, double* %b4, align 4
  %add = fadd double %tmp, %tmp1
  %c = getelementptr inbounds %s.double, %s.double* %b, i64 %indvars.iv, i32 2
  %tmp2 = load double, double* %c, align 4
  %add7 = fadd double %add, %tmp2
  %a10 = getelementptr inbounds %s.double, %s.double* %a, i64 %indvars.iv, i32 0
  store double %add7, double* %a10, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %S
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}

; Test that negative offsets are avoided for memory accesses of vector type.
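; The structure again matches f5, now with <4 x i32> fields.  Negative
; displacements are presumably avoided because the z13 vector load/store
; instructions only accept short unsigned displacements; the CHECK-Z13-NOT
; line verifies that none are emitted.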
%s.vec = type { <4 x i32>, <4 x i32>, <4 x i32> }
define void @f7(%s.vec* nocapture %a,
                %s.vec* nocapture readonly %b,
                i32 zeroext %S) {
; CHECK-Z13-LABEL: f7:
; CHECK-Z13-NOT: -{{[0-9]+}}(%r
entry:
  %cmp9 = icmp eq i32 %S, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                 ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:          ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                   ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                           ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %a1 = getelementptr inbounds %s.vec, %s.vec* %b, i64 %indvars.iv, i32 0
  %tmp = load <4 x i32>, <4 x i32>* %a1, align 4
  %b4 = getelementptr inbounds %s.vec, %s.vec* %b, i64 %indvars.iv, i32 1
  %tmp1 = load <4 x i32>, <4 x i32>* %b4, align 4
  %add = add <4 x i32> %tmp1, %tmp
  %c = getelementptr inbounds %s.vec, %s.vec* %b, i64 %indvars.iv, i32 2
  %tmp2 = load <4 x i32>, <4 x i32>* %c, align 4
  %add7 = add <4 x i32> %add, %tmp2
  %a10 = getelementptr inbounds %s.vec, %s.vec* %a, i64 %indvars.iv, i32 0
  store <4 x i32> %add7, <4 x i32>* %a10, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %S
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}