; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

declare void @use8(i8)
declare void @use32(i32)

; These would crash if we didn't check for a negative shift.
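; (A shift amount of -2 is 4294967294 as an i32, which is not smaller than the
; bit width, so the shift result is poison; presumably the old crashes came
; from shift-combining code that assumed an in-range constant amount.)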

; https://llvm.org/bugs/show_bug.cgi?id=12967

define void @pr12967() {
; CHECK-LABEL: @pr12967(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    br label [[LOOP]]
;
entry:
  br label %loop

loop:
  %c = phi i32 [ %shl, %loop ], [ undef, %entry ]
  %shr = shl i32 %c, 7
  %shl = lshr i32 %shr, -2
  br label %loop
}

; https://llvm.org/bugs/show_bug.cgi?id=26760

define void @pr26760() {
; CHECK-LABEL: @pr26760(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    br label [[LOOP]]
;
entry:
  br label %loop

loop:
  %c = phi i32 [ %shl, %loop ], [ undef, %entry ]
  %shr = lshr i32 %c, 7
  %shl = shl i32 %shr, -2
  br label %loop
}

; Converting the 2 shifts to SHL 6 without the AND is wrong.
; https://llvm.org/bugs/show_bug.cgi?id=8547
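; (Worked example, just to illustrate the expected IR below: with
; %storemerge = 5, (5 << 30) keeps only bit 30 and the lshr by 24 then yields
; 64, while a bare (5 << 6) would yield 320; the 'and i32 ..., 64' mask is
; what keeps the combined form equivalent.)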

define i32 @pr8547(i32* %g) {
; CHECK-LABEL: @pr8547(
; CHECK-NEXT:  codeRepl:
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[STOREMERGE:%.*]] = phi i32 [ 0, [[CODEREPL:%.*]] ], [ 5, [[FOR_COND]] ]
; CHECK-NEXT:    store i32 [[STOREMERGE]], i32* [[G:%.*]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i32 [[STOREMERGE]], 6
; CHECK-NEXT:    [[CONV2:%.*]] = and i32 [[TMP0]], 64
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[CONV2]], 0
; CHECK-NEXT:    br i1 [[TOBOOL]], label [[FOR_COND]], label [[CODEREPL2:%.*]]
; CHECK:       codeRepl2:
; CHECK-NEXT:    ret i32 [[CONV2]]
;
codeRepl:
  br label %for.cond

for.cond:
  %storemerge = phi i32 [ 0, %codeRepl ], [ 5, %for.cond ]
  store i32 %storemerge, i32* %g, align 4
  %shl = shl i32 %storemerge, 30
  %conv2 = lshr i32 %shl, 24
  %tobool = icmp eq i32 %conv2, 0
  br i1 %tobool, label %for.cond, label %codeRepl2

codeRepl2:
  ret i32 %conv2
}

; Two same-direction shifts whose amounts add up to at least the bitwidth
; should get folded to zero.
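; (E.g. 6 + 28 = 34 >= 32 in @shl_shl below, and 231 + 1 = 232, exactly the
; width, in @lshr_lshr; either way every bit of the input is shifted out.)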

define i32 @shl_shl(i32 %A) {
; CHECK-LABEL: @shl_shl(
; CHECK-NEXT:    ret i32 0
;
  %B = shl i32 %A, 6
  %C = shl i32 %B, 28
  ret i32 %C
}

define <2 x i33> @shl_shl_splat_vec(<2 x i33> %A) {
; CHECK-LABEL: @shl_shl_splat_vec(
; CHECK-NEXT:    ret <2 x i33> zeroinitializer
;
  %B = shl <2 x i33> %A, <i33 5, i33 5>
  %C = shl <2 x i33> %B, <i33 28, i33 28>
  ret <2 x i33> %C
}

; FIXME: The non-splat shift amounts also add up to the bit width
; (6+27 = 5+28 = 33), so this should fold to zero like the splat case.

define <2 x i33> @shl_shl_vec(<2 x i33> %A) {
; CHECK-LABEL: @shl_shl_vec(
; CHECK-NEXT:    [[B:%.*]] = shl <2 x i33> [[A:%.*]], <i33 6, i33 5>
; CHECK-NEXT:    [[C:%.*]] = shl <2 x i33> [[B]], <i33 27, i33 28>
; CHECK-NEXT:    ret <2 x i33> [[C]]
;
  %B = shl <2 x i33> %A, <i33 6, i33 5>
  %C = shl <2 x i33> %B, <i33 27, i33 28>
  ret <2 x i33> %C
}

define i232 @lshr_lshr(i232 %A) {
; CHECK-LABEL: @lshr_lshr(
; CHECK-NEXT:    ret i232 0
;
  %B = lshr i232 %A, 231
  %C = lshr i232 %B, 1
  ret i232 %C
}

define <2 x i32> @lshr_lshr_splat_vec(<2 x i32> %A) {
; CHECK-LABEL: @lshr_lshr_splat_vec(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %B = lshr <2 x i32> %A, <i32 28, i32 28>
  %C = lshr <2 x i32> %B, <i32 4, i32 4>
  ret <2 x i32> %C
}

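; The non-splat case does fold for lshr (unlike the shl case marked FIXME
; above): 29+4 and 28+5 both reach the bit width, and the expected result is
; zero.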
define <2 x i32> @lshr_lshr_vec(<2 x i32> %A) {
; CHECK-LABEL: @lshr_lshr_vec(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %B = lshr <2 x i32> %A, <i32 29, i32 28>
  %C = lshr <2 x i32> %B, <i32 4, i32 5>
  ret <2 x i32> %C
}

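; shl (trunc (lshr/ashr X, C1)), C2 tests: judging from the expected output,
; the two shifts are combined through the trunc by shifting X by the
; difference of the amounts and masking off the bits that must be zero.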
define i8 @shl_trunc_bigger_lshr(i32 %x) {
; CHECK-LABEL: @shl_trunc_bigger_lshr(
; CHECK-NEXT:    [[SH_DIFF:%.*]] = lshr i32 [[X:%.*]], 2
; CHECK-NEXT:    [[TR_SH_DIFF:%.*]] = trunc i32 [[SH_DIFF]] to i8
; CHECK-NEXT:    [[LT:%.*]] = and i8 [[TR_SH_DIFF]], -8
; CHECK-NEXT:    ret i8 [[LT]]
;
  %rt = lshr i32 %x, 5
  %tr = trunc i32 %rt to i8
  %lt = shl i8 %tr, 3
  ret i8 %lt
}

define i8 @shl_trunc_smaller_lshr(i32 %x) {
; CHECK-LABEL: @shl_trunc_smaller_lshr(
; CHECK-NEXT:    [[X_TR:%.*]] = trunc i32 [[X:%.*]] to i8
; CHECK-NEXT:    [[TR_SH_DIFF:%.*]] = shl i8 [[X_TR]], 2
; CHECK-NEXT:    [[LT:%.*]] = and i8 [[TR_SH_DIFF]], -32
; CHECK-NEXT:    ret i8 [[LT]]
;
  %rt = lshr i32 %x, 3
  %tr = trunc i32 %rt to i8
  %lt = shl i8 %tr, 5
  ret i8 %lt
}

define i24 @shl_trunc_bigger_ashr(i32 %x) {
; CHECK-LABEL: @shl_trunc_bigger_ashr(
; CHECK-NEXT:    [[SH_DIFF:%.*]] = ashr i32 [[X:%.*]], 9
; CHECK-NEXT:    [[TR_SH_DIFF:%.*]] = trunc i32 [[SH_DIFF]] to i24
; CHECK-NEXT:    [[LT:%.*]] = and i24 [[TR_SH_DIFF]], -8
; CHECK-NEXT:    ret i24 [[LT]]
;
  %rt = ashr i32 %x, 12
  %tr = trunc i32 %rt to i24
  %lt = shl i24 %tr, 3
  ret i24 %lt
}

define i24 @shl_trunc_smaller_ashr(i32 %x) {
; CHECK-LABEL: @shl_trunc_smaller_ashr(
; CHECK-NEXT:    [[X_TR:%.*]] = trunc i32 [[X:%.*]] to i24
; CHECK-NEXT:    [[TR_SH_DIFF:%.*]] = shl i24 [[X_TR]], 3
; CHECK-NEXT:    [[LT:%.*]] = and i24 [[TR_SH_DIFF]], -8192
; CHECK-NEXT:    ret i24 [[LT]]
;
  %rt = ashr i32 %x, 10
  %tr = trunc i32 %rt to i24
  %lt = shl i24 %tr, 13
  ret i24 %lt
}

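; When both shifts are shl, the amounts simply add: 4+2 and 2+4 both become a
; single shl by 6 after the trunc.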
define i8 @shl_trunc_bigger_shl(i32 %x) {
; CHECK-LABEL: @shl_trunc_bigger_shl(
; CHECK-NEXT:    [[X_TR:%.*]] = trunc i32 [[X:%.*]] to i8
; CHECK-NEXT:    [[TR:%.*]] = shl i8 [[X_TR]], 6
; CHECK-NEXT:    ret i8 [[TR]]
;
  %rt = shl i32 %x, 4
  %tr = trunc i32 %rt to i8
  %lt = shl i8 %tr, 2
  ret i8 %lt
}

define i8 @shl_trunc_smaller_shl(i32 %x) {
; CHECK-LABEL: @shl_trunc_smaller_shl(
; CHECK-NEXT:    [[X_TR:%.*]] = trunc i32 [[X:%.*]] to i8
; CHECK-NEXT:    [[TR:%.*]] = shl i8 [[X_TR]], 6
; CHECK-NEXT:    ret i8 [[TR]]
;
  %rt = shl i32 %x, 2
  %tr = trunc i32 %rt to i8
  %lt = shl i8 %tr, 4
  ret i8 %lt
}

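; Negative tests: when the inner shift or the trunc has an extra use, the
; expected output keeps the original instructions, i.e. the fold is not done.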
define i8 @shl_trunc_bigger_lshr_use1(i32 %x) {
; CHECK-LABEL: @shl_trunc_bigger_lshr_use1(
; CHECK-NEXT:    [[RT:%.*]] = lshr i32 [[X:%.*]], 5
; CHECK-NEXT:    call void @use32(i32 [[RT]])
; CHECK-NEXT:    [[TR:%.*]] = trunc i32 [[RT]] to i8
; CHECK-NEXT:    [[LT:%.*]] = shl i8 [[TR]], 3
; CHECK-NEXT:    ret i8 [[LT]]
;
  %rt = lshr i32 %x, 5
  call void @use32(i32 %rt)
  %tr = trunc i32 %rt to i8
  %lt = shl i8 %tr, 3
  ret i8 %lt
}

define i8 @shl_trunc_smaller_lshr_use1(i32 %x) {
; CHECK-LABEL: @shl_trunc_smaller_lshr_use1(
; CHECK-NEXT:    [[RT:%.*]] = lshr i32 [[X:%.*]], 3
; CHECK-NEXT:    call void @use32(i32 [[RT]])
; CHECK-NEXT:    [[TR:%.*]] = trunc i32 [[RT]] to i8
; CHECK-NEXT:    [[LT:%.*]] = shl i8 [[TR]], 5
; CHECK-NEXT:    ret i8 [[LT]]
;
  %rt = lshr i32 %x, 3
  call void @use32(i32 %rt)
  %tr = trunc i32 %rt to i8
  %lt = shl i8 %tr, 5
  ret i8 %lt
}

define i8 @shl_trunc_bigger_lshr_use2(i32 %x) {
; CHECK-LABEL: @shl_trunc_bigger_lshr_use2(
; CHECK-NEXT:    [[RT:%.*]] = lshr i32 [[X:%.*]], 5
; CHECK-NEXT:    [[TR:%.*]] = trunc i32 [[RT]] to i8
; CHECK-NEXT:    call void @use8(i8 [[TR]])
; CHECK-NEXT:    [[LT:%.*]] = shl i8 [[TR]], 3
; CHECK-NEXT:    ret i8 [[LT]]
;
  %rt = lshr i32 %x, 5
  %tr = trunc i32 %rt to i8
  call void @use8(i8 %tr)
  %lt = shl i8 %tr, 3
  ret i8 %lt
}

define i8 @shl_trunc_smaller_lshr_use2(i32 %x) {
; CHECK-LABEL: @shl_trunc_smaller_lshr_use2(
; CHECK-NEXT:    [[RT:%.*]] = lshr i32 [[X:%.*]], 3
; CHECK-NEXT:    [[TR:%.*]] = trunc i32 [[RT]] to i8
; CHECK-NEXT:    call void @use8(i8 [[TR]])
; CHECK-NEXT:    [[LT:%.*]] = shl i8 [[TR]], 5
; CHECK-NEXT:    ret i8 [[LT]]
;
  %rt = lshr i32 %x, 3
  %tr = trunc i32 %rt to i8
  call void @use8(i8 %tr)
  %lt = shl i8 %tr, 5
  ret i8 %lt
}