; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -disable-block-placement -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -disable-block-placement -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32ZBT %s
; RUN: llc -mtriple=riscv64 -disable-block-placement -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -disable-block-placement -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64ZBT %s

define signext i32 @foo(i32 signext %a, i32 *%b) nounwind {
; RV32I-LABEL: foo:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    beq a0, a2, .LBB0_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_2:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    bne a0, a2, .LBB0_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_4:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    bltu a2, a0, .LBB0_6
; RV32I-NEXT:  # %bb.5:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_6:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    bgeu a0, a2, .LBB0_8
; RV32I-NEXT:  # %bb.7:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_8:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    bltu a0, a2, .LBB0_10
; RV32I-NEXT:  # %bb.9:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_10:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    bgeu a2, a0, .LBB0_12
; RV32I-NEXT:  # %bb.11:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_12:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    blt a2, a0, .LBB0_14
; RV32I-NEXT:  # %bb.13:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_14:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    bge a0, a2, .LBB0_16
; RV32I-NEXT:  # %bb.15:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_16:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    blt a0, a2, .LBB0_18
; RV32I-NEXT:  # %bb.17:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_18:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    bge a2, a0, .LBB0_20
; RV32I-NEXT:  # %bb.19:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_20:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    blez a2, .LBB0_22
; RV32I-NEXT:  # %bb.21:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB0_22:
; RV32I-NEXT:    lw a3, 0(a1)
; RV32I-NEXT:    bgez a2, .LBB0_24
; RV32I-NEXT:  # %bb.23:
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:  .LBB0_24:
; RV32I-NEXT:    lw a3, 0(a1)
; RV32I-NEXT:    li a4, 1024
; RV32I-NEXT:    blt a4, a3, .LBB0_26
; RV32I-NEXT:  # %bb.25:
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:  .LBB0_26:
; RV32I-NEXT:    lw a1, 0(a1)
; RV32I-NEXT:    li a3, 2046
; RV32I-NEXT:    bltu a3, a2, .LBB0_28
; RV32I-NEXT:  # %bb.27:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB0_28:
; RV32I-NEXT:    ret
;
; RV32ZBT-LABEL: foo:
; RV32ZBT:       # %bb.0:
; RV32ZBT-NEXT:    lw a2, 0(a1)
; RV32ZBT-NEXT:    lw a3, 0(a1)
; RV32ZBT-NEXT:    xor a4, a0, a2
; RV32ZBT-NEXT:    cmov a0, a4, a2, a0
; RV32ZBT-NEXT:    lw a2, 0(a1)
; RV32ZBT-NEXT:    xor a4, a0, a3
; RV32ZBT-NEXT:    cmov a0, a4, a0, a3
; RV32ZBT-NEXT:    lw a3, 0(a1)
; RV32ZBT-NEXT:    sltu a4, a2, a0
; RV32ZBT-NEXT:    cmov a0, a4, a0, a2
; RV32ZBT-NEXT:    lw a2, 0(a1)
; RV32ZBT-NEXT:    sltu a4, a0, a3
; RV32ZBT-NEXT:    cmov a0, a4, a3, a0
; RV32ZBT-NEXT:    lw a3, 0(a1)
; RV32ZBT-NEXT:    sltu a4, a0, a2
; RV32ZBT-NEXT:    cmov a0, a4, a0, a2
; RV32ZBT-NEXT:    lw a2, 0(a1)
; RV32ZBT-NEXT:    sltu a4, a3, a0
; RV32ZBT-NEXT:    cmov a0, a4, a3, a0
; RV32ZBT-NEXT:    lw a3, 0(a1)
; RV32ZBT-NEXT:    slt a4, a2, a0
; RV32ZBT-NEXT:    cmov a0, a4, a0, a2
; RV32ZBT-NEXT:    lw a2, 0(a1)
; RV32ZBT-NEXT:    slt a4, a0, a3
; RV32ZBT-NEXT:    cmov a0, a4, a3, a0
; RV32ZBT-NEXT:    lw a3, 0(a1)
; RV32ZBT-NEXT:    slt a4, a0, a2
; RV32ZBT-NEXT:    lw a5, 0(a1)
; RV32ZBT-NEXT:    cmov a0, a4, a0, a2
; RV32ZBT-NEXT:    slt a2, a3, a0
; RV32ZBT-NEXT:    cmov a0, a2, a3, a0
; RV32ZBT-NEXT:    slti a2, a5, 1
; RV32ZBT-NEXT:    lw a3, 0(a1)
; RV32ZBT-NEXT:    cmov a0, a2, a0, a5
; RV32ZBT-NEXT:    lw a2, 0(a1)
; RV32ZBT-NEXT:    slti a4, a5, 0
; RV32ZBT-NEXT:    cmov a0, a4, a3, a0
; RV32ZBT-NEXT:    lw a1, 0(a1)
; RV32ZBT-NEXT:    slti a3, a2, 1025
; RV32ZBT-NEXT:    cmov a0, a3, a2, a0
; RV32ZBT-NEXT:    sltiu a2, a5, 2047
; RV32ZBT-NEXT:    cmov a0, a2, a1, a0
; RV32ZBT-NEXT:    ret
;
; RV64I-LABEL: foo:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    beq a0, a2, .LBB0_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_2:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    bne a0, a2, .LBB0_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_4:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    bltu a2, a0, .LBB0_6
; RV64I-NEXT:  # %bb.5:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_6:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    bgeu a0, a2, .LBB0_8
; RV64I-NEXT:  # %bb.7:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_8:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    bltu a0, a2, .LBB0_10
; RV64I-NEXT:  # %bb.9:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_10:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    bgeu a2, a0, .LBB0_12
; RV64I-NEXT:  # %bb.11:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_12:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    sext.w a3, a0
; RV64I-NEXT:    blt a2, a3, .LBB0_14
; RV64I-NEXT:  # %bb.13:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_14:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    sext.w a3, a0
; RV64I-NEXT:    bge a3, a2, .LBB0_16
; RV64I-NEXT:  # %bb.15:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_16:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    sext.w a3, a0
; RV64I-NEXT:    blt a3, a2, .LBB0_18
; RV64I-NEXT:  # %bb.17:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_18:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    sext.w a3, a0
; RV64I-NEXT:    bge a2, a3, .LBB0_20
; RV64I-NEXT:  # %bb.19:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_20:
; RV64I-NEXT:    lw a2, 0(a1)
; RV64I-NEXT:    blez a2, .LBB0_22
; RV64I-NEXT:  # %bb.21:
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB0_22:
; RV64I-NEXT:    lw a3, 0(a1)
; RV64I-NEXT:    bgez a2, .LBB0_24
; RV64I-NEXT:  # %bb.23:
; RV64I-NEXT:    mv a0, a3
; RV64I-NEXT:  .LBB0_24:
; RV64I-NEXT:    lw a3, 0(a1)
; RV64I-NEXT:    li a4, 1024
; RV64I-NEXT:    blt a4, a3, .LBB0_26
; RV64I-NEXT:  # %bb.25:
; RV64I-NEXT:    mv a0, a3
; RV64I-NEXT:  .LBB0_26:
; RV64I-NEXT:    lw a1, 0(a1)
; RV64I-NEXT:    li a3, 2046
; RV64I-NEXT:    bltu a3, a2, .LBB0_28
; RV64I-NEXT:  # %bb.27:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB0_28:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    ret
;
; RV64ZBT-LABEL: foo:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    lw a2, 0(a1)
; RV64ZBT-NEXT:    lw a3, 0(a1)
; RV64ZBT-NEXT:    xor a4, a0, a2
; RV64ZBT-NEXT:    cmov a0, a4, a2, a0
; RV64ZBT-NEXT:    lw a2, 0(a1)
; RV64ZBT-NEXT:    xor a4, a0, a3
; RV64ZBT-NEXT:    cmov a0, a4, a0, a3
; RV64ZBT-NEXT:    lw a3, 0(a1)
; RV64ZBT-NEXT:    sltu a4, a2, a0
; RV64ZBT-NEXT:    cmov a0, a4, a0, a2
; RV64ZBT-NEXT:    lw a2, 0(a1)
; RV64ZBT-NEXT:    sltu a4, a0, a3
; RV64ZBT-NEXT:    cmov a0, a4, a3, a0
; RV64ZBT-NEXT:    lw a3, 0(a1)
; RV64ZBT-NEXT:    sltu a4, a0, a2
; RV64ZBT-NEXT:    cmov a0, a4, a0, a2
; RV64ZBT-NEXT:    lw a2, 0(a1)
; RV64ZBT-NEXT:    sltu a4, a3, a0
; RV64ZBT-NEXT:    cmov a0, a4, a3, a0
; RV64ZBT-NEXT:    sext.w a3, a0
; RV64ZBT-NEXT:    slt a3, a2, a3
; RV64ZBT-NEXT:    lw a4, 0(a1)
; RV64ZBT-NEXT:    cmov a0, a3, a0, a2
; RV64ZBT-NEXT:    sext.w a2, a0
; RV64ZBT-NEXT:    lw a3, 0(a1)
; RV64ZBT-NEXT:    slt a2, a2, a4
; RV64ZBT-NEXT:    cmov a0, a2, a4, a0
; RV64ZBT-NEXT:    sext.w a2, a0
; RV64ZBT-NEXT:    slt a2, a2, a3
; RV64ZBT-NEXT:    lw a4, 0(a1)
; RV64ZBT-NEXT:    cmov a0, a2, a0, a3
; RV64ZBT-NEXT:    lw a2, 0(a1)
; RV64ZBT-NEXT:    sext.w a3, a0
; RV64ZBT-NEXT:    slt a3, a4, a3
; RV64ZBT-NEXT:    cmov a0, a3, a4, a0
; RV64ZBT-NEXT:    slti a3, a2, 1
; RV64ZBT-NEXT:    lw a4, 0(a1)
; RV64ZBT-NEXT:    cmov a0, a3, a0, a2
; RV64ZBT-NEXT:    lw a3, 0(a1)
; RV64ZBT-NEXT:    slti a5, a2, 0
; RV64ZBT-NEXT:    cmov a0, a5, a4, a0
; RV64ZBT-NEXT:    lw a1, 0(a1)
; RV64ZBT-NEXT:    slti a4, a3, 1025
; RV64ZBT-NEXT:    cmov a0, a4, a3, a0
; RV64ZBT-NEXT:    sltiu a2, a2, 2047
; RV64ZBT-NEXT:    cmov a0, a2, a1, a0
; RV64ZBT-NEXT:    sext.w a0, a0
; RV64ZBT-NEXT:    ret
; A chain of volatile loads feeding selects, one per icmp predicate
; (eq/ne, unsigned and signed orderings, and immediate comparisons),
; to exercise branch lowering (base ISA) vs. cmov lowering (Zbt).
  %val1 = load volatile i32, i32* %b
  %tst1 = icmp eq i32 %a, %val1
  %val2 = select i1 %tst1, i32 %a, i32 %val1

  %val3 = load volatile i32, i32* %b
  %tst2 = icmp ne i32 %val2, %val3
  %val4 = select i1 %tst2, i32 %val2, i32 %val3

  %val5 = load volatile i32, i32* %b
  %tst3 = icmp ugt i32 %val4, %val5
  %val6 = select i1 %tst3, i32 %val4, i32 %val5

  %val7 = load volatile i32, i32* %b
  %tst4 = icmp uge i32 %val6, %val7
  %val8 = select i1 %tst4, i32 %val6, i32 %val7

  %val9 = load volatile i32, i32* %b
  %tst5 = icmp ult i32 %val8, %val9
  %val10 = select i1 %tst5, i32 %val8, i32 %val9

  %val11 = load volatile i32, i32* %b
  %tst6 = icmp ule i32 %val10, %val11
  %val12 = select i1 %tst6, i32 %val10, i32 %val11

  %val13 = load volatile i32, i32* %b
  %tst7 = icmp sgt i32 %val12, %val13
  %val14 = select i1 %tst7, i32 %val12, i32 %val13

  %val15 = load volatile i32, i32* %b
  %tst8 = icmp sge i32 %val14, %val15
  %val16 = select i1 %tst8, i32 %val14, i32 %val15

  %val17 = load volatile i32, i32* %b
  %tst9 = icmp slt i32 %val16, %val17
  %val18 = select i1 %tst9, i32 %val16, i32 %val17

  %val19 = load volatile i32, i32* %b
  %tst10 = icmp sle i32 %val18, %val19
  %val20 = select i1 %tst10, i32 %val18, i32 %val19

  %val21 = load volatile i32, i32* %b
  %tst11 = icmp slt i32 %val21, 1
  %val22 = select i1 %tst11, i32 %val20, i32 %val21

  %val23 = load volatile i32, i32* %b
  %tst12 = icmp sgt i32 %val21, -1
  %val24 = select i1 %tst12, i32 %val22, i32 %val23

  %val25 = load volatile i32, i32* %b
  %tst13 = icmp sgt i32 %val25, 1024
  %val26 = select i1 %tst13, i32 %val24, i32 %val25

  %val27 = load volatile i32, i32* %b
  %tst14 = icmp ugt i32 %val21, 2046
  %val28 = select i1 %tst14, i32 %val26, i32 %val27
  ret i32 %val28
}

; Test that we can ComputeNumSignBits across basic blocks when the live out is
; RISCVISD::SELECT_CC. There should be no slli+srai or sext.h in the output.
define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2, i16 signext %3) nounwind {
; RV32I-LABEL: numsignbits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a3
; RV32I-NEXT:    beqz a0, .LBB1_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv s0, a2
; RV32I-NEXT:  .LBB1_2:
; RV32I-NEXT:    beqz a1, .LBB1_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call bar@plt
; RV32I-NEXT:  .LBB1_4:
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32ZBT-LABEL: numsignbits:
; RV32ZBT:       # %bb.0:
; RV32ZBT-NEXT:    addi sp, sp, -16
; RV32ZBT-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZBT-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZBT-NEXT:    cmov s0, a0, a2, a3
; RV32ZBT-NEXT:    beqz a1, .LBB1_2
; RV32ZBT-NEXT:  # %bb.1:
; RV32ZBT-NEXT:    mv a0, s0
; RV32ZBT-NEXT:    call bar@plt
; RV32ZBT-NEXT:  .LBB1_2:
; RV32ZBT-NEXT:    mv a0, s0
; RV32ZBT-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZBT-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZBT-NEXT:    addi sp, sp, 16
; RV32ZBT-NEXT:    ret
;
; RV64I-LABEL: numsignbits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a3
; RV64I-NEXT:    beqz a0, .LBB1_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:  .LBB1_2:
; RV64I-NEXT:    beqz a1, .LBB1_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call bar@plt
; RV64I-NEXT:  .LBB1_4:
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64ZBT-LABEL: numsignbits:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    addi sp, sp, -16
; RV64ZBT-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZBT-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZBT-NEXT:    cmov s0, a0, a2, a3
; RV64ZBT-NEXT:    beqz a1, .LBB1_2
; RV64ZBT-NEXT:  # %bb.1:
; RV64ZBT-NEXT:    mv a0, s0
; RV64ZBT-NEXT:    call bar@plt
; RV64ZBT-NEXT:  .LBB1_2:
; RV64ZBT-NEXT:    mv a0, s0
; RV64ZBT-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZBT-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZBT-NEXT:    addi sp, sp, 16
; RV64ZBT-NEXT:    ret
; The select result %6 is live across a call and returned signext; the
; checks above contain no slli+srai/sext.h, showing the sign bits are known.
  %5 = icmp eq i16 %0, 0
  %6 = select i1 %5, i16 %3, i16 %2
  %7 = icmp eq i16 %1, 0
  br i1 %7, label %9, label %8

8:                                                ; preds = %4
  tail call void @bar(i16 signext %6)
  br label %9

9:                                                ; preds = %8, %4
  ret i16 %6
}

declare void @bar(i16 signext)

define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z) {
; RV32I-LABEL: select_sge_int16min:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a3, 1048560
; RV32I-NEXT:    addi a3, a3, -1
; RV32I-NEXT:    blt a3, a0, .LBB2_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a1, a2
; RV32I-NEXT:  .LBB2_2:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    ret
;
; RV32ZBT-LABEL: select_sge_int16min:
; RV32ZBT:       # %bb.0:
; RV32ZBT-NEXT:    lui a3, 1048560
; RV32ZBT-NEXT:    addi a3, a3, -1
; RV32ZBT-NEXT:    slt a0, a3, a0
; RV32ZBT-NEXT:    cmov a0, a0, a1, a2
; RV32ZBT-NEXT:    ret
;
; RV64I-LABEL: select_sge_int16min:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a3, 1048560
; RV64I-NEXT:    addiw a3, a3, -1
; RV64I-NEXT:    blt a3, a0, .LBB2_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:  .LBB2_2:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBT-LABEL: select_sge_int16min:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    lui a3, 1048560
; RV64ZBT-NEXT:    addiw a3, a3, -1
; RV64ZBT-NEXT:    slt a0, a3, a0
; RV64ZBT-NEXT:    cmov a0, a0, a1, a2
; RV64ZBT-NEXT:    ret
; sge against -65536 (0xFFFF0000): a large negative immediate that
; cannot fold into slti, so the constant is materialized via lui+addi.
  %a = icmp sge i32 %x, -65536
  %b = select i1 %a, i32 %y, i32 %z
  ret i32 %b
}
define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) {
; RV32I-LABEL: select_sge_int32min:
; RV32I:       # %bb.0:
; RV32I-NEXT:    li a6, -1
; RV32I-NEXT:    bne a1, a6, .LBB3_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    j .LBB3_3
; RV32I-NEXT:  .LBB3_2:
; RV32I-NEXT:    slt a0, a6, a1
; RV32I-NEXT:  .LBB3_3:
; RV32I-NEXT:    bnez a0, .LBB3_5
; RV32I-NEXT:  # %bb.4:
; RV32I-NEXT:    mv a2, a4
; RV32I-NEXT:    mv a3, a5
; RV32I-NEXT:  .LBB3_5:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    ret
;
; RV32ZBT-LABEL: select_sge_int32min:
; RV32ZBT:       # %bb.0:
; RV32ZBT-NEXT:    slti a0, a0, 0
; RV32ZBT-NEXT:    addi a6, a1, 1
; RV32ZBT-NEXT:    slti a1, a1, 0
; RV32ZBT-NEXT:    xori a1, a1, 1
; RV32ZBT-NEXT:    cmov a1, a6, a1, a0
; RV32ZBT-NEXT:    cmov a0, a1, a2, a4
; RV32ZBT-NEXT:    cmov a1, a1, a3, a5
; RV32ZBT-NEXT:    ret
;
; RV64I-LABEL: select_sge_int32min:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a3, 524288
; RV64I-NEXT:    addi a3, a3, -1
; RV64I-NEXT:    blt a3, a0, .LBB3_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:  .LBB3_2:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBT-LABEL: select_sge_int32min:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    lui a3, 524288
; RV64ZBT-NEXT:    addi a3, a3, -1
; RV64ZBT-NEXT:    slt a0, a3, a0
; RV64ZBT-NEXT:    cmov a0, a0, a1, a2
; RV64ZBT-NEXT:    ret
; i64 sge against INT32_MIN: on RV32 this needs a two-register compare
; (split high/low words); on RV64 it is a single native compare.
  %a = icmp sge i64 %x, -2147483648
  %b = select i1 %a, i64 %y, i64 %z
  ret i64 %b
}