; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IM %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IM %s

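; Unsigned i32 division: a __udivsi3/__udivdi3 libcall without the M extension,
; a single divu (divuw on RV64) with it.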
define i32 @udiv(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: udiv:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    divu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, %b
  ret i32 %1
}

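; With M, udiv by 5 is strength-reduced to a multiply-high by the magic
; constant 0xCCCCCCCD (ceil(2^34 / 5)) followed by a right shift.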
define i32 @udiv_constant(i32 %a) nounwind {
; RV32I-LABEL: udiv_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 838861
; RV32IM-NEXT:    addi a1, a1, -819
; RV32IM-NEXT:    mulhu a0, a0, a1
; RV32IM-NEXT:    srli a0, a0, 2
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 32
; RV64IM-NEXT:    lui a1, 838861
; RV64IM-NEXT:    addiw a1, a1, -819
; RV64IM-NEXT:    slli a1, a1, 32
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 34
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, 5
  ret i32 %1
}

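; udiv by a power of two needs no M extension; it is a plain logical right shift.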
define i32 @udiv_pow2(i32 %a) nounwind {
; RV32I-LABEL: udiv_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a0, a0, 3
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srli a0, a0, 3
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a0, a0, 3
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    srliw a0, a0, 3
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, 8
  ret i32 %1
}

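; With the constant on the left-hand side the divisor is still variable, so no
; strength reduction is possible.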
define i32 @udiv_constant_lhs(i32 %a) nounwind {
; RV32I-LABEL: udiv_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    divu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 32
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i32 10, %a
  ret i32 %1
}

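; i64 udiv needs the __udivdi3 libcall on RV32 even with M; RV64IM can use a
; single divu.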
define i64 @udiv64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: udiv64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __udivdi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    call __udivdi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divu a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i64 %a, %b
  ret i64 %1
}

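; The 64-bit magic constant for udiv by 5 (0xCCCCCCCCCCCCCCCD) does not fit in
; an immediate, so RV64IM loads it from the constant pool.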
define i64 @udiv64_constant(i64 %a) nounwind {
; RV32I-LABEL: udiv64_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __udivdi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv64_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    li a2, 5
; RV32IM-NEXT:    li a3, 0
; RV32IM-NEXT:    call __udivdi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv64_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv64_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, %hi(.LCPI5_0)
; RV64IM-NEXT:    ld a1, %lo(.LCPI5_0)(a1)
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 2
; RV64IM-NEXT:    ret
  %1 = udiv i64 %a, 5
  ret i64 %1
}

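; Constant dividend, variable divisor: still a full 64-bit unsigned divide.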
define i64 @udiv64_constant_lhs(i64 %a) nounwind {
; RV32I-LABEL: udiv64_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __udivdi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv64_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a3, a1
; RV32IM-NEXT:    mv a2, a0
; RV32IM-NEXT:    li a0, 10
; RV32IM-NEXT:    li a1, 0
; RV32IM-NEXT:    call __udivdi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divu a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i64 10, %a
  ret i64 %1
}

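; i8 operands are zero-extended with andi 255 before the divide.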
define i8 @udiv8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: udiv8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    andi a1, a1, 255
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv8:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a1, a1, 255
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    divu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    andi a1, a1, 255
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a1, a1, 255
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, %b
  ret i8 %1
}

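; i8 udiv by 5 uses a narrow magic number: multiply by 205 (ceil(2^10 / 5)) and
; shift right by 10.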
define i8 @udiv8_constant(i8 %a) nounwind {
; RV32I-LABEL: udiv8_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv8_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    li a1, 205
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    srli a0, a0, 10
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv8_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    li a1, 205
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 10
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, 5
  ret i8 %1
}

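; For i8 udiv by 8 the zero-extension and the shift fold into one slli/srli pair.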
define i8 @udiv8_pow2(i8 %a) nounwind {
; RV32I-LABEL: udiv8_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srli a0, a0, 27
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv8_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srli a0, a0, 27
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv8_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srli a0, a0, 59
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srli a0, a0, 59
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, 8
  ret i8 %1
}

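; Only the variable i8 divisor needs zero-extension when the dividend is constant.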
define i8 @udiv8_constant_lhs(i8 %a) nounwind {
; RV32I-LABEL: udiv8_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a1, a0, 255
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv8_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    divu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a1, a0, 255
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i8 10, %a
  ret i8 %1
}

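; i16 zero-extension takes a two-instruction mask (lui+addi of 0xffff) per operand.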
define i16 @udiv16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: udiv16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lui a2, 16
; RV32I-NEXT:    addi a2, a2, -1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv16:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a2, 16
; RV32IM-NEXT:    addi a2, a2, -1
; RV32IM-NEXT:    and a1, a1, a2
; RV32IM-NEXT:    and a0, a0, a2
; RV32IM-NEXT:    divu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a2, 16
; RV64I-NEXT:    addiw a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a2, 16
; RV64IM-NEXT:    addiw a2, a2, -1
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    and a0, a0, a2
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, %b
  ret i16 %1
}

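; For i16 udiv by 5 the dividend is shifted to the top of the register so a
; single lui can materialize the magic constant 0xCCCD in the high bits.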
define i16 @udiv16_constant(i16 %a) nounwind {
; RV32I-LABEL: udiv16_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv16_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    lui a1, 838864
; RV32IM-NEXT:    mulhu a0, a0, a1
; RV32IM-NEXT:    srli a0, a0, 18
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv16_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 52429
; RV64IM-NEXT:    slli a1, a1, 4
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 18
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, 5
  ret i16 %1
}

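; For i16 udiv by 8 the zero-extension and the shift fold into one slli/srli pair.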
define i16 @udiv16_pow2(i16 %a) nounwind {
; RV32I-LABEL: udiv16_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 19
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv16_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srli a0, a0, 19
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv16_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 51
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 51
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, 8
  ret i16 %1
}

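; Only the variable i16 divisor needs zero-extension when the dividend is constant.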
define i16 @udiv16_constant_lhs(i16 %a) nounwind {
; RV32I-LABEL: udiv16_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a1, a0, 16
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __udivsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: udiv16_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srli a0, a0, 16
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    divu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: udiv16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a1, a0, 48
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 48
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i16 10, %a
  ret i16 %1
}

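; Signed i32 division: a __divsi3/__divdi3 libcall without M (with the operands
; sign-extended via sext.w on RV64), div/divw with it.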
define i32 @sdiv(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sdiv:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    div a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, %b
  ret i32 %1
}

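; With M, sdiv by 5 becomes a multiply by the magic constant 0x66666667
; (ceil(2^33 / 5)), an arithmetic shift, then adding the sign bit so the result
; rounds toward zero.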
define i32 @sdiv_constant(i32 %a) nounwind {
; RV32I-LABEL: sdiv_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 419430
; RV32IM-NEXT:    addi a1, a1, 1639
; RV32IM-NEXT:    mulh a0, a0, a1
; RV32IM-NEXT:    srli a1, a0, 31
; RV32IM-NEXT:    srai a0, a0, 1
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    lui a1, 419430
; RV64IM-NEXT:    addiw a1, a1, 1639
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a1, a0, 63
; RV64IM-NEXT:    srai a0, a0, 33
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 5
  ret i32 %1
}

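; Signed division by 8 must round toward zero: negative values are biased by 7
; before the arithmetic right shift.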
define i32 @sdiv_pow2(i32 %a) nounwind {
; RV32I-LABEL: sdiv_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 29
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    srai a0, a0, 3
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 29
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    srai a0, a0, 3
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 29
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    sraiw a0, a0, 3
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 29
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    sraiw a0, a0, 3
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 8
  ret i32 %1
}

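; The same pattern for division by 65536: bias negative values by 65535, then
; srai 16.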
define i32 @sdiv_pow2_2(i32 %a) nounwind {
; RV32I-LABEL: sdiv_pow2_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 16
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv_pow2_2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 16
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv_pow2_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 16
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    sraiw a0, a0, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_pow2_2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 16
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    sraiw a0, a0, 16
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 65536
  ret i32 %1
}

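; Constant dividend, variable divisor: no strength reduction is possible.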
define i32 @sdiv_constant_lhs(i32 %a) nounwind {
; RV32I-LABEL: sdiv_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    div a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    divw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i32 -10, %a
  ret i32 %1
}

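; i64 sdiv needs the __divdi3 libcall on RV32 even with M; RV64IM can use a
; single div.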
define i64 @sdiv64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: sdiv64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __divdi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    call __divdi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    div a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i64 %a, %b
  ret i64 %1
}

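; The 64-bit signed magic constant for sdiv by 5 (0x6666666666666667) is loaded
; from the constant pool on RV64IM.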
define i64 @sdiv64_constant(i64 %a) nounwind {
; RV32I-LABEL: sdiv64_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __divdi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv64_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    li a2, 5
; RV32IM-NEXT:    li a3, 0
; RV32IM-NEXT:    call __divdi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv64_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv64_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, %hi(.LCPI21_0)
; RV64IM-NEXT:    ld a1, %lo(.LCPI21_0)(a1)
; RV64IM-NEXT:    mulh a0, a0, a1
; RV64IM-NEXT:    srli a1, a0, 63
; RV64IM-NEXT:    srai a0, a0, 1
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i64 %a, 5
  ret i64 %1
}

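; Constant dividend, variable divisor: still a full 64-bit signed divide.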
define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
; RV32I-LABEL: sdiv64_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __divdi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv64_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a3, a1
; RV32IM-NEXT:    mv a2, a0
; RV32IM-NEXT:    li a0, 10
; RV32IM-NEXT:    li a1, 0
; RV32IM-NEXT:    call __divdi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    div a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i64 10, %a
  ret i64 %1
}

; Although this sdiv has two sexti32 operands, it shouldn't compile to divw on
; RV64M as that wouldn't produce the correct result for e.g. INT_MIN/-1.

define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sdiv64_sext_operands:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srai a3, a2, 31
; RV32I-NEXT:    call __divdi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv64_sext_operands:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a2, a1
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srai a3, a2, 31
; RV32IM-NEXT:    call __divdi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv64_sext_operands:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv64_sext_operands:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    sext.w a1, a1
; RV64IM-NEXT:    div a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sext i32 %a to i64
  %2 = sext i32 %b to i64
  %3 = sdiv i64 %1, %2
  ret i64 %3
}

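; i8 operands are sign-extended with an slli/srai pair before the signed divide.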
define i8 @sdiv8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: sdiv8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    slli a1, a1, 24
; RV32I-NEXT:    srai a1, a1, 24
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv8:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a1, 24
; RV32IM-NEXT:    srai a1, a1, 24
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    div a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    slli a1, a1, 56
; RV64I-NEXT:    srai a1, a1, 56
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 56
; RV64IM-NEXT:    srai a1, a1, 56
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, %b
  ret i8 %1
}

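; i8 sdiv by 5: multiply by 103 (ceil(2^9 / 5)), srai 9, then add the sign bit
; of the product.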
define i8 @sdiv8_constant(i8 %a) nounwind {
; RV32I-LABEL: sdiv8_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv8_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    li a1, 103
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    srai a1, a0, 9
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srli a0, a0, 31
; RV32IM-NEXT:    add a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv8_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    li a1, 103
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srai a1, a0, 9
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 63
; RV64IM-NEXT:    add a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, 5
  ret i8 %1
}

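; Signed i8 division by 8: bias negative values by 7 before sign-extending and
; shifting.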
define i8 @sdiv8_pow2(i8 %a) nounwind {
; RV32I-LABEL: sdiv8_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 24
; RV32I-NEXT:    srai a1, a1, 24
; RV32I-NEXT:    slli a1, a1, 17
; RV32I-NEXT:    srli a1, a1, 29
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 27
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv8_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a0, 24
; RV32IM-NEXT:    srai a1, a1, 24
; RV32IM-NEXT:    slli a1, a1, 17
; RV32IM-NEXT:    srli a1, a1, 29
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 27
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv8_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 56
; RV64I-NEXT:    srai a1, a1, 56
; RV64I-NEXT:    slli a1, a1, 49
; RV64I-NEXT:    srli a1, a1, 61
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 59
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 56
; RV64IM-NEXT:    srai a1, a1, 56
; RV64IM-NEXT:    slli a1, a1, 49
; RV64IM-NEXT:    srli a1, a1, 61
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a0, a0, 59
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, 8
  ret i8 %1
}

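; Only the variable i8 divisor needs sign-extension when the dividend is constant.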
define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
; RV32I-LABEL: sdiv8_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a1, a0, 24
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv8_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    div a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a1, a0, 56
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    divw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i8 -10, %a
  ret i8 %1
}

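; i16 operands are sign-extended with an slli/srai pair before the signed divide.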
define i16 @sdiv16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: sdiv16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    slli a1, a1, 16
; RV32I-NEXT:    srai a1, a1, 16
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv16:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a1, 16
; RV32IM-NEXT:    srai a1, a1, 16
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    div a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    slli a1, a1, 48
; RV64I-NEXT:    srai a1, a1, 48
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 48
; RV64IM-NEXT:    srai a1, a1, 48
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srai a0, a0, 48
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i16 %a, %b
  ret i16 %1
}

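; i16 sdiv by 5: multiply by 0x6667 (ceil(2^17 / 5)), srai 17, then add the
; sign bit of the product.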
define i16 @sdiv16_constant(i16 %a) nounwind {
; RV32I-LABEL: sdiv16_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv16_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    lui a1, 6
; RV32IM-NEXT:    addi a1, a1, 1639
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    srli a1, a0, 31
; RV32IM-NEXT:    srai a0, a0, 17
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv16_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv16_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srai a0, a0, 48
; RV64IM-NEXT:    lui a1, 6
; RV64IM-NEXT:    addiw a1, a1, 1639
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srliw a1, a0, 31
; RV64IM-NEXT:    srai a0, a0, 17
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i16 %a, 5
  ret i16 %1
}

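; Signed i16 division by 8: bias negative values by 7 before the arithmetic shift.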
define i16 @sdiv16_pow2(i16 %a) nounwind {
; RV32I-LABEL: sdiv16_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 16
; RV32I-NEXT:    srai a1, a1, 16
; RV32I-NEXT:    slli a1, a1, 1
; RV32I-NEXT:    srli a1, a1, 29
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a0, a0, 19
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv16_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a0, 16
; RV32IM-NEXT:    srai a1, a1, 16
; RV32IM-NEXT:    slli a1, a1, 1
; RV32IM-NEXT:    srli a1, a1, 29
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srai a0, a0, 19
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv16_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 48
; RV64I-NEXT:    srai a1, a1, 48
; RV64I-NEXT:    slli a1, a1, 33
; RV64I-NEXT:    srli a1, a1, 61
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 51
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv16_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 48
; RV64IM-NEXT:    srai a1, a1, 48
; RV64IM-NEXT:    slli a1, a1, 33
; RV64IM-NEXT:    srli a1, a1, 61
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srai a0, a0, 51
; RV64IM-NEXT:    ret
  %1 = sdiv i16 %a, 8
  ret i16 %1
}

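; Only the variable i16 divisor needs sign-extension when the dividend is constant.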
define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
; RV32I-LABEL: sdiv16_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a1, a0, 16
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    call __divsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: sdiv16_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    div a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: sdiv16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a1, a0, 48
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srai a0, a0, 48
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    divw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i16 -10, %a
  ret i16 %1
}