1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
3; RUN:   | FileCheck -check-prefix=RV32I %s
4; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
5; RUN:   | FileCheck -check-prefix=RV32IM %s
6; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
7; RUN:   | FileCheck -check-prefix=RV64I %s
8; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
9; RUN:   | FileCheck -check-prefix=RV64IM %s
10
; i32 unsigned remainder of two variables. Without the M extension this
; becomes a libcall (__umodsi3 on RV32, __umoddi3 on RV64 after zero-extending
; both operands with slli/srli 32); with M it selects a single remu / remuw.
define i32 @urem(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: urem:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __umodsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    remu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    call __umoddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    remuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = urem i32 %a, %b
  ret i32 %1
}
46
; i32 unsigned remainder with a constant dividend (10 % a). Checks that the
; constant is materialized into the dividend operand position (li ... 10) and
; that the variable moves to the divisor slot for the libcall / remu / remuw.
define i32 @urem_constant_lhs(i32 %a) nounwind {
; RV32I-LABEL: urem_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __umodsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    remu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 32
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __umoddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    remuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = urem i32 10, %a
  ret i32 %1
}
85
; i32 signed remainder of two variables. Without M this is a libcall
; (__modsi3 on RV32; __moddi3 on RV64 after sign-extending both operands with
; sext.w); with M it selects a single rem / remw.
define i32 @srem(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: srem:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __modsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    rem a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __moddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    remw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %a, %b
  ret i32 %1
}
119
; i32 signed remainder by a power of two (a % 8). No divide is needed at all:
; every configuration lowers to the shift/add/mask/sub bias sequence that
; rounds toward zero for negative dividends, so I and IM output match.
define i32 @srem_pow2(i32 %a) nounwind {
; RV32I-LABEL: srem_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 29
; RV32I-NEXT:    add a1, a0, a1
; RV32I-NEXT:    andi a1, a1, -8
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 29
; RV32IM-NEXT:    add a1, a0, a1
; RV32IM-NEXT:    andi a1, a1, -8
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 29
; RV64I-NEXT:    add a1, a0, a1
; RV64I-NEXT:    andi a1, a1, -8
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 29
; RV64IM-NEXT:    add a1, a0, a1
; RV64IM-NEXT:    andi a1, a1, -8
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %a, 8
  ret i32 %1
}
159
; i32 signed remainder by a larger power of two (a % 65536). Same bias
; sequence as srem_pow2, but the mask -65536 no longer fits in a 12-bit
; immediate, so it is materialized with lui 1048560 and applied with and.
define i32 @srem_pow2_2(i32 %a) nounwind {
; RV32I-LABEL: srem_pow2_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 16
; RV32I-NEXT:    add a1, a0, a1
; RV32I-NEXT:    lui a2, 1048560
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem_pow2_2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 16
; RV32IM-NEXT:    add a1, a0, a1
; RV32IM-NEXT:    lui a2, 1048560
; RV32IM-NEXT:    and a1, a1, a2
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem_pow2_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 16
; RV64I-NEXT:    add a1, a0, a1
; RV64I-NEXT:    lui a2, 1048560
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem_pow2_2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 16
; RV64IM-NEXT:    add a1, a0, a1
; RV64IM-NEXT:    lui a2, 1048560
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %a, 65536
  ret i32 %1
}
203
; i32 signed remainder with a negative constant dividend (-10 % a). Checks the
; constant goes into the dividend slot (li ... -10); on RV64I the variable is
; sign-extended before the __moddi3 call.
define i32 @srem_constant_lhs(i32 %a) nounwind {
; RV32I-LABEL: srem_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    call __modsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    rem a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __moddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    remw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = srem i32 -10, %a
  ret i32 %1
}
241
; i64 unsigned remainder. RV32 always calls __umoddi3, even with M (no 64-bit
; remainder instruction on a 32-bit target); RV64 calls it without M and
; selects a single remu with M.
define i64 @urem64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: urem64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __umoddi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    call __umoddi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __umoddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    remu a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = urem i64 %a, %b
  ret i64 %1
}
277
; i64 unsigned remainder with constant dividend (10 % a). On RV32 the i64
; dividend occupies the a0/a1 register pair (10 in a0, 0 in a1) and the
; variable is shuffled into a2/a3 before the __umoddi3 call.
define i64 @urem64_constant_lhs(i64 %a) nounwind {
; RV32I-LABEL: urem64_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __umoddi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem64_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a3, a1
; RV32IM-NEXT:    mv a2, a0
; RV32IM-NEXT:    li a0, 10
; RV32IM-NEXT:    li a1, 0
; RV32IM-NEXT:    call __umoddi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __umoddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    remu a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = urem i64 10, %a
  ret i64 %1
}
324
; i64 signed remainder. RV32 always calls __moddi3, even with M; RV64 calls it
; without M and selects a single rem with M.
define i64 @srem64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: srem64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __moddi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    call __moddi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __moddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    rem a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i64 %a, %b
  ret i64 %1
}
360
; i64 signed remainder with constant dividend (-10 % a). On RV32 the constant
; -10 is split across the a0/a1 pair (-10 low, -1 high = sign extension)
; before the __moddi3 call.
define i64 @srem64_constant_lhs(i64 %a) nounwind {
; RV32I-LABEL: srem64_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    li a1, -1
; RV32I-NEXT:    call __moddi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem64_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    mv a3, a1
; RV32IM-NEXT:    mv a2, a0
; RV32IM-NEXT:    li a0, -10
; RV32IM-NEXT:    li a1, -1
; RV32IM-NEXT:    call __moddi3@plt
; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __moddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    rem a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = srem i64 -10, %a
  ret i64 %1
}
407
; i8 unsigned remainder. Both operands are zero-extended with andi 255 before
; the libcall (without M) or the remu / remuw instruction (with M).
define i8 @urem8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: urem8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    andi a1, a1, 255
; RV32I-NEXT:    call __umodsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem8:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a1, a1, 255
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    remu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    andi a1, a1, 255
; RV64I-NEXT:    call __umoddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a1, a1, 255
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    remuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = urem i8 %a, %b
  ret i8 %1
}
447
; i8 unsigned remainder with constant dividend (10 % a). Only the variable
; divisor needs the andi 255 zero-extension; the constant 10 is already
; in range.
define i8 @urem8_constant_lhs(i8 %a) nounwind {
; RV32I-LABEL: urem8_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    andi a1, a0, 255
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __umodsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem8_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    andi a0, a0, 255
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    remu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a1, a0, 255
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __umoddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    remuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = urem i8 10, %a
  ret i8 %1
}
487
488
; i8 signed remainder. Both operands are sign-extended from bit 7 via
; slli/srai by 24 (RV32) or 56 (RV64) before the libcall or rem / remw.
define i8 @srem8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: srem8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    slli a1, a1, 24
; RV32I-NEXT:    srai a1, a1, 24
; RV32I-NEXT:    call __modsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem8:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a1, 24
; RV32IM-NEXT:    srai a1, a1, 24
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    rem a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    slli a1, a1, 56
; RV64I-NEXT:    srai a1, a1, 56
; RV64I-NEXT:    call __moddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 56
; RV64IM-NEXT:    srai a1, a1, 56
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    remw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i8 %a, %b
  ret i8 %1
}
536
; i8 signed remainder with constant dividend (-10 % a). Only the variable
; divisor needs the slli/srai sign-extension; -10 is materialized directly.
define i8 @srem8_constant_lhs(i8 %a) nounwind {
; RV32I-LABEL: srem8_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a1, a0, 24
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    call __modsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem8_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 24
; RV32IM-NEXT:    srai a0, a0, 24
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    rem a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a1, a0, 56
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __moddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srai a0, a0, 56
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    remw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = srem i8 -10, %a
  ret i8 %1
}
580
581
; i16 unsigned remainder. The 0xffff mask does not fit in a 12-bit immediate,
; so it is built once with lui 16 / addi(w) -1 and shared by both operands
; before the libcall or remu / remuw.
define i16 @urem16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: urem16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lui a2, 16
; RV32I-NEXT:    addi a2, a2, -1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    call __umodsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem16:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a2, 16
; RV32IM-NEXT:    addi a2, a2, -1
; RV32IM-NEXT:    and a1, a1, a2
; RV32IM-NEXT:    and a0, a0, a2
; RV32IM-NEXT:    remu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a2, 16
; RV64I-NEXT:    addiw a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    call __umoddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a2, 16
; RV64IM-NEXT:    addiw a2, a2, -1
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    and a0, a0, a2
; RV64IM-NEXT:    remuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = urem i16 %a, %b
  ret i16 %1
}
629
; i16 unsigned remainder with constant dividend (10 % a). With only one
; operand to extend, zero-extension is done via slli/srli by 16 (RV32) or
; 48 (RV64) instead of building the 0xffff mask.
define i16 @urem16_constant_lhs(i16 %a) nounwind {
; RV32I-LABEL: urem16_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a1, a0, 16
; RV32I-NEXT:    li a0, 10
; RV32I-NEXT:    call __umodsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: urem16_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srli a0, a0, 16
; RV32IM-NEXT:    li a1, 10
; RV32IM-NEXT:    remu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: urem16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a1, a0, 48
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __umoddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: urem16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 48
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    remuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = urem i16 10, %a
  ret i16 %1
}
673
; i16 signed remainder. Both operands are sign-extended from bit 15 via
; slli/srai by 16 (RV32) or 48 (RV64) before the libcall or rem / remw.
define i16 @srem16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: srem16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    slli a1, a1, 16
; RV32I-NEXT:    srai a1, a1, 16
; RV32I-NEXT:    call __modsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem16:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a1, 16
; RV32IM-NEXT:    srai a1, a1, 16
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    rem a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    slli a1, a1, 48
; RV64I-NEXT:    srai a1, a1, 48
; RV64I-NEXT:    call __moddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 48
; RV64IM-NEXT:    srai a1, a1, 48
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srai a0, a0, 48
; RV64IM-NEXT:    remw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i16 %a, %b
  ret i16 %1
}
721
; i16 signed remainder with constant dividend (-10 % a). Only the variable
; divisor needs the slli/srai sign-extension; -10 is materialized directly.
define i16 @srem16_constant_lhs(i16 %a) nounwind {
; RV32I-LABEL: srem16_constant_lhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a1, a0, 16
; RV32I-NEXT:    li a0, -10
; RV32I-NEXT:    call __modsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: srem16_constant_lhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 16
; RV32IM-NEXT:    srai a0, a0, 16
; RV32IM-NEXT:    li a1, -10
; RV32IM-NEXT:    rem a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: srem16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a1, a0, 48
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __moddi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: srem16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srai a0, a0, 48
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    remw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = srem i16 -10, %a
  ret i16 %1
}
765