; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IA %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IA %s

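; This file checks the lowering of atomic loads and stores for each IR
; ordering. Without the A extension, every access becomes a libcall
; (__atomic_load_N / __atomic_store_N) taking the C ABI ordering constant
; (0 = relaxed, 2 = acquire, 3 = release, 5 = seq_cst) as its final
; argument; unordered lowers the same way as monotonic. With +a,
; naturally-aligned accesses up to XLEN bits lower to plain loads and
; stores, with fences added for the stronger orderings.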
define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV32I-LABEL: atomic_load_i8_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_1@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i8_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lb a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i8_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_1@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i8_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lb a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i8, i8* %a unordered, align 1
  ret i8 %1
}

define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
; RV32I-LABEL: atomic_load_i8_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_1@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i8_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lb a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i8_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_1@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i8_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lb a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i8, i8* %a monotonic, align 1
  ret i8 %1
}

define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
; RV32I-LABEL: atomic_load_i8_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_1@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i8_acquire:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lb a0, 0(a0)
; RV32IA-NEXT:    fence r, rw
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i8_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_1@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i8_acquire:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lb a0, 0(a0)
; RV64IA-NEXT:    fence r, rw
; RV64IA-NEXT:    ret
  %1 = load atomic i8, i8* %a acquire, align 1
  ret i8 %1
}

define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
; RV32I-LABEL: atomic_load_i8_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_1@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i8_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, rw
; RV32IA-NEXT:    lb a0, 0(a0)
; RV32IA-NEXT:    fence r, rw
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i8_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_1@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i8_seq_cst:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, rw
; RV64IA-NEXT:    lb a0, 0(a0)
; RV64IA-NEXT:    fence r, rw
; RV64IA-NEXT:    ret
  %1 = load atomic i8, i8* %a seq_cst, align 1
  ret i8 %1
}

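; The i16 load cases follow the same pattern as i8, using lh and
; __atomic_load_2.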
define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
; RV32I-LABEL: atomic_load_i16_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_2@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i16_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lh a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i16_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_2@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i16_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lh a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i16, i16* %a unordered, align 2
  ret i16 %1
}

define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
; RV32I-LABEL: atomic_load_i16_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_2@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i16_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lh a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i16_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_2@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i16_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lh a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i16, i16* %a monotonic, align 2
  ret i16 %1
}

define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
; RV32I-LABEL: atomic_load_i16_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_2@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i16_acquire:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lh a0, 0(a0)
; RV32IA-NEXT:    fence r, rw
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i16_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_2@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i16_acquire:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lh a0, 0(a0)
; RV64IA-NEXT:    fence r, rw
; RV64IA-NEXT:    ret
  %1 = load atomic i16, i16* %a acquire, align 2
  ret i16 %1
}

define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
; RV32I-LABEL: atomic_load_i16_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_2@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i16_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, rw
; RV32IA-NEXT:    lh a0, 0(a0)
; RV32IA-NEXT:    fence r, rw
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i16_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_2@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i16_seq_cst:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, rw
; RV64IA-NEXT:    lh a0, 0(a0)
; RV64IA-NEXT:    fence r, rw
; RV64IA-NEXT:    ret
  %1 = load atomic i16, i16* %a seq_cst, align 2
  ret i16 %1
}

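; The i32 load cases use lw and __atomic_load_4. lw is also used on RV64,
; where it sign-extends the result as the ABI expects for i32.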
define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
; RV32I-LABEL: atomic_load_i32_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_4@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i32_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lw a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i32_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_4@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i32_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lw a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i32, i32* %a unordered, align 4
  ret i32 %1
}

define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
; RV32I-LABEL: atomic_load_i32_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_4@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i32_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lw a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i32_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_4@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i32_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lw a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i32, i32* %a monotonic, align 4
  ret i32 %1
}

define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
; RV32I-LABEL: atomic_load_i32_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_4@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i32_acquire:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lw a0, 0(a0)
; RV32IA-NEXT:    fence r, rw
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i32_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_4@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i32_acquire:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lw a0, 0(a0)
; RV64IA-NEXT:    fence r, rw
; RV64IA-NEXT:    ret
  %1 = load atomic i32, i32* %a acquire, align 4
  ret i32 %1
}

define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
; RV32I-LABEL: atomic_load_i32_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_4@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i32_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, rw
; RV32IA-NEXT:    lw a0, 0(a0)
; RV32IA-NEXT:    fence r, rw
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i32_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_4@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i32_seq_cst:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, rw
; RV64IA-NEXT:    lw a0, 0(a0)
; RV64IA-NEXT:    fence r, rw
; RV64IA-NEXT:    ret
  %1 = load atomic i32, i32* %a seq_cst, align 4
  ret i32 %1
}

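; For i64 loads, RV32 has no inline lowering even with +a, so both RV32I
; and RV32IA call __atomic_load_8. RV64IA uses ld with the same fence
; placement as the narrower types.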
define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
; RV32I-LABEL: atomic_load_i64_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_8@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i64_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 0
; RV32IA-NEXT:    call __atomic_load_8@plt
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i64_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_8@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i64_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    ld a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i64, i64* %a unordered, align 8
  ret i64 %1
}

define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
; RV32I-LABEL: atomic_load_i64_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_8@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i64_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 0
; RV32IA-NEXT:    call __atomic_load_8@plt
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i64_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_8@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i64_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    ld a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i64, i64* %a monotonic, align 8
  ret i64 %1
}

define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
; RV32I-LABEL: atomic_load_i64_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_8@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i64_acquire:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 2
; RV32IA-NEXT:    call __atomic_load_8@plt
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i64_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_8@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i64_acquire:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    ld a0, 0(a0)
; RV64IA-NEXT:    fence r, rw
; RV64IA-NEXT:    ret
  %1 = load atomic i64, i64* %a acquire, align 8
  ret i64 %1
}

define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
; RV32I-LABEL: atomic_load_i64_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_8@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i64_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 5
; RV32IA-NEXT:    call __atomic_load_8@plt
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i64_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_8@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i64_seq_cst:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, rw
; RV64IA-NEXT:    ld a0, 0(a0)
; RV64IA-NEXT:    fence r, rw
; RV64IA-NEXT:    ret
  %1 = load atomic i64, i64* %a seq_cst, align 8
  ret i64 %1
}

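; Atomic stores mirror the loads: unordered and monotonic lower to a plain
; store under +a, while release and seq_cst both add a leading fence rw, w.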
define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomic_store_i8_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    call __atomic_store_1@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i8_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    sb a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i8_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call __atomic_store_1@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i8_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sb a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i8 %b, i8* %a unordered, align 1
  ret void
}

define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomic_store_i8_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    call __atomic_store_1@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i8_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    sb a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i8_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call __atomic_store_1@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i8_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sb a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i8 %b, i8* %a monotonic, align 1
  ret void
}

define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomic_store_i8_release:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 3
; RV32I-NEXT:    call __atomic_store_1@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i8_release:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, w
; RV32IA-NEXT:    sb a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i8_release:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    call __atomic_store_1@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i8_release:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, w
; RV64IA-NEXT:    sb a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i8 %b, i8* %a release, align 1
  ret void
}

define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomic_store_i8_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    call __atomic_store_1@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i8_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, w
; RV32IA-NEXT:    sb a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i8_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    call __atomic_store_1@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i8_seq_cst:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, w
; RV64IA-NEXT:    sb a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i8 %b, i8* %a seq_cst, align 1
  ret void
}

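; The i16 store cases follow the i8 pattern, using sh and __atomic_store_2.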
define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomic_store_i16_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    call __atomic_store_2@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i16_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    sh a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i16_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call __atomic_store_2@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i16_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sh a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i16 %b, i16* %a unordered, align 2
  ret void
}

define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomic_store_i16_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    call __atomic_store_2@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i16_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    sh a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i16_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call __atomic_store_2@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i16_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sh a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i16 %b, i16* %a monotonic, align 2
  ret void
}

define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomic_store_i16_release:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 3
; RV32I-NEXT:    call __atomic_store_2@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i16_release:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, w
; RV32IA-NEXT:    sh a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i16_release:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    call __atomic_store_2@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i16_release:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, w
; RV64IA-NEXT:    sh a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i16 %b, i16* %a release, align 2
  ret void
}

define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomic_store_i16_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    call __atomic_store_2@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i16_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, w
; RV32IA-NEXT:    sh a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i16_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    call __atomic_store_2@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i16_seq_cst:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, w
; RV64IA-NEXT:    sh a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i16 %b, i16* %a seq_cst, align 2
  ret void
}

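; The i32 store cases use sw and __atomic_store_4.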
define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomic_store_i32_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    call __atomic_store_4@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i32_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    sw a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i32_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call __atomic_store_4@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i32_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sw a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i32 %b, i32* %a unordered, align 4
  ret void
}

define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomic_store_i32_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    call __atomic_store_4@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i32_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    sw a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i32_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call __atomic_store_4@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i32_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sw a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i32 %b, i32* %a monotonic, align 4
  ret void
}

define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomic_store_i32_release:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 3
; RV32I-NEXT:    call __atomic_store_4@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i32_release:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, w
; RV32IA-NEXT:    sw a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i32_release:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    call __atomic_store_4@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i32_release:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, w
; RV64IA-NEXT:    sw a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i32 %b, i32* %a release, align 4
  ret void
}

define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomic_store_i32_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a2, 5
; RV32I-NEXT:    call __atomic_store_4@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i32_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    fence rw, w
; RV32IA-NEXT:    sw a1, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i32_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    call __atomic_store_4@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i32_seq_cst:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, w
; RV64IA-NEXT:    sw a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i32 %b, i32* %a seq_cst, align 4
  ret void
}

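; For i64 stores, RV32 always calls __atomic_store_8; the ordering constant
; is passed in a3 because the i64 value occupies the a1/a2 register pair.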
define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomic_store_i64_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __atomic_store_8@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i64_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a3, 0
; RV32IA-NEXT:    call __atomic_store_8@plt
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i64_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call __atomic_store_8@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i64_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sd a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i64 %b, i64* %a unordered, align 8
  ret void
}

define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomic_store_i64_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __atomic_store_8@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i64_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a3, 0
; RV32IA-NEXT:    call __atomic_store_8@plt
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i64_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call __atomic_store_8@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i64_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    sd a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i64 %b, i64* %a monotonic, align 8
  ret void
}

define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomic_store_i64_release:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a3, 3
; RV32I-NEXT:    call __atomic_store_8@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i64_release:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a3, 3
; RV32IA-NEXT:    call __atomic_store_8@plt
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i64_release:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    call __atomic_store_8@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i64_release:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, w
; RV64IA-NEXT:    sd a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i64 %b, i64* %a release, align 8
  ret void
}

define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomic_store_i64_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a3, 5
; RV32I-NEXT:    call __atomic_store_8@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_store_i64_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a3, 5
; RV32IA-NEXT:    call __atomic_store_8@plt
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_store_i64_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    call __atomic_store_8@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_store_i64_seq_cst:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    fence rw, w
; RV64IA-NEXT:    sd a1, 0(a0)
; RV64IA-NEXT:    ret
  store atomic i64 %b, i64* %a seq_cst, align 8
  ret void
}