1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=riscv32 \
3; RUN:   | FileCheck %s --check-prefixes=RV32,RV32ALIGNED
4; RUN: llc < %s -mtriple=riscv64 \
5; RUN:   | FileCheck %s --check-prefixes=RV64,RV64ALIGNED
6; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
7; RUN:   | FileCheck %s --check-prefixes=RV32,RV32UNALIGNED
8; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
9; RUN:   | FileCheck %s --check-prefixes=RV64,RV64UNALIGNED
; An 11-byte all-i8 struct: odd-sized so an inline memcpy expansion needs a
; mix of word/half/byte accesses (see @t0).
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }

@src = external dso_local global %struct.x
@dst = external dso_local global %struct.x

; String constants of assorted lengths (31, 36, 24, 18, 7, and 14 bytes
; including the trailing NUL) used as memcpy sources in @t1..@t6, chosen to
; exercise different inline-vs-libcall expansion decisions.
@.str1 = private unnamed_addr constant [31 x i8] c"DHRYSTONE PROGRAM, SOME STRING\00", align 1
@.str2 = private unnamed_addr constant [36 x i8] c"DHRYSTONE PROGRAM, SOME STRING BLAH\00", align 1
@.str3 = private unnamed_addr constant [24 x i8] c"DHRYSTONE PROGRAM, SOME\00", align 1
@.str4 = private unnamed_addr constant [18 x i8] c"DHRYSTONE PROGR  \00", align 1
@.str5 = private unnamed_addr constant [7 x i8] c"DHRYST\00", align 1
@.str6 = private unnamed_addr constant [14 x i8] c"/tmp/rmXXXXXX\00", align 1
; 16-byte-aligned destination buffer for the global-to-global copy in @t6.
@spool.splbuf = internal global [512 x i8] zeroinitializer, align 16
22
; 11-byte memcpy between the align-8 globals @dst and @src. Small enough to
; be expanded inline on both targets regardless of the unaligned-scalar-mem
; feature: RV32 uses lw/lh/lb (+stores), RV64 uses ld/lh/lb (+stores).
define i32 @t0() {
; RV32-LABEL: t0:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    lui a0, %hi(src)
; RV32-NEXT:    lw a1, %lo(src)(a0)
; RV32-NEXT:    lui a2, %hi(dst)
; RV32-NEXT:    sw a1, %lo(dst)(a2)
; RV32-NEXT:    addi a0, a0, %lo(src)
; RV32-NEXT:    lb a1, 10(a0)
; RV32-NEXT:    lh a3, 8(a0)
; RV32-NEXT:    lw a0, 4(a0)
; RV32-NEXT:    addi a2, a2, %lo(dst)
; RV32-NEXT:    sb a1, 10(a2)
; RV32-NEXT:    sh a3, 8(a2)
; RV32-NEXT:    sw a0, 4(a2)
; RV32-NEXT:    li a0, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: t0:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    lui a0, %hi(src)
; RV64-NEXT:    ld a1, %lo(src)(a0)
; RV64-NEXT:    lui a2, %hi(dst)
; RV64-NEXT:    addi a0, a0, %lo(src)
; RV64-NEXT:    lb a3, 10(a0)
; RV64-NEXT:    lh a0, 8(a0)
; RV64-NEXT:    sd a1, %lo(dst)(a2)
; RV64-NEXT:    addi a1, a2, %lo(dst)
; RV64-NEXT:    sb a3, 10(a1)
; RV64-NEXT:    sh a0, 8(a1)
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
entry:
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i1 false)
  ret i32 0
}
59
; 31-byte memcpy from @.str1 into an unaligned destination. With only
; aligned scalar memory (RV32/RV64ALIGNED) this becomes a tail call to
; memcpy; with +unaligned-scalar-mem on RV64 it is expanded inline using
; wide (ld/sd) accesses plus materialized-immediate stores for the tail.
define void @t1(i8* nocapture %C) nounwind {
; RV32-LABEL: t1:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    lui a1, %hi(.L.str1)
; RV32-NEXT:    addi a1, a1, %lo(.L.str1)
; RV32-NEXT:    li a2, 31
; RV32-NEXT:    tail memcpy@plt
;
; RV64ALIGNED-LABEL: t1:
; RV64ALIGNED:       # %bb.0: # %entry
; RV64ALIGNED-NEXT:    lui a1, %hi(.L.str1)
; RV64ALIGNED-NEXT:    addi a1, a1, %lo(.L.str1)
; RV64ALIGNED-NEXT:    li a2, 31
; RV64ALIGNED-NEXT:    tail memcpy@plt
;
; RV64UNALIGNED-LABEL: t1:
; RV64UNALIGNED:       # %bb.0: # %entry
; RV64UNALIGNED-NEXT:    lui a1, %hi(.L.str1)
; RV64UNALIGNED-NEXT:    ld a2, %lo(.L.str1)(a1)
; RV64UNALIGNED-NEXT:    sd a2, 0(a0)
; RV64UNALIGNED-NEXT:    lui a2, 4
; RV64UNALIGNED-NEXT:    addiw a2, a2, 1870
; RV64UNALIGNED-NEXT:    sh a2, 28(a0)
; RV64UNALIGNED-NEXT:    lui a2, 300325
; RV64UNALIGNED-NEXT:    addiw a2, a2, 1107
; RV64UNALIGNED-NEXT:    addi a1, a1, %lo(.L.str1)
; RV64UNALIGNED-NEXT:    ld a3, 16(a1)
; RV64UNALIGNED-NEXT:    ld a1, 8(a1)
; RV64UNALIGNED-NEXT:    sw a2, 24(a0)
; RV64UNALIGNED-NEXT:    sb zero, 30(a0)
; RV64UNALIGNED-NEXT:    sd a3, 16(a0)
; RV64UNALIGNED-NEXT:    sd a1, 8(a0)
; RV64UNALIGNED-NEXT:    ret
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i1 false)
  ret void
}
97
; 36-byte memcpy from @.str2. Same shape as @t1 but with a 4-byte tail:
; libcall when restricted to aligned scalar memory, inline ld/sd + sw
; expansion under RV64 +unaligned-scalar-mem.
define void @t2(i8* nocapture %C) nounwind {
; RV32-LABEL: t2:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    lui a1, %hi(.L.str2)
; RV32-NEXT:    addi a1, a1, %lo(.L.str2)
; RV32-NEXT:    li a2, 36
; RV32-NEXT:    tail memcpy@plt
;
; RV64ALIGNED-LABEL: t2:
; RV64ALIGNED:       # %bb.0: # %entry
; RV64ALIGNED-NEXT:    lui a1, %hi(.L.str2)
; RV64ALIGNED-NEXT:    addi a1, a1, %lo(.L.str2)
; RV64ALIGNED-NEXT:    li a2, 36
; RV64ALIGNED-NEXT:    tail memcpy@plt
;
; RV64UNALIGNED-LABEL: t2:
; RV64UNALIGNED:       # %bb.0: # %entry
; RV64UNALIGNED-NEXT:    lui a1, %hi(.L.str2)
; RV64UNALIGNED-NEXT:    ld a2, %lo(.L.str2)(a1)
; RV64UNALIGNED-NEXT:    sd a2, 0(a0)
; RV64UNALIGNED-NEXT:    lui a2, 1156
; RV64UNALIGNED-NEXT:    addiw a2, a2, 332
; RV64UNALIGNED-NEXT:    addi a1, a1, %lo(.L.str2)
; RV64UNALIGNED-NEXT:    ld a3, 24(a1)
; RV64UNALIGNED-NEXT:    ld a4, 16(a1)
; RV64UNALIGNED-NEXT:    ld a1, 8(a1)
; RV64UNALIGNED-NEXT:    sw a2, 32(a0)
; RV64UNALIGNED-NEXT:    sd a3, 24(a0)
; RV64UNALIGNED-NEXT:    sd a4, 16(a0)
; RV64UNALIGNED-NEXT:    sd a1, 8(a0)
; RV64UNALIGNED-NEXT:    ret
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i1 false)
  ret void
}
133
; 24-byte memcpy from @.str3 — the first case small enough for RV32 to also
; expand inline when +unaligned-scalar-mem is enabled (six sw stores of
; materialized constants); without the feature both targets call memcpy.
define void @t3(i8* nocapture %C) nounwind {
; RV32ALIGNED-LABEL: t3:
; RV32ALIGNED:       # %bb.0: # %entry
; RV32ALIGNED-NEXT:    lui a1, %hi(.L.str3)
; RV32ALIGNED-NEXT:    addi a1, a1, %lo(.L.str3)
; RV32ALIGNED-NEXT:    li a2, 24
; RV32ALIGNED-NEXT:    tail memcpy@plt
;
; RV64ALIGNED-LABEL: t3:
; RV64ALIGNED:       # %bb.0: # %entry
; RV64ALIGNED-NEXT:    lui a1, %hi(.L.str3)
; RV64ALIGNED-NEXT:    addi a1, a1, %lo(.L.str3)
; RV64ALIGNED-NEXT:    li a2, 24
; RV64ALIGNED-NEXT:    tail memcpy@plt
;
; RV32UNALIGNED-LABEL: t3:
; RV32UNALIGNED:       # %bb.0: # %entry
; RV32UNALIGNED-NEXT:    lui a1, 1109
; RV32UNALIGNED-NEXT:    addi a1, a1, -689
; RV32UNALIGNED-NEXT:    sw a1, 20(a0)
; RV32UNALIGNED-NEXT:    lui a1, 340483
; RV32UNALIGNED-NEXT:    addi a1, a1, -947
; RV32UNALIGNED-NEXT:    sw a1, 16(a0)
; RV32UNALIGNED-NEXT:    lui a1, 267556
; RV32UNALIGNED-NEXT:    addi a1, a1, 1871
; RV32UNALIGNED-NEXT:    sw a1, 12(a0)
; RV32UNALIGNED-NEXT:    lui a1, 337154
; RV32UNALIGNED-NEXT:    addi a1, a1, 69
; RV32UNALIGNED-NEXT:    sw a1, 8(a0)
; RV32UNALIGNED-NEXT:    lui a1, 320757
; RV32UNALIGNED-NEXT:    addi a1, a1, 1107
; RV32UNALIGNED-NEXT:    sw a1, 4(a0)
; RV32UNALIGNED-NEXT:    lui a1, 365861
; RV32UNALIGNED-NEXT:    addi a1, a1, -1980
; RV32UNALIGNED-NEXT:    sw a1, 0(a0)
; RV32UNALIGNED-NEXT:    ret
;
; RV64UNALIGNED-LABEL: t3:
; RV64UNALIGNED:       # %bb.0: # %entry
; RV64UNALIGNED-NEXT:    lui a1, %hi(.L.str3)
; RV64UNALIGNED-NEXT:    ld a2, %lo(.L.str3)(a1)
; RV64UNALIGNED-NEXT:    addi a1, a1, %lo(.L.str3)
; RV64UNALIGNED-NEXT:    ld a3, 16(a1)
; RV64UNALIGNED-NEXT:    ld a1, 8(a1)
; RV64UNALIGNED-NEXT:    sd a2, 0(a0)
; RV64UNALIGNED-NEXT:    sd a3, 16(a0)
; RV64UNALIGNED-NEXT:    sd a1, 8(a0)
; RV64UNALIGNED-NEXT:    ret
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i1 false)
  ret void
}
186
; 18-byte memcpy from @.str4: a 16-byte body plus a 2-byte tail (sh).
; Inline expansion only with +unaligned-scalar-mem; otherwise a libcall.
define void @t4(i8* nocapture %C) nounwind {
; RV32ALIGNED-LABEL: t4:
; RV32ALIGNED:       # %bb.0: # %entry
; RV32ALIGNED-NEXT:    lui a1, %hi(.L.str4)
; RV32ALIGNED-NEXT:    addi a1, a1, %lo(.L.str4)
; RV32ALIGNED-NEXT:    li a2, 18
; RV32ALIGNED-NEXT:    tail memcpy@plt
;
; RV64ALIGNED-LABEL: t4:
; RV64ALIGNED:       # %bb.0: # %entry
; RV64ALIGNED-NEXT:    lui a1, %hi(.L.str4)
; RV64ALIGNED-NEXT:    addi a1, a1, %lo(.L.str4)
; RV64ALIGNED-NEXT:    li a2, 18
; RV64ALIGNED-NEXT:    tail memcpy@plt
;
; RV32UNALIGNED-LABEL: t4:
; RV32UNALIGNED:       # %bb.0: # %entry
; RV32UNALIGNED-NEXT:    li a1, 32
; RV32UNALIGNED-NEXT:    sh a1, 16(a0)
; RV32UNALIGNED-NEXT:    lui a1, 132388
; RV32UNALIGNED-NEXT:    addi a1, a1, 1871
; RV32UNALIGNED-NEXT:    sw a1, 12(a0)
; RV32UNALIGNED-NEXT:    lui a1, 337154
; RV32UNALIGNED-NEXT:    addi a1, a1, 69
; RV32UNALIGNED-NEXT:    sw a1, 8(a0)
; RV32UNALIGNED-NEXT:    lui a1, 320757
; RV32UNALIGNED-NEXT:    addi a1, a1, 1107
; RV32UNALIGNED-NEXT:    sw a1, 4(a0)
; RV32UNALIGNED-NEXT:    lui a1, 365861
; RV32UNALIGNED-NEXT:    addi a1, a1, -1980
; RV32UNALIGNED-NEXT:    sw a1, 0(a0)
; RV32UNALIGNED-NEXT:    ret
;
; RV64UNALIGNED-LABEL: t4:
; RV64UNALIGNED:       # %bb.0: # %entry
; RV64UNALIGNED-NEXT:    lui a1, %hi(.L.str4)
; RV64UNALIGNED-NEXT:    ld a2, %lo(.L.str4)(a1)
; RV64UNALIGNED-NEXT:    addi a1, a1, %lo(.L.str4)
; RV64UNALIGNED-NEXT:    ld a1, 8(a1)
; RV64UNALIGNED-NEXT:    li a3, 32
; RV64UNALIGNED-NEXT:    sh a3, 16(a0)
; RV64UNALIGNED-NEXT:    sd a2, 0(a0)
; RV64UNALIGNED-NEXT:    sd a1, 8(a0)
; RV64UNALIGNED-NEXT:    ret
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i1 false)
  ret void
}
235
; 7-byte memcpy from @.str5 — always expanded inline: byte-at-a-time sb
; stores without unaligned access, sw+sh+sb of materialized constants with
; +unaligned-scalar-mem.
define void @t5(i8* nocapture %C) nounwind {
; RV32ALIGNED-LABEL: t5:
; RV32ALIGNED:       # %bb.0: # %entry
; RV32ALIGNED-NEXT:    sb zero, 6(a0)
; RV32ALIGNED-NEXT:    li a1, 84
; RV32ALIGNED-NEXT:    sb a1, 5(a0)
; RV32ALIGNED-NEXT:    li a1, 83
; RV32ALIGNED-NEXT:    sb a1, 4(a0)
; RV32ALIGNED-NEXT:    li a1, 89
; RV32ALIGNED-NEXT:    sb a1, 3(a0)
; RV32ALIGNED-NEXT:    li a1, 82
; RV32ALIGNED-NEXT:    sb a1, 2(a0)
; RV32ALIGNED-NEXT:    li a1, 72
; RV32ALIGNED-NEXT:    sb a1, 1(a0)
; RV32ALIGNED-NEXT:    li a1, 68
; RV32ALIGNED-NEXT:    sb a1, 0(a0)
; RV32ALIGNED-NEXT:    ret
;
; RV64ALIGNED-LABEL: t5:
; RV64ALIGNED:       # %bb.0: # %entry
; RV64ALIGNED-NEXT:    sb zero, 6(a0)
; RV64ALIGNED-NEXT:    li a1, 84
; RV64ALIGNED-NEXT:    sb a1, 5(a0)
; RV64ALIGNED-NEXT:    li a1, 83
; RV64ALIGNED-NEXT:    sb a1, 4(a0)
; RV64ALIGNED-NEXT:    li a1, 89
; RV64ALIGNED-NEXT:    sb a1, 3(a0)
; RV64ALIGNED-NEXT:    li a1, 82
; RV64ALIGNED-NEXT:    sb a1, 2(a0)
; RV64ALIGNED-NEXT:    li a1, 72
; RV64ALIGNED-NEXT:    sb a1, 1(a0)
; RV64ALIGNED-NEXT:    li a1, 68
; RV64ALIGNED-NEXT:    sb a1, 0(a0)
; RV64ALIGNED-NEXT:    ret
;
; RV32UNALIGNED-LABEL: t5:
; RV32UNALIGNED:       # %bb.0: # %entry
; RV32UNALIGNED-NEXT:    sb zero, 6(a0)
; RV32UNALIGNED-NEXT:    lui a1, 5
; RV32UNALIGNED-NEXT:    addi a1, a1, 1107
; RV32UNALIGNED-NEXT:    sh a1, 4(a0)
; RV32UNALIGNED-NEXT:    lui a1, 365861
; RV32UNALIGNED-NEXT:    addi a1, a1, -1980
; RV32UNALIGNED-NEXT:    sw a1, 0(a0)
; RV32UNALIGNED-NEXT:    ret
;
; RV64UNALIGNED-LABEL: t5:
; RV64UNALIGNED:       # %bb.0: # %entry
; RV64UNALIGNED-NEXT:    sb zero, 6(a0)
; RV64UNALIGNED-NEXT:    lui a1, 5
; RV64UNALIGNED-NEXT:    addiw a1, a1, 1107
; RV64UNALIGNED-NEXT:    sh a1, 4(a0)
; RV64UNALIGNED-NEXT:    lui a1, 365861
; RV64UNALIGNED-NEXT:    addiw a1, a1, -1980
; RV64UNALIGNED-NEXT:    sw a1, 0(a0)
; RV64UNALIGNED-NEXT:    ret
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i1 false)
  ret void
}
296
; 14-byte memcpy from @.str6 into the 16-byte-aligned global @spool.splbuf.
; Aligned-only targets emit a real call (with ra spill/reload, since this is
; not a tail position after the setup); with +unaligned-scalar-mem both
; targets store directly through %lo(spool.splbuf+N) addressing.
define void @t6() nounwind {
; RV32ALIGNED-LABEL: t6:
; RV32ALIGNED:       # %bb.0: # %entry
; RV32ALIGNED-NEXT:    addi sp, sp, -16
; RV32ALIGNED-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ALIGNED-NEXT:    lui a0, %hi(spool.splbuf)
; RV32ALIGNED-NEXT:    addi a0, a0, %lo(spool.splbuf)
; RV32ALIGNED-NEXT:    lui a1, %hi(.L.str6)
; RV32ALIGNED-NEXT:    addi a1, a1, %lo(.L.str6)
; RV32ALIGNED-NEXT:    li a2, 14
; RV32ALIGNED-NEXT:    call memcpy@plt
; RV32ALIGNED-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ALIGNED-NEXT:    addi sp, sp, 16
; RV32ALIGNED-NEXT:    ret
;
; RV64ALIGNED-LABEL: t6:
; RV64ALIGNED:       # %bb.0: # %entry
; RV64ALIGNED-NEXT:    addi sp, sp, -16
; RV64ALIGNED-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ALIGNED-NEXT:    lui a0, %hi(spool.splbuf)
; RV64ALIGNED-NEXT:    addi a0, a0, %lo(spool.splbuf)
; RV64ALIGNED-NEXT:    lui a1, %hi(.L.str6)
; RV64ALIGNED-NEXT:    addi a1, a1, %lo(.L.str6)
; RV64ALIGNED-NEXT:    li a2, 14
; RV64ALIGNED-NEXT:    call memcpy@plt
; RV64ALIGNED-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ALIGNED-NEXT:    addi sp, sp, 16
; RV64ALIGNED-NEXT:    ret
;
; RV32UNALIGNED-LABEL: t6:
; RV32UNALIGNED:       # %bb.0: # %entry
; RV32UNALIGNED-NEXT:    lui a0, %hi(spool.splbuf)
; RV32UNALIGNED-NEXT:    li a1, 88
; RV32UNALIGNED-NEXT:    sh a1, %lo(spool.splbuf+12)(a0)
; RV32UNALIGNED-NEXT:    lui a1, 361862
; RV32UNALIGNED-NEXT:    addi a1, a1, -1960
; RV32UNALIGNED-NEXT:    sw a1, %lo(spool.splbuf+8)(a0)
; RV32UNALIGNED-NEXT:    lui a1, 362199
; RV32UNALIGNED-NEXT:    addi a1, a1, 559
; RV32UNALIGNED-NEXT:    sw a1, %lo(spool.splbuf+4)(a0)
; RV32UNALIGNED-NEXT:    lui a1, 460503
; RV32UNALIGNED-NEXT:    addi a1, a1, 1071
; RV32UNALIGNED-NEXT:    sw a1, %lo(spool.splbuf)(a0)
; RV32UNALIGNED-NEXT:    ret
;
; RV64UNALIGNED-LABEL: t6:
; RV64UNALIGNED:       # %bb.0: # %entry
; RV64UNALIGNED-NEXT:    lui a0, %hi(.L.str6)
; RV64UNALIGNED-NEXT:    ld a0, %lo(.L.str6)(a0)
; RV64UNALIGNED-NEXT:    lui a1, %hi(spool.splbuf)
; RV64UNALIGNED-NEXT:    li a2, 88
; RV64UNALIGNED-NEXT:    sh a2, %lo(spool.splbuf+12)(a1)
; RV64UNALIGNED-NEXT:    sd a0, %lo(spool.splbuf)(a1)
; RV64UNALIGNED-NEXT:    lui a0, 361862
; RV64UNALIGNED-NEXT:    addiw a0, a0, -1960
; RV64UNALIGNED-NEXT:    sw a0, %lo(spool.splbuf+8)(a1)
; RV64UNALIGNED-NEXT:    ret
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i1 false)
  ret void
}
358
; A 16-byte, 4-byte-aligned struct for the struct-copy test @t7 below.
%struct.Foo = type { i32, i32, i32, i32 }
360
; 16-byte struct copy with only align-4 known: aligned targets use four
; lw/sw pairs; RV64 with +unaligned-scalar-mem widens to two ld/sd pairs.
define void @t7(%struct.Foo* nocapture %a, %struct.Foo* nocapture %b) nounwind {
; RV32-LABEL: t7:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    lw a2, 12(a1)
; RV32-NEXT:    sw a2, 12(a0)
; RV32-NEXT:    lw a2, 8(a1)
; RV32-NEXT:    sw a2, 8(a0)
; RV32-NEXT:    lw a2, 4(a1)
; RV32-NEXT:    sw a2, 4(a0)
; RV32-NEXT:    lw a1, 0(a1)
; RV32-NEXT:    sw a1, 0(a0)
; RV32-NEXT:    ret
;
; RV64ALIGNED-LABEL: t7:
; RV64ALIGNED:       # %bb.0: # %entry
; RV64ALIGNED-NEXT:    lw a2, 12(a1)
; RV64ALIGNED-NEXT:    sw a2, 12(a0)
; RV64ALIGNED-NEXT:    lw a2, 8(a1)
; RV64ALIGNED-NEXT:    sw a2, 8(a0)
; RV64ALIGNED-NEXT:    lw a2, 4(a1)
; RV64ALIGNED-NEXT:    sw a2, 4(a0)
; RV64ALIGNED-NEXT:    lw a1, 0(a1)
; RV64ALIGNED-NEXT:    sw a1, 0(a0)
; RV64ALIGNED-NEXT:    ret
;
; RV64UNALIGNED-LABEL: t7:
; RV64UNALIGNED:       # %bb.0: # %entry
; RV64UNALIGNED-NEXT:    ld a2, 8(a1)
; RV64UNALIGNED-NEXT:    sd a2, 8(a0)
; RV64UNALIGNED-NEXT:    ld a1, 0(a1)
; RV64UNALIGNED-NEXT:    sd a1, 0(a0)
; RV64UNALIGNED-NEXT:    ret
entry:
  %0 = bitcast %struct.Foo* %a to i8*
  %1 = bitcast %struct.Foo* %b to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 16, i1 false)
  ret void
}
399
; memcpy intrinsics with i32 (used by @t0/@t7) and i64 (used by @t1..@t6)
; length types.
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
402