; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=pentium2 -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mcpu=pentium3 -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=XMM
; RUN: llc < %s -mcpu=bdver1   -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=YMM
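; The three prefixes cover the widest store each CPU can use for an inlined
; memset: pentium2 has no SSE, so X86 checks scalar movl stores; pentium3 has
; SSE1, so XMM checks 16-byte movaps stores; bdver1 has AVX, so YMM checks a
; single 32-byte vmovaps store.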

%struct.x = type { i16, i16 }

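; @t zeroes a 32-byte local array with an align-8 memset; the call should be
; lowered inline to zeroing stores (using the widest store each CPU offers)
; rather than emitted as a call to the memset library function.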
define void @t() nounwind  {
; X86-LABEL: t:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    subl $44, %esp
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%esp)
; X86-NEXT:    calll _foo
; X86-NEXT:    addl $44, %esp
; X86-NEXT:    retl
;
; XMM-LABEL: t:
; XMM:       ## %bb.0: ## %entry
; XMM-NEXT:    subl $60, %esp
; XMM-NEXT:    xorps %xmm0, %xmm0
; XMM-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
; XMM-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
; XMM-NEXT:    leal {{[0-9]+}}(%esp), %eax
; XMM-NEXT:    movl %eax, (%esp)
; XMM-NEXT:    calll _foo
; XMM-NEXT:    addl $60, %esp
; XMM-NEXT:    retl
;
; YMM-LABEL: t:
; YMM:       ## %bb.0: ## %entry
; YMM-NEXT:    pushl %ebp
; YMM-NEXT:    movl %esp, %ebp
; YMM-NEXT:    andl $-32, %esp
; YMM-NEXT:    subl $96, %esp
; YMM-NEXT:    leal {{[0-9]+}}(%esp), %eax
; YMM-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; YMM-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
; YMM-NEXT:    movl %eax, (%esp)
; YMM-NEXT:    vzeroupper
; YMM-NEXT:    calll _foo
; YMM-NEXT:    movl %ebp, %esp
; YMM-NEXT:    popl %ebp
; YMM-NEXT:    retl
entry:
	%up_mvd = alloca [8 x %struct.x]		; <ptr> [#uses=2]
	%up_mvd116 = getelementptr [8 x %struct.x], ptr %up_mvd, i32 0, i32 0		; <ptr> [#uses=1]

	call void @llvm.memset.p0.i64(ptr align 8 %up_mvd, i8 0, i64 32, i1 false)
	call void @foo( ptr %up_mvd116 ) nounwind
	ret void
}

declare void @foo(ptr)

declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind

; Ensure that an @llvm.memset intrinsic with no align attribute (formerly an
; alignment of '0') results in unaligned loads and stores.
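; On AVX this lowers to one unaligned 16-byte vmovups plus a movb for the
; trailing byte; the older CPUs use scalar movl/movb stores, presumably
; because unaligned 16-byte SSE stores are not fast on them.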
define void @PR15348(ptr %a) {
; X86-LABEL: PR15348:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movb $0, 16(%eax)
; X86-NEXT:    movl $0, 12(%eax)
; X86-NEXT:    movl $0, 8(%eax)
; X86-NEXT:    movl $0, 4(%eax)
; X86-NEXT:    movl $0, (%eax)
; X86-NEXT:    retl
;
; XMM-LABEL: PR15348:
; XMM:       ## %bb.0:
; XMM-NEXT:    movl {{[0-9]+}}(%esp), %eax
; XMM-NEXT:    movb $0, 16(%eax)
; XMM-NEXT:    movl $0, 12(%eax)
; XMM-NEXT:    movl $0, 8(%eax)
; XMM-NEXT:    movl $0, 4(%eax)
; XMM-NEXT:    movl $0, (%eax)
; XMM-NEXT:    retl
;
; YMM-LABEL: PR15348:
; YMM:       ## %bb.0:
; YMM-NEXT:    movl {{[0-9]+}}(%esp), %eax
; YMM-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; YMM-NEXT:    vmovups %xmm0, (%eax)
; YMM-NEXT:    movb $0, 16(%eax)
; YMM-NEXT:    retl
  call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 17, i1 false)
  ret void
}