; Tests that SROA computes and propagates correct alignments when it rewrites
; allocas, memcpys, loads, and stores.
; RUN: opt < %s -passes=sroa -S | FileCheck %s
; RUN: opt -passes='debugify,function(sroa)' -S < %s | FileCheck %s -check-prefix DEBUGLOC

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)

define void @test1(ptr %a, ptr %b) {
; Copying through a 16-byte-aligned alloca of { i8, i8 } is split into
; per-byte loads/stores; byte 0 keeps align 16, byte 1 drops to align 1.
; CHECK-LABEL: @test1(
; CHECK: %[[gep_a0:.*]] = getelementptr { i8, i8 }, ptr %a, i32 0, i32 0
; CHECK: %[[gep_b0:.*]] = getelementptr { i8, i8 }, ptr %b, i32 0, i32 0
; CHECK: %[[a0:.*]] = load i8, ptr %[[gep_a0]], align 16
; CHECK: %[[gep_a1:.*]] = getelementptr inbounds i8, ptr %[[gep_a0]], i64 1
; CHECK: %[[a1:.*]] = load i8, ptr %[[gep_a1]], align 1
; CHECK: store i8 %[[a0]], ptr %[[gep_b0]], align 16
; CHECK: %[[gep_b1:.*]] = getelementptr inbounds i8, ptr %[[gep_b0]], i64 1
; CHECK: store i8 %[[a1]], ptr %[[gep_b1]], align 1
; CHECK: ret void

entry:
  %alloca = alloca { i8, i8 }, align 16
  %gep_a = getelementptr { i8, i8 }, ptr %a, i32 0, i32 0
  %gep_alloca = getelementptr { i8, i8 }, ptr %alloca, i32 0, i32 0
  %gep_b = getelementptr { i8, i8 }, ptr %b, i32 0, i32 0

  ; Dead store (overwritten by the first memcpy); SROA deletes it.
  store i8 420, ptr %gep_alloca, align 16

  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %gep_alloca, ptr align 16 %gep_a, i32 2, i1 false)
  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %gep_b, ptr align 16 %gep_alloca, i32 2, i1 false)
  ret void
}

define void @test2() {
; CHECK-LABEL: @test2(
; CHECK: alloca i16
; CHECK: load i8, ptr %{{.*}}
; CHECK: store i8 42, ptr %{{.*}}
; CHECK: ret void

; Check that when sroa rewrites the alloca partition
; it preserves the original DebugLocation.
; DEBUGLOC-LABEL: @test2(
; DEBUGLOC: {{.*}} = alloca {{.*}} !dbg ![[DbgLoc:[0-9]+]]
; DEBUGLOC-LABEL: }
;
; DEBUGLOC: ![[DbgLoc]] = !DILocation(line: 9,

; NOTE: debugify numbers instructions sequentially across the module; the
; alloca below appears to be the 9th instruction (8 precede it in @test1),
; hence "line 9". Do not add or remove instructions above this point.
entry:
  %a = alloca { i8, i8, i8, i8 }, align 2      ; "line 9" to -debugify
  %gep1 = getelementptr { i8, i8, i8, i8 }, ptr %a, i32 0, i32 1
  store volatile i16 0, ptr %gep1
  %gep2 = getelementptr { i8, i8, i8, i8 }, ptr %a, i32 0, i32 2
  %result = load i8, ptr %gep2
  store i8 42, ptr %gep2
  ret void
}

define void @PR13920(ptr %a, ptr %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
; CHECK-LABEL: @PR13920(
; CHECK: load <2 x i64>, ptr %a, align 2
; CHECK: store <2 x i64> {{.*}}, ptr {{.*}}, align 2
; CHECK: ret void

entry:
  %aa = alloca <2 x i64>, align 16
  call void @llvm.memcpy.p0.p0.i32(ptr align 2 %aa, ptr align 2 %a, i32 16, i1 false)
  call void @llvm.memcpy.p0.p0.i32(ptr align 2 %b, ptr align 2 %aa, i32 16, i1 false)
  ret void
}

define void @test3(ptr %x) {
; Test that when we promote an alloca to a type with lower ABI alignment, we
; provide the needed explicit alignment that code using the alloca may be
; expecting. However, also check that any offset within an alloca can in turn
; reduce the alignment.
; CHECK-LABEL: @test3(
; CHECK: alloca [22 x i8], align 8
; CHECK: alloca [18 x i8], align 2
; CHECK: ret void

entry:
  %a = alloca { ptr, ptr, ptr }
  %b = alloca { ptr, ptr, ptr }
  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %a, ptr align 8 %x, i32 22, i1 false)
  ; The offset-6 GEP into %b limits the usable alignment of its slice to 2.
  %b_gep = getelementptr i8, ptr %b, i32 6
  call void @llvm.memcpy.p0.p0.i32(ptr align 2 %b_gep, ptr align 2 %x, i32 18, i1 false)
  ret void
}

define void @test5() {
; Test that we preserve underaligned loads and stores when splitting. The use
; of volatile in this test case is just to force the loads and stores to not be
; split or promoted out of existence.
;
; CHECK-LABEL: @test5(
; CHECK: alloca [9 x i8]
; CHECK: alloca [9 x i8]
; CHECK: store volatile double 0.0{{.*}}, ptr %{{.*}}, align 1
; CHECK: load volatile i16, ptr %{{.*}}, align 1
; CHECK: load double, ptr %{{.*}}, align 1
; CHECK: store volatile double %{{.*}}, ptr %{{.*}}, align 1
; CHECK: load volatile i16, ptr %{{.*}}, align 1
; CHECK: ret void

entry:
  %a = alloca [18 x i8]
  store volatile double 0.0, ptr %a, align 1
  ; Accesses at odd offsets (7 and 16) straddle the two 9-byte partitions.
  %weird_gep1 = getelementptr inbounds [18 x i8], ptr %a, i32 0, i32 7
  %weird_load1 = load volatile i16, ptr %weird_gep1, align 1

  %raw2 = getelementptr inbounds [18 x i8], ptr %a, i32 0, i32 9
  %d1 = load double, ptr %a, align 1
  store volatile double %d1, ptr %raw2, align 1
  %weird_gep2 = getelementptr inbounds [18 x i8], ptr %a, i32 0, i32 16
  %weird_load2 = load volatile i16, ptr %weird_gep2, align 1

  ret void
}

define void @test6() {
; We should set the alignment on all load and store operations; make sure
; we choose an appropriate alignment.
; CHECK-LABEL: @test6(
; CHECK: alloca double, align 8{{$}}
; CHECK: alloca double, align 8{{$}}
; CHECK: store{{.*}}, align 8
; CHECK: load{{.*}}, align 8
; CHECK: store{{.*}}, align 8
; CHECK-NOT: align
; CHECK: ret void

entry:
  %a = alloca [16 x i8]
  store volatile double 0.0, ptr %a, align 1

  %raw2 = getelementptr inbounds [16 x i8], ptr %a, i32 0, i32 8
  %val = load double, ptr %a, align 1
  store volatile double %val, ptr %raw2, align 1

  ret void
}

define void @test7(ptr %out) {
; Test that we properly compute the destination alignment when rewriting
; memcpys as direct loads or stores.
; CHECK-LABEL: @test7(
; CHECK-NOT: alloca

entry:
  %a = alloca [16 x i8]
  %raw2 = getelementptr inbounds [16 x i8], ptr %a, i32 0, i32 8

  call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr %out, i32 16, i1 false)
; CHECK: %[[val2:.*]] = load double, ptr %{{.*}}, align 1
; CHECK: %[[val1:.*]] = load double, ptr %{{.*}}, align 1

  %val1 = load double, ptr %raw2, align 1
  %val2 = load double, ptr %a, align 1

  store double %val1, ptr %a, align 1
  store double %val2, ptr %raw2, align 1

  call void @llvm.memcpy.p0.p0.i32(ptr %out, ptr %a, i32 16, i1 false)
; CHECK: store double %[[val1]], ptr %{{.*}}, align 1
; CHECK: store double %[[val2]], ptr %{{.*}}, align 1

  ret void
; CHECK: ret void
}

define void @test8() {
; An align-1 array alloca: every split element load stays align 1.
; CHECK-LABEL: @test8(
; CHECK: load i32, {{.*}}, align 1
; CHECK: load i32, {{.*}}, align 1
; CHECK: load i32, {{.*}}, align 1
; CHECK: load i32, {{.*}}, align 1
; CHECK: load i32, {{.*}}, align 1

  %ptr = alloca [5 x i32], align 1
  call void @populate(ptr %ptr)
  %val = load [5 x i32], ptr %ptr, align 1
  ret void
}

define void @test9() {
; An align-8 array alloca: element loads alternate align 8 (even offsets)
; and align 4 (odd offsets) as the offset within the alloca advances.
; CHECK-LABEL: @test9(
; CHECK: load i32, {{.*}}, align 8
; CHECK: load i32, {{.*}}, align 4
; CHECK: load i32, {{.*}}, align 8
; CHECK: load i32, {{.*}}, align 4
; CHECK: load i32, {{.*}}, align 8

  %ptr = alloca [5 x i32], align 8
  call void @populate(ptr %ptr)
  %val = load [5 x i32], ptr %ptr, align 8
  ret void
}

define void @test10() {
; An align-2 struct alloca with mixed member sizes: each split load gets the
; largest alignment its offset from the align-2 base allows.
; CHECK-LABEL: @test10(
; CHECK: load i32, {{.*}}, align 2
; CHECK: load i8, {{.*}}, align 2
; CHECK: load i8, {{.*}}, align 1
; CHECK: load i8, {{.*}}, align 2
; CHECK: load i16, {{.*}}, align 2

  %ptr = alloca {i32, i8, i8, {i8, i16}}, align 2
  call void @populate(ptr %ptr)
  %val = load {i32, i8, i8, {i8, i16}}, ptr %ptr, align 2
  ret void
}

%struct = type { i32, i32 }
define dso_local i32 @pr45010(ptr %A) {
; Atomic volatile accesses must keep their alignment when SROA rewrites the
; alloca partition.
; CHECK-LABEL: @pr45010
; CHECK: load atomic volatile i32, {{.*}}, align 4

  %B = alloca %struct, align 4
  %1 = load i32, ptr %A, align 4
  store atomic volatile i32 %1, ptr %B release, align 4
  %x = load atomic volatile i32, ptr %B acquire, align 4
  ret i32 %x
}

declare void @populate(ptr)