1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -passes=sroa -S | FileCheck %s
3target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-f80:128-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
4
5declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
6declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) nounwind
7declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
8
9; This tests that allocas are not split into slices whose size is not a multiple of a byte.
10define void @no_split_on_non_byte_width(i32) {
11; CHECK-LABEL: @no_split_on_non_byte_width(
12; CHECK-NEXT:    [[ARG_SROA_0:%.*]] = alloca i8, align 8
13; CHECK-NEXT:    [[ARG_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0:%.*]] to i8
14; CHECK-NEXT:    store i8 [[ARG_SROA_0_0_EXTRACT_TRUNC]], ptr [[ARG_SROA_0]], align 8
15; CHECK-NEXT:    [[ARG_SROA_3_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[TMP0]], 8
16; CHECK-NEXT:    [[ARG_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[ARG_SROA_3_0_EXTRACT_SHIFT]] to i24
17; CHECK-NEXT:    br label [[LOAD_I32:%.*]]
18; CHECK:       load_i32:
19; CHECK-NEXT:    [[ARG_SROA_0_0_ARG_SROA_0_0_R01:%.*]] = load i8, ptr [[ARG_SROA_0]], align 8
20; CHECK-NEXT:    br label [[LOAD_I1:%.*]]
21; CHECK:       load_i1:
22; CHECK-NEXT:    [[ARG_SROA_0_0_ARG_SROA_0_0_T1:%.*]] = load i1, ptr [[ARG_SROA_0]], align 8
23; CHECK-NEXT:    ret void
24;
  ; Store an i32, then read it back both as a whole i32 and as an i1.
  ; The i1 load covers only a single bit, but the resulting low slice must
  ; still be a whole byte wide (note the "alloca i8" in the CHECK lines
  ; above), never a sub-byte i1-sized slice.
25  %arg = alloca i32 , align 8
26  store i32 %0, ptr %arg
27  br label %load_i32
28
29load_i32:
30  %r0 = load i32, ptr %arg
31  br label %load_i1
32
33load_i1:
  ; The 1-bit load is served from the same byte-wide slice as %r0.
34  %t1 = load i1, ptr %arg
35  ret void
36}
37
38; PR18726: Check that we use memcpy and memset to fill out padding when we have
39; a slice with a simple single type whose store size is smaller than the slice
40; size.
41
42%union.Foo = type { x86_fp80, i64, i64 }
43
44@foo_copy_source = external constant %union.Foo
45@i64_sink = global i64 0
46
47define void @memcpy_fp80_padding() {
48; CHECK-LABEL: @memcpy_fp80_padding(
49; CHECK-NEXT:    [[X_SROA_0:%.*]] = alloca x86_fp80, align 16
50; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[X_SROA_0]], ptr align 16 @foo_copy_source, i32 16, i1 false)
51; CHECK-NEXT:    [[X_SROA_1_0_COPYLOAD:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @foo_copy_source, i64 16), align 16
52; CHECK-NEXT:    [[X_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @foo_copy_source, i64 24), align 8
53; CHECK-NEXT:    store i64 [[X_SROA_1_0_COPYLOAD]], ptr @i64_sink, align 4
54; CHECK-NEXT:    ret void
55;
  ; The x86_fp80 slice's store size is smaller than its 16-byte slice size
  ; (f80 is padded to 128 bits per the datalayout at the top of the file),
  ; so the copy for that slice must remain a 16-byte memcpy (see CHECK
  ; above) rather than a scalar load/store that would drop padding bytes.
56  %x = alloca %union.Foo
57
58  ; Copy from a global.
59  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x, ptr align 16 @foo_copy_source, i32 32, i1 false)
60
61  ; Access a slice of the alloca to trigger SROA.
62  %mid_p = getelementptr %union.Foo, ptr %x, i32 0, i32 1
63  %elt = load i64, ptr %mid_p
64  store i64 %elt, ptr @i64_sink
65  ret void
66}
67
68define void @memset_fp80_padding() {
69; CHECK-LABEL: @memset_fp80_padding(
70; CHECK-NEXT:    [[X_SROA_0:%.*]] = alloca x86_fp80, align 16
71; CHECK-NEXT:    call void @llvm.memset.p0.i32(ptr align 16 [[X_SROA_0]], i8 -1, i32 16, i1 false)
72; CHECK-NEXT:    store i64 -1, ptr @i64_sink, align 4
73; CHECK-NEXT:    ret void
74;
  ; Same as @memcpy_fp80_padding but for memset: the x86_fp80 slice must
  ; keep a 16-byte memset covering its padding bytes (see CHECK above)
  ; instead of being rewritten to a plain x86_fp80 store.
75  %x = alloca %union.Foo
76
77  ; Set to all ones.
78  call void @llvm.memset.p0.i32(ptr align 16 %x, i8 -1, i32 32, i1 false)
79
80  ; Access a slice of the alloca to trigger SROA.
81  %mid_p = getelementptr %union.Foo, ptr %x, i32 0, i32 1
82  %elt = load i64, ptr %mid_p
83  store i64 %elt, ptr @i64_sink
84  ret void
85}
86
87%S.vec3float = type { float, float, float }
88%U.vec3float = type { <4 x float> }
89
90declare i32 @memcpy_vec3float_helper(ptr)
91
92; PR18726: Check that SROA does not rewrite a 12-byte memcpy into a 16-byte
93; vector store, hence accidentally putting gibberish onto the stack.
94define i32 @memcpy_vec3float_widening(ptr %x) {
95; CHECK-LABEL: @memcpy_vec3float_widening(
96; CHECK-NEXT:  entry:
97; CHECK-NEXT:    [[TMP1_SROA_0_0_COPYLOAD:%.*]] = load <3 x float>, ptr [[X:%.*]], align 4
98; CHECK-NEXT:    [[TMP1_SROA_0_0_VEC_EXPAND:%.*]] = shufflevector <3 x float> [[TMP1_SROA_0_0_COPYLOAD]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
99; CHECK-NEXT:    [[TMP1_SROA_0_0_VECBLEND:%.*]] = select <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> [[TMP1_SROA_0_0_VEC_EXPAND]], <4 x float> undef
100; CHECK-NEXT:    [[TMP2:%.*]] = alloca [[S_VEC3FLOAT:%.*]], align 4
101; CHECK-NEXT:    [[TMP1_SROA_0_0_VEC_EXTRACT:%.*]] = shufflevector <4 x float> [[TMP1_SROA_0_0_VECBLEND]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
102; CHECK-NEXT:    store <3 x float> [[TMP1_SROA_0_0_VEC_EXTRACT]], ptr [[TMP2]], align 4
103; CHECK-NEXT:    [[RESULT:%.*]] = call i32 @memcpy_vec3float_helper(ptr [[TMP2]])
104; CHECK-NEXT:    ret i32 [[RESULT]]
105;
106entry:
107  ; Create a temporary variable %tmp1 and copy %x[0] into it
108  %tmp1 = alloca %S.vec3float, align 4
109  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %tmp1, ptr align 4 %x, i32 12, i1 false)
110
  ; The following load is otherwise unused; its <4 x float> type is what
  ; historically tempted SROA into widening the 12-byte slice to 16 bytes
  ; (PR18726). Per the CHECK lines, the copies must stay <3 x float>.
111  ; The following block does nothing; but appears to confuse SROA
112  %unused3 = load <4 x float>, ptr %tmp1, align 1
113
114  ; Create a second temporary and copy %tmp1 into it
115  %tmp2 = alloca %S.vec3float, align 4
116  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %tmp2, ptr align 4 %tmp1, i32 12, i1 false)
117
  ; %tmp2 escapes to a call, so its 12 bytes must hold the copied data.
118  %result = call i32 @memcpy_vec3float_helper(ptr %tmp2)
119  ret i32 %result
120}
121
122; Don't crash on length that is constant expression.
123
124define void @PR50888() {
125; CHECK-LABEL: @PR50888(
126; CHECK-NEXT:    [[ARRAY:%.*]] = alloca i8, align 1
127; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[ARRAY]], i8 0, i64 ptrtoint (ptr @PR50888 to i64), i1 false)
128; CHECK-NEXT:    ret void
129;
130  %array = alloca i8
  ; The memset length is a ptrtoint constant expression, not a plain
  ; integer constant; SROA must keep the call intact (see CHECK above)
  ; rather than crash trying to interpret the length.
131  call void @llvm.memset.p0.i64(ptr align 16 %array, i8 0, i64 ptrtoint (ptr @PR50888 to i64), i1 false)
132  ret void
133}
134
135; Don't crash on out-of-bounds length.
136
137define void @PR50910() {
138; CHECK-LABEL: @PR50910(
139; CHECK-NEXT:    [[T1:%.*]] = alloca i8, i64 1, align 8
140; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[T1]], i8 0, i64 1, i1 false)
141; CHECK-NEXT:    ret void
142;
143  %t1 = alloca i8, i64 1, align 8
  ; The memset length (2^32) is far larger than the 1-byte alloca; SROA
  ; must not crash on the out-of-bounds access. (Per the CHECK lines the
  ; emitted memset covers only the 1 in-bounds byte.)
144  call void @llvm.memset.p0.i64(ptr align 8 %t1, i8 0, i64 4294967296, i1 false)
145  ret void
146}
147
148define i1 @presplit_overlarge_load() {
149; CHECK-LABEL: @presplit_overlarge_load(
150; CHECK-NEXT:    [[A_SROA_0:%.*]] = alloca i8, align 2
151; CHECK-NEXT:    [[A_SROA_0_0_A_SROA_0_0_L11:%.*]] = load i8, ptr [[A_SROA_0]], align 2
152; CHECK-NEXT:    [[A_SROA_0_0_A_SROA_0_0_L2:%.*]] = load i1, ptr [[A_SROA_0]], align 2
153; CHECK-NEXT:    ret i1 [[A_SROA_0_0_A_SROA_0_0_L2]]
154;
155  %A = alloca i16
  ; %L1 reads 4 bytes from a 2-byte alloca — an over-large load. SROA's
  ; load pre-splitting must handle it without crashing; per the CHECK
  ; lines, both loads end up served from a single byte-wide slice.
156  %L1 = load i32, ptr %A
157  %L2 = load i1, ptr %A
158  ret i1 %L2
159}
160