1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes
2; RUN: opt < %s -function-attrs -S | FileCheck %s
3; RUN: opt < %s -passes=function-attrs -S | FileCheck %s
4
5target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
6
7; Base case, empty function
define void @test1() {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
; CHECK-LABEL: @test1(
; CHECK-NEXT:    ret void
;
; Empty body: the pass should infer the full attribute set, including nosync.
  ret void
}
15
16; Show the bottom up walk
define void @test2() {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
; CHECK-LABEL: @test2(
; CHECK-NEXT:    call void @test1()
; CHECK-NEXT:    ret void
;
; Calls @test1 only, so the attributes deduced for @test1 propagate here.
  call void @test1()
  ret void
}
26
; External convergent function with no other attributes; used as the
; pessimizing callee in @test3.
declare void @unknown() convergent
28
29; Negative case with convergent function
define void @test3() convergent {
; CHECK: Function Attrs: convergent
; CHECK-LABEL: @test3(
; CHECK-NEXT:    call void @unknown()
; CHECK-NEXT:    ret void
;
; The call to the unannotated external @unknown blocks all further inference;
; only the explicit convergent attribute remains.
  call void @unknown()
  ret void
}
39
define i32 @test4(i32 %a, i32 %b) {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i32 [[A]]
;
; Pure integer arithmetic (the add result is unused); full attribute set expected.
  %add = add i32 %a, %b
  ret i32 %a
}
49
50; negative case - explicit sync
define void @test5(i8* %p) {
; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn
; CHECK-LABEL: @test5(
; CHECK-NEXT:    store atomic i8 0, i8* [[P:%.*]] seq_cst, align 1
; CHECK-NEXT:    ret void
;
; seq_cst atomic store: nosync must not appear in the inferred attributes.
  store atomic i8 0, i8* %p seq_cst, align 1
  ret void
}
60
61; negative case - explicit sync
define i8 @test6(i8* %p) {
; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn
; CHECK-LABEL: @test6(
; CHECK-NEXT:    [[V:%.*]] = load atomic i8, i8* [[P:%.*]] seq_cst, align 1
; CHECK-NEXT:    ret i8 [[V]]
;
; seq_cst atomic load: nosync must not appear in the inferred attributes.
  %v = load atomic i8, i8* %p seq_cst, align 1
  ret i8 %v
}
71
72; negative case - explicit sync
define void @test7(i8* %p) {
; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn
; CHECK-LABEL: @test7(
; CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw add i8* [[P:%.*]], i8 0 seq_cst, align 1
; CHECK-NEXT:    ret void
;
; seq_cst atomicrmw: nosync must not appear in the inferred attributes.
  atomicrmw add i8* %p, i8 0 seq_cst, align 1
  ret void
}
82
83; negative case - explicit sync
define void @test8(i8* %p) {
; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn
; CHECK-LABEL: @test8(
; CHECK-NEXT:    fence seq_cst
; CHECK-NEXT:    ret void
;
; System-scope seq_cst fence: nosync must not appear in the inferred attributes.
  fence seq_cst
  ret void
}
93
94; singlethread fences are okay
define void @test9(i8* %p) {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
; CHECK-LABEL: @test9(
; CHECK-NEXT:    fence syncscope("singlethread") seq_cst
; CHECK-NEXT:    ret void
;
; A singlethread-scope fence does not order with other threads, so nosync is kept.
  fence syncscope("singlethread") seq_cst
  ret void
}
104
105; atomic load with monotonic ordering
define i32 @load_monotonic(i32* nocapture readonly %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn uwtable
; CHECK-LABEL: @load_monotonic(
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] monotonic, align 4
; CHECK-NEXT:    ret i32 [[TMP2]]
;
; Monotonic atomic load: nosync is not in the expected attribute set.
  %2 = load atomic i32, i32* %0 monotonic, align 4
  ret i32 %2
}
115
116; atomic store with monotonic ordering.
define void @store_monotonic(i32* nocapture %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn uwtable
; CHECK-LABEL: @store_monotonic(
; CHECK-NEXT:    store atomic i32 10, i32* [[TMP0:%.*]] monotonic, align 4
; CHECK-NEXT:    ret void
;
; Monotonic atomic store: nosync is not in the expected attribute set.
  store atomic i32 10, i32* %0 monotonic, align 4
  ret void
}
126
127; negative, should not deduce nosync
128; atomic load with acquire ordering.
define i32 @load_acquire(i32* nocapture readonly %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn uwtable
; CHECK-LABEL: @load_acquire(
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] acquire, align 4
; CHECK-NEXT:    ret i32 [[TMP2]]
;
; Acquire-ordered load synchronizes; nosync must not be inferred.
  %2 = load atomic i32, i32* %0 acquire, align 4
  ret i32 %2
}
138
; positive: unordered atomic load is non-synchronizing.
define i32 @load_unordered(i32* nocapture readonly %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nosync nounwind readonly willreturn uwtable
; CHECK-LABEL: @load_unordered(
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] unordered, align 4
; CHECK-NEXT:    ret i32 [[TMP2]]
;
; Both nosync and readonly are expected for the unordered load.
  %2 = load atomic i32, i32* %0 unordered, align 4
  ret i32 %2
}
148
149; atomic store with unordered ordering.
define void @store_unordered(i32* nocapture %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nosync nounwind willreturn writeonly uwtable
; CHECK-LABEL: @store_unordered(
; CHECK-NEXT:    store atomic i32 10, i32* [[TMP0:%.*]] unordered, align 4
; CHECK-NEXT:    ret void
;
; Both nosync and writeonly are expected for the unordered store.
  store atomic i32 10, i32* %0 unordered, align 4
  ret void
}
159
160
161; negative, should not deduce nosync
; atomic store with release ordering (note: the body is a store, despite the
; "load" in the function name)
define void @load_release(i32* nocapture %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly nofree norecurse nounwind uwtable
; CHECK-LABEL: @load_release(
; CHECK-NEXT:    store atomic volatile i32 10, i32* [[TMP0:%.*]] release, align 4
; CHECK-NEXT:    ret void
;
; NOTE(review): despite the name, the body is a volatile release STORE and is
; identical to @load_volatile_release below; renaming would require
; regenerating the assertions, so the name is kept as-is.
  store atomic volatile i32 10, i32* %0 release, align 4
  ret void
}
172
; negative: volatile atomic store with release ordering
define void @load_volatile_release(i32* nocapture %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly nofree norecurse nounwind uwtable
; CHECK-LABEL: @load_volatile_release(
; CHECK-NEXT:    store atomic volatile i32 10, i32* [[TMP0:%.*]] release, align 4
; CHECK-NEXT:    ret void
;
; Volatile release store: neither nosync nor willreturn/mustprogress is expected.
  store atomic volatile i32 10, i32* %0 release, align 4
  ret void
}
183
184; volatile store.
define void @volatile_store(i32* %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly nofree norecurse nounwind uwtable
; CHECK-LABEL: @volatile_store(
; CHECK-NEXT:    store volatile i32 14, i32* [[TMP0:%.*]], align 4
; CHECK-NEXT:    ret void
;
; Non-atomic volatile store: nosync is not in the expected attribute set.
  store volatile i32 14, i32* %0, align 4
  ret void
}
194
195; negative, should not deduce nosync
196; volatile load.
define i32 @volatile_load(i32* %0) norecurse nounwind uwtable {
; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn uwtable
; CHECK-LABEL: @volatile_load(
; CHECK-NEXT:    [[TMP2:%.*]] = load volatile i32, i32* [[TMP0:%.*]], align 4
; CHECK-NEXT:    ret i32 [[TMP2]]
;
; Non-atomic volatile load: nosync is not in the expected attribute set.
  %2 = load volatile i32, i32* %0, align 4
  ret i32 %2
}
206
; External declaration that explicitly carries nosync; callers below may
; inherit it.
; CHECK: Function Attrs: noinline nosync nounwind uwtable
; CHECK-NEXT: declare void @nosync_function()
declare void @nosync_function() noinline nounwind uwtable nosync
210
define void @call_nosync_function() nounwind uwtable noinline {
; CHECK: Function Attrs: noinline nosync nounwind uwtable
; CHECK-LABEL: @call_nosync_function(
; CHECK-NEXT:    tail call void @nosync_function() #[[ATTR9:[0-9]+]]
; CHECK-NEXT:    ret void
;
; The only callee is the explicitly-nosync declaration, so nosync is inferred here.
  tail call void @nosync_function() noinline nounwind uwtable
  ret void
}
220
; External declaration WITHOUT nosync; its attributes must stay unchanged.
; CHECK: Function Attrs: noinline nounwind uwtable
; CHECK-NEXT: declare void @might_sync()
declare void @might_sync() noinline nounwind uwtable
224
define void @call_might_sync() nounwind uwtable noinline {
; CHECK: Function Attrs: noinline nounwind uwtable
; CHECK-LABEL: @call_might_sync(
; CHECK-NEXT:    tail call void @might_sync() #[[ATTR9]]
; CHECK-NEXT:    ret void
;
; The callee lacks nosync, so the caller must not be marked nosync either.
  tail call void @might_sync() noinline nounwind uwtable
  ret void
}
234
; Unsuffixed intrinsic declarations; the assertions below expect the calls to
; be remangled to the typed variants (e.g. @llvm.memcpy.p0i8.p0i8.i32).
declare void @llvm.memcpy(i8* %dest, i8* %src, i32 %len, i1 %isvolatile)
declare void @llvm.memset(i8* %dest, i8 %val, i32 %len, i1 %isvolatile)
237
238; negative, checking volatile intrinsics.
define i32 @memcpy_volatile(i8* %ptr1, i8* %ptr2) {
; CHECK: Function Attrs: argmemonly mustprogress nofree nounwind willreturn
; CHECK-LABEL: @memcpy_volatile(
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[PTR1:%.*]], i8* [[PTR2:%.*]], i32 8, i1 true)
; CHECK-NEXT:    ret i32 4
;
; The isvolatile flag is true, so nosync is not in the expected attribute set.
  call void @llvm.memcpy(i8* %ptr1, i8* %ptr2, i32 8, i1 1)
  ret i32 4
}
248
249; positive, non-volatile intrinsic.
define i32 @memset_non_volatile(i8* %ptr1, i8 %val) {
; CHECK: Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn writeonly
; CHECK-LABEL: @memset_non_volatile(
; CHECK-NEXT:    call void @llvm.memset.p0i8.i32(i8* [[PTR1:%.*]], i8 [[VAL:%.*]], i32 8, i1 false)
; CHECK-NEXT:    ret i32 4
;
; The isvolatile flag is false, so nosync (and writeonly) are expected.
  call void @llvm.memset(i8* %ptr1, i8 %val, i32 8, i1 0)
  ret i32 4
}
259
260; negative, inline assembly.
define i32 @inline_asm_test(i32 %x) {
; CHECK-LABEL: @inline_asm_test(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 asm "bswap $0", "=r,r"(i32 [[X:%.*]])
; CHECK-NEXT:    ret i32 4
;
; Inline assembly is opaque to the pass: no Function Attrs line is expected.
  call i32 asm "bswap $0", "=r,r"(i32 %x)
  ret i32 4
}
269
; Convergent + readnone declaration used as the callee in @convergent_readnone.
declare void @readnone_test() convergent readnone
271
272; negative. Convergent
define void @convergent_readnone(){
; CHECK: Function Attrs: nofree nosync readnone
; CHECK-LABEL: @convergent_readnone(
; CHECK-NEXT:    call void @readnone_test()
; CHECK-NEXT:    ret void
;
; Convergent callee: only nofree, nosync and readnone are expected here.
  call void @readnone_test()
  ret void
}
282
; Target intrinsic declaration; only nounwind is expected on it.
; CHECK: Function Attrs: nounwind
; CHECK-NEXT: declare void @llvm.x86.sse2.clflush(i8*)
declare void @llvm.x86.sse2.clflush(i8*)
; Global used as the clflush operand in @i_totally_sync below.
@a = common global i32 0, align 4
287
288; negative. Synchronizing intrinsic
define void @i_totally_sync() {
; CHECK: Function Attrs: nounwind
; CHECK-LABEL: @i_totally_sync(
; CHECK-NEXT:    tail call void @llvm.x86.sse2.clflush(i8* bitcast (i32* @a to i8*))
; CHECK-NEXT:    ret void
;
; The clflush intrinsic synchronizes, so only nounwind is expected.
  tail call void @llvm.x86.sse2.clflush(i8* bitcast (i32* @a to i8*))
  ret void
}
298
; readnone math intrinsic; its calls are remangled to @llvm.cos.f32 below.
declare float @llvm.cos(float %val) readnone
300
define float @cos_test(float %x) {
; CHECK: Function Attrs: mustprogress nofree nosync nounwind readnone willreturn
; CHECK-LABEL: @cos_test(
; CHECK-NEXT:    [[C:%.*]] = call float @llvm.cos.f32(float [[X:%.*]])
; CHECK-NEXT:    ret float [[C]]
;
; A readnone intrinsic call permits the full pure-function attribute set.
  %c = call float @llvm.cos(float %x)
  ret float %c
}
310