; Module configuration: little-endian AArch64 Linux target.
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux"

; #0: never inline, instrument for MTE stack tagging; requires MTE and NEON.
attributes #0 = { noinline sanitize_memtag "target-features"="+mte,+neon" }
5
; Stores one zero byte through %p. Access range relative to %p: [0, 1).
define dso_local void @Write1(i8* %p) #0 {
entry:
  store i8 0, i8* %p, align 1
  ret void
}
11
; Stores a 4-byte zero through %p (align 1: no alignment assumed).
; Access range relative to %p: [0, 4).
define dso_local void @Write4(i8* %p) #0 {
entry:
  %0 = bitcast i8* %p to i32*
  store i32 0, i32* %0, align 1
  ret void
}
18
; Stores a 4-byte zero through each of %p and %q independently.
; Access range [0, 4) for both arguments.
define dso_local void @Write4_2(i8* %p, i8* %q) #0 {
entry:
  %0 = bitcast i8* %p to i32*
  store i32 0, i32* %0, align 1
  %1 = bitcast i8* %q to i32*
  store i32 0, i32* %1, align 1
  ret void
}
27
; Stores an 8-byte zero through %p (align 1: no alignment assumed).
; Access range relative to %p: [0, 8).
define dso_local void @Write8(i8* %p) #0 {
entry:
  %0 = bitcast i8* %p to i64*
  store i64 0, i64* %0, align 1
  ret void
}
34
; Stores one zero byte through %p, then returns %p itself — the argument
; escapes via the return value.
define dso_local i8* @WriteAndReturn8(i8* %p) #0 {
entry:
  store i8 0, i8* %p, align 1
  ret i8* %p
}
40
; External function with unknown behavior; callers cannot bound its accesses.
declare dso_local void @ExternalCall(i8* %p)
42
; Same one-byte store as @Write1, but dso_preemptable: the definition may be
; replaced (interposed) at link/load time, so it cannot be analyzed by name.
define dso_preemptable void @PreemptableWrite1(i8* %p) #0 {
entry:
  store i8 0, i8* %p, align 1
  ret void
}
48
; Same one-byte store as @Write1, but linkonce: another definition may be
; chosen at link time, making the visible body non-authoritative.
define linkonce dso_local void @InterposableWrite1(i8* %p) #0 {
entry:
  store i8 0, i8* %p, align 1
  ret void
}
54
; No memory access; returns %p advanced by 2 bytes, so the result is
; derived from (dependent on) the argument.
define dso_local i8* @ReturnDependent(i8* %p) #0 {
entry:
  %p2 = getelementptr i8, i8* %p, i64 2
  ret i8* %p2
}
60
; Calls @Write4 at %p+2 => access range [2, 6) relative to %p.
define dso_local void @Rec0(i8* %p) #0 {
entry:
  %p1 = getelementptr i8, i8* %p, i64 2
  call void @Write4(i8* %p1)
  ret void
}
68
; Calls @Rec0 at %p+1 => access range [3, 7) relative to %p.
define dso_local void @Rec1(i8* %p) #0 {
entry:
  %p1 = getelementptr i8, i8* %p, i64 1
  call void @Rec0(i8* %p1)
  ret void
}
76
; Calls @Rec1 at %p-5 => access range [-2, 2) relative to %p
; (partly below the pointer).
define dso_local void @Rec2(i8* %p) #0 {
entry:
  %p1 = getelementptr i8, i8* %p, i64 -5
  call void @Rec1(i8* %p1)
  ret void
}
84
; Recursive function that passes %acc unchanged => access range [0, 4).
; Walks %size elements starting at %p, summing them into *%acc; the
; recursive call advances %p but never %acc.
define dso_local void @RecursiveNoOffset(i32* %p, i32 %size, i32* %acc) {
entry:
  %cmp = icmp eq i32 %size, 0
  br i1 %cmp, label %return, label %if.end

if.end:                                           ; preds = %entry
  %0 = load i32, i32* %p, align 4
  %1 = load i32, i32* %acc, align 4
  %add = add nsw i32 %1, %0
  store i32 %add, i32* %acc, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  %sub = add nsw i32 %size, -1
  tail call void @RecursiveNoOffset(i32* %add.ptr, i32 %sub, i32* %acc)
  ret void

return:                                           ; preds = %entry
  ret void
}
104
; Recursive function that advances %acc on each iteration => access range unlimited.
; Zeroes %size consecutive i32 slots starting at %acc; each recursive call
; moves %acc forward by one element, so the offset grows without bound.
define dso_local void @RecursiveWithOffset(i32 %size, i32* %acc) {
entry:
  %cmp = icmp eq i32 %size, 0
  br i1 %cmp, label %return, label %if.end

if.end:                                           ; preds = %entry
  store i32 0, i32* %acc, align 4
  %acc2 = getelementptr inbounds i32, i32* %acc, i64 1
  %sub = add nsw i32 %size, -1
  tail call void @RecursiveWithOffset(i32 %sub, i32* %acc2)
  ret void

return:                                           ; preds = %entry
  ret void
}
121
; Returns the address of a local alloca — the stack slot escapes the
; function, so it is not safe to elide tagging/checks for it.
define dso_local i64* @ReturnAlloca() {
entry:
  %x = alloca i64, align 4
  ret i64* %x
}
127
; Forwards %p to the private @Private, which stores at %p-1 =>
; effective access range [-1, 0) relative to %p.
define dso_local void @Write1Private(i8* %p) #0 {
entry:
  call void @Private(i8* %p)
  ret void
}
133
; Forwards %p to @Write1 defined in this module => access range [0, 1).
define dso_local void @Write1SameModule(i8* %p) #0 {
entry:
  call void @Write1(i8* %p)
  ret void
}
139
; Declared here, defined in another module; behavior unknown locally.
declare void @Write1Module0(i8* %p)
141
; Forwards %p to @Write1Module0, whose definition lives in a different
; module, so its access range is not visible here.
define dso_local void @Write1DiffModule(i8* %p) #0 {
entry:
  call void @Write1Module0(i8* %p)
  ret void
}
147
; Private helper: stores one zero byte at %p-1 (one byte BELOW the
; argument pointer). Access range [-1, 0).
define private dso_local void @Private(i8* %p) #0 {
entry:
  %p1 = getelementptr i8, i8* %p, i64 -1
  store i8 0, i8* %p1, align 1
  ret void
}
154
; Forwards %p to @Weak; since @Weak is interposable, the visible body
; below cannot be trusted for analysis through this call.
define dso_local void @Write1Weak(i8* %p) #0 {
entry:
  call void @Weak(i8* %p)
  ret void
}
160
; Weak definition (may be replaced at link time): stores one zero byte
; at %p-1. Access range [-1, 0) for this body.
define weak dso_local void @Weak(i8* %p) #0 {
entry:
  %p1 = getelementptr i8, i8* %p, i64 -1
  store i8 0, i8* %p1, align 1
  ret void
}