/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as
 * an array of these. The embedded codetag ties each entry into the codetag
 * framework.
 */
struct alloc_tag {
	struct codetag			ct;
	struct alloc_tag_counters __percpu	*counters;
} __aligned(8);

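/*
 * Illustrative sketch (not part of this header): given linker-provided
 * section bounds, the "alloc_tags" section can be walked as a plain array
 * of struct alloc_tag. The __start_alloc_tags/__stop_alloc_tags symbol
 * names below are assumptions for the example only.
 *
 *	extern struct alloc_tag __start_alloc_tags[];
 *	extern struct alloc_tag __stop_alloc_tags[];
 *
 *	static void walk_alloc_tags(void)
 *	{
 *		struct alloc_tag *tag;
 *
 *		for (tag = __start_alloc_tags; tag < __stop_alloc_tags; tag++)
 *			pr_info("%s:%u\n", tag->ct.filename, tag->ct.lineno);
 *	}
 */
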
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

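/*
 * Usage sketch (hypothetical caller): objects whose allocation is
 * deliberately not tracked mark their reference with CODETAG_EMPTY so the
 * debug checks in alloc_tag_sub() below do not warn about a missing tag
 * when the object is freed. get_obj_codetag_ref() is a stand-in name for
 * the example, not an API defined here.
 *
 *	union codetag_ref *ref = get_obj_codetag_ref(obj);
 *
 *	set_codetag_empty(ref);		// deliberately untracked
 */
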
#ifdef CONFIG_MEM_ALLOC_PROFILING

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

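/*
 * Example call (illustrative; the buffer size is an arbitrary choice):
 * fetch the callsites currently holding the most bytes.
 *
 *	struct codetag_bytes top[10];
 *	size_t nr = alloc_tag_top_users(top, ARRAY_SIZE(top), true);
 *
 * The return value is assumed to be the number of entries filled in.
 */
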
static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
 * Instead we account all module allocations to a single shared counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section("alloc_tags") = {						\
		.ct = CODE_TAG_INIT,						\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);	\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section("alloc_tags") = {						\
		.ct = CODE_TAG_INIT,						\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}

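/*
 * Typical guard pattern (sketch of how hook code is expected to use the
 * key): checking the static branch first keeps the disabled case down to
 * a patched-out jump, so no counters are touched.
 *
 *	if (mem_alloc_profiling_enabled())
 *		alloc_tag_add(ref, current->alloc_tag, size);
 */
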
static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}

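/*
 * Example (illustrative): aggregate a tag's per-CPU counters for
 * reporting. The totals are only a snapshot, since other CPUs may be
 * updating their counters concurrently.
 *
 *	struct alloc_tag_counters v = alloc_tag_read(tag);
 *
 *	pr_info("%s:%u: %llu bytes in %llu calls\n",
 *		tag->ct.filename, tag->ct.lineno, v.bytes, v.calls);
 */
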
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct,
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set\n");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	ref->ct = &tag->ct;
	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter because when we free each part the counter will be
	 * decremented.
	 */
	this_cpu_inc(tag->counters->calls);
}

static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
	this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}

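/*
 * Sketch of the accounting lifetime (hypothetical allocator glue; the
 * codetag_ref is assumed to be stored alongside the object):
 *
 *	alloc_tag_add(ref, current->alloc_tag, size);	// at allocation
 *	...
 *	alloc_tag_sub(ref, size);			// at free
 *
 * alloc_tag_add() points ref->ct at the tag and bumps both counters;
 * alloc_tag_sub() reverses both and clears ref->ct, which lets debug
 * builds catch a reference that is reused without being cleared.
 */
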
#define alloc_tag_record(p)	((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})

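/*
 * Usage sketch: allocator entry points wrap their untagged implementation
 * with alloc_hooks() so each callsite gets its own tag (this mirrors the
 * kmalloc()/kmalloc_noprof() pattern; my_alloc_noprof() is a stand-in
 * name for the example):
 *
 *	#define my_alloc(size, gfp)	alloc_hooks(my_alloc_noprof(size, gfp))
 *
 * Every expansion site then carries a distinct struct alloc_tag in the
 * "alloc_tags" section, charged via current->alloc_tag while
 * my_alloc_noprof() runs.
 */
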
#endif /* _LINUX_ALLOC_TAG_H */