xref: /linux-6.15/include/linux/alloc_tag.h (revision 0db6f8d7)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as
 * an array of these. The embedded codetag uses the codetag framework.
 */
struct alloc_tag {
	struct codetag			ct;
	struct alloc_tag_counters __percpu	*counters;
} __aligned(8);
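
/*
 * Illustrative sketch (not part of this header): because every
 * DEFINE_ALLOC_TAG() instance lands in the same ELF section, a consumer can
 * treat the section as an array and walk it. The boundary symbol names below
 * are hypothetical and only meant to show the idea:
 *
 *	extern struct alloc_tag __start_alloc_tags[], __stop_alloc_tags[];
 *	struct alloc_tag *tag;
 *
 *	for (tag = __start_alloc_tags; tag < __stop_alloc_tags; tag++)
 *		pr_info("%s:%u\n", tag->ct.filename, tag->ct.lineno);
 */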

struct alloc_tag_module_section {
	unsigned long start_addr;
	unsigned long end_addr;
	/* used size */
	unsigned long size;
};

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}
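
/*
 * Usage sketch (illustrative only, ref and size are placeholders): a caller
 * that deliberately leaves an allocation unaccounted can mark the reference
 * so the debug warnings stay quiet; alloc_tag_sub() recognises the marker
 * and simply clears it:
 *
 *	set_codetag_empty(&ref);	// intentionally not accounted
 *	...
 *	alloc_tag_sub(&ref, size);	// no counters touched, ref cleared
 */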

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

#ifdef CONFIG_MEM_ALLOC_PROFILING

#define ALLOC_TAG_SECTION_NAME	"alloc_tags"

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
 * Instead we will account all module allocations to a single counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);	\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section(ALLOC_TAG_SECTION_NAME) = {					\
		.ct = CODE_TAG_INIT,						\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}
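
/*
 * Typical usage (sketch, not a definition from this header; ref and size are
 * placeholders): fast paths check the static key before doing any accounting
 * work, e.g.:
 *
 *	if (mem_alloc_profiling_enabled())
 *		alloc_tag_add(ref, current->alloc_tag, size);
 */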

static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}
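
/*
 * Reader sketch (illustrative): reporting code sums the per-CPU counters
 * through alloc_tag_read() and prints one line per callsite, roughly:
 *
 *	struct alloc_tag_counters v = alloc_tag_read(tag);
 *
 *	seq_printf(m, "%12llu %8llu %s:%u\n", v.bytes, v.calls,
 *		   tag->ct.filename, tag->ct.lineno);
 */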

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct,
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set\n");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify that both ref and tag are valid */
static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return false;

	ref->ct = &tag->ct;
	return true;
}

static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	if (unlikely(!__alloc_tag_ref_set(ref, tag)))
		return false;

	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter because the counter will be decremented when each part
	 * is freed.
	 */
	this_cpu_inc(tag->counters->calls);
	return true;
}
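
/*
 * Splitting sketch (illustrative; refs, nr_pieces and tag are placeholders):
 * when one large allocation is carved into several pieces, each piece's
 * reference is set against the same tag, so the call counter grows once per
 * piece and each later free undoes exactly one of those increments:
 *
 *	for (i = 0; i < nr_pieces; i++)
 *		alloc_tag_ref_set(&refs[i], tag);	// calls += nr_pieces
 */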

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	if (likely(alloc_tag_ref_set(ref, tag)))
		this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}
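
/*
 * Lifecycle sketch (illustrative only; obj, ref and size are placeholders):
 * an allocator keeps one codetag_ref per allocation and pairs the two
 * helpers around it:
 *
 *	alloc_tag_add(&obj->ref, current->alloc_tag, size);	// at allocation
 *	...
 *	alloc_tag_sub(&obj->ref, size);				// at free
 */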

#define alloc_tag_record(p)	((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
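
/*
 * Usage sketch: an allocation API is typically split into an uninstrumented
 * implementation plus a wrapper macro, so every callsite gets its own tag.
 * The names below are illustrative rather than taken from this header:
 *
 *	#define my_alloc(size)	alloc_hooks(my_alloc_noprof(size))
 *
 * alloc_hooks() defines a static tag at the callsite, installs it via
 * alloc_tag_save(), evaluates the real allocation, then restores the
 * previous tag with alloc_tag_restore().
 */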

#endif /* _LINUX_ALLOC_TAG_H */