xref: /linux-6.15/include/linux/alloc_tag.h (revision 239d6c96)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as
 * an array of these. The embedded codetag uses the codetag framework.
 */
struct alloc_tag {
	struct codetag			ct;
	struct alloc_tag_counters __percpu	*counters;
} __aligned(8);
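
/*
 * One such instance is emitted for each allocation callsite by
 * DEFINE_ALLOC_TAG() below, normally through the alloc_hooks() wrapper at
 * the end of this header.
 */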

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY	((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
	return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (ref)
		ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

#ifdef CONFIG_MEM_ALLOC_PROFILING

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
 * Instead we will account all module allocations to a single counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section("alloc_tags") = {						\
		.ct = CODE_TAG_INIT,						\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);	\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section("alloc_tags") = {						\
		.ct = CODE_TAG_INIT,						\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */
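
/*
 * In either variant, CODE_TAG_INIT (from <linux/codetag.h>) records the
 * callsite's source location (filename, line number, etc.), which is what
 * the debug checks further down print when a tag is mishandled.
 */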

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}
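
/*
 * Typical use is as a fast-path guard before doing any accounting work,
 * e.g. (an illustrative sketch, not code from this header):
 *
 *	if (mem_alloc_profiling_enabled())
 *		alloc_tag_add(ref, current->alloc_tag, size);
 */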

static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct,
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	ref->ct = &tag->ct;
	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter because the counter will be decremented when each part
	 * is freed.
	 */
	this_cpu_inc(tag->counters->calls);
}
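
/*
 * For example (a sketch of the split case described above): when a large
 * tagged allocation is broken up, each resulting piece gets its own
 * codetag_ref pointing at the same tag, so the extra call-counter
 * increments are balanced by per-piece decrements in alloc_tag_sub().
 */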

static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
	this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	if (is_codetag_empty(ref)) {
		ref->ct = NULL;
		return;
	}

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}
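
/*
 * Illustrative pairing (a sketch; obj and obj->ref are hypothetical): an
 * allocator that keeps a union codetag_ref in its per-object metadata would
 * do roughly
 *
 *	alloc_tag_add(&obj->ref, current->alloc_tag, size);	// at allocation
 *	...
 *	alloc_tag_sub(&obj->ref, size);				// at free
 */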

#define alloc_tag_record(p)	((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})
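
/*
 * alloc_hooks_tag() charges the allocation performed by _do_alloc to an
 * already-existing tag instead of defining a new one: alloc_tag_save() makes
 * _tag the task's current tag for the duration of the call and
 * alloc_tag_restore() puts the previous tag back afterwards.
 */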

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
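
/*
 * Illustrative use (a sketch assuming a my_alloc_noprof()-style allocation
 * entry point, which is not defined in this header):
 *
 *	#define my_alloc(size, gfp)	alloc_hooks(my_alloc_noprof(size, gfp))
 *
 * Each expansion of alloc_hooks() defines a tag for that callsite and runs
 * the wrapped allocation with it as the current tag, so the allocator can
 * charge the bytes and calls to the right source location.
 */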

#endif /* _LINUX_ALLOC_TAG_H */