/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <[email protected]>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The use of 'struct static_key' directly is now DEPRECATED. In addition
 * static_key_{true,false}() is also DEPRECATED. I.e. DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))", in which case we will generate an
 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we modify the branch target at run time via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine-wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When control is directly exposed to userspace, it is prudent to delay the
 * decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
 *
 * Additional babbling in: Documentation/static-keys.txt
 * (An illustrative usage sketch follows this comment.)
 */

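/*
 * Illustrative usage sketch (not part of this header's API; the names
 * "my_feature_key", "my_hotpath()" and "do_slow_extra_work()" below are
 * hypothetical and only show the intended call pattern):
 *
 *	DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_hotpath(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			do_slow_extra_work();	// out of line until enabled
 *	}
 *
 *	// slow path, e.g. a sysfs/debugfs handler:
 *	static_branch_enable(&my_feature_key);	// patch the branch in
 *	static_branch_disable(&my_feature_key);	// patch the branch back out
 *
 * For reference-counted use, static_branch_inc()/static_branch_dec() can
 * replace the enable/disable pair; see further below.
 */
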
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
# define HAVE_JUMP_LABEL
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,		      \
				    "%s(): static key '%pS' used before call to jump_label_init()", \
				    __func__, (key))

#ifdef HAVE_JUMP_LABEL

struct static_key {
	atomic_t enabled;
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
};

#else
struct static_key {
	atomic_t enabled;
};
#endif	/* HAVE_JUMP_LABEL */
#endif /* __ASSEMBLY__ */

#ifdef HAVE_JUMP_LABEL
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}

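/*
 * Example decode (hypothetical addresses, purely for illustration): if a
 * jump_entry lives at 0xffffffff82000100 and entry->code == 0x40, then
 * jump_entry_code() returns 0xffffffff82000140; i.e. the patch site is
 * stored as a signed 32-bit offset from &entry->code rather than as an
 * absolute address. entry->key likewise holds a signed offset from
 * &entry->key to the key, with its low two bits reserved for flags,
 * hence the '& ~3L' above.
 */
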
#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

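/*
 * The low two bits of jump_entry::key double as flags (which is why
 * jump_entry_key() masks them off): bit 0 records the branch type
 * (1 = static_branch_likely(), 0 = static_branch_unlikely()) and bit 1
 * marks entries whose code lives in __init text, so they can be skipped
 * once init memory has been freed.
 */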
static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry)
{
	entry->key |= 2;
}

#endif
#endif

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

struct module;

#ifdef HAVE_JUMP_LABEL

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type);
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * including atomic.h is problematic for the inclusion of jump_label.h
 * in 'low-level' headers. Thus, we initialize .enabled with a raw
 * value and add a BUILD_BUG_ON() in jump_label_init() to catch any
 * issues; see kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE					\
	{ .enabled = { 1 },					\
	  { .entries = (void *)JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE					\
	{ .enabled = { 0 },					\
	  { .entries = (void *)JUMP_TYPE_FALSE } }

#else  /* !HAVE_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static inline int static_key_count(struct static_key *key)
{
	return atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely(static_key_count(key) > 0))
		return true;
	return false;
}

static inline void static_key_slow_inc(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_inc(&key->enabled);
}

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}

#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline int jump_label_apply_nops(struct module *mod)
{
	return 0;
}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* HAVE_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile-time
 * type differentiation to emit the right code.
 *
 * All of the code below is macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}

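/*
 * Illustrative sketch of the array form (the names "my_port_keys" and
 * MY_NR_PORTS are hypothetical): each element is a full key that can be
 * toggled independently,
 *
 *	#define MY_NR_PORTS 8
 *	DEFINE_STATIC_KEY_ARRAY_FALSE(my_port_keys, MY_NR_PORTS);
 *
 *	if (static_branch_unlikely(&my_port_keys[port]))
 *		do_port_debugging(port);
 */
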
extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false))	\
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})

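/*
 * Illustrative sketch ("my_key" is hypothetical): static_key_enabled()
 * only reads the current count, it does not emit a patched branch, so it
 * is meant for slow paths and works on struct static_key as well as the
 * _true/_false wrappers:
 *
 *	DEFINE_STATIC_KEY_FALSE(my_key);
 *	...
 *	pr_debug("my_key is %s\n", static_key_enabled(&my_key) ? "on" : "off");
 */
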
#ifdef HAVE_JUMP_LABEL

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 *
 * type\branch|	likely (1)	      |	unlikely (0)
 * -----------+-----------------------+------------------
 *            |                       |
 *  true (1)  |	   ...		      |	   ...
 *            |    NOP		      |	   JMP L
 *            |    <br-stmts>	      |	1: ...
 *            |	L: ...		      |
 *            |			      |
 *            |			      |	L: <br-stmts>
 *            |			      |	   jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *            |                       |
 *  false (0) |	   ...		      |	   ...
 *            |    JMP L	      |	   NOP
 *            |    <br-stmts>	      |	1: ...
 *            |	L: ...		      |
 *            |			      |
 *            |			      |	L: <br-stmts>
 *            |			      |	   jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *	enabled	type	branch	  instruction
 * -----------------------------+-----------
 *	0	0	0	| NOP
 *	0	0	1	| JMP
 *	0	1	0	| NOP
 *	0	1	1	| JMP
 *
 *	1	0	0	| JMP
 *	1	0	1	| NOP
 *	1	1	0	| JMP
 *	1	1	1	| NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */

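/*
 * Worked instance of the table above: a DEFINE_STATIC_KEY_FALSE() key
 * (type = 0) used in static_branch_likely() (branch = 1) starts out with
 * enabled = 0, so "instruction = enabled ^ branch" = 0 ^ 1 selects JMP and
 * the inline (likely) block is jumped over. After static_branch_enable()
 * sets enabled = 1, 1 ^ 1 selects NOP and execution falls straight through
 * into the inline block.
 */
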
#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely(branch);								\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely(branch);							\
})

#else /* !HAVE_JUMP_LABEL */

#define static_branch_likely(x)		likely(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely(static_key_enabled(&(x)->key))

#endif /* HAVE_JUMP_LABEL */

/*
 * Advanced usage; refcounted: the branch is enabled when count != 0.
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)

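/*
 * Illustrative sketch ("my_tracer_key" is hypothetical): with the
 * refcounted form the branch stays patched in for as long as at least
 * one user holds a reference,
 *
 *	static_branch_inc(&my_tracer_key);	// 0 -> 1: patches the branch in
 *	static_branch_inc(&my_tracer_key);	// already on, just counts
 *	static_branch_dec(&my_tracer_key);	// still on (count == 1)
 *	static_branch_dec(&my_tracer_key);	// 1 -> 0: patches it back out
 */
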
/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)

#endif /* __ASSEMBLY__ */

#endif	/* _LINUX_JUMP_LABEL_H */