xref: /linux-6.15/kernel/jump_label.c (revision fdfd4289)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In batch mode, entries are also sorted by their code address
	 * within the already key-sorted list, enabling a bsearch in the
	 * vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}
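
/*
 * Why the swap rebases by 'delta': with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE,
 * each jump_entry field holds an offset relative to its own address rather
 * than an absolute address, so moving an entry must rebase every offset.
 * A worked example with made-up numbers: an entry at 0x1000 storing code
 * offset 0x200 points at text 0x1200. Swapped to 0x1040 (delta = 0x40),
 * the stored offset becomes 0x200 - 0x40 = 0x1c0, and 0x1040 + 0x1c0
 * still resolves to 0x1200.
 */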

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}
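
/*
 * Resulting layout, sketched with made-up addresses: after sorting, all
 * entries for a given key are contiguous and ordered by code address,
 *
 *	[ keyA@0x8100, keyA@0x8340, keyB@0x8120, keyB@0x8500, ... ]
 *
 * so one key's sites can be walked as a single run (see
 * __jump_label_update()) and, in batch mode, searched by code address.
 */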

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements, as it is for the !CONFIG_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
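
/*
 * Usage sketch (illustrative; 'my_feature' and the helpers below are
 * hypothetical, not part of this file). The slow_inc()/slow_dec() pair
 * acts as a reference count on "enabled", so independent users compose:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *	void my_feature_get(void) { static_branch_inc(&my_feature); }
 *	void my_feature_put(void) { static_branch_dec(&my_feature); }
 *
 * The first inc patches every static_branch_*likely(&my_feature) site;
 * only the dec that drops the count back to zero patches them again.
 */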

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
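
/*
 * By contrast with the counted API above, enable/disable force
 * key->enabled to exactly 1 or 0; any other observed count trips the
 * WARN_ON_ONCE() checks. Illustrative (hypothetical key name):
 *
 *	static_key_enable(&my_key);	// enabled == 1
 *	static_key_enable(&my_key);	// no-op, still 1
 *	static_key_disable(&my_key);	// enabled == 0
 *
 * so boolean and counted usage should not be mixed on one key.
 */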

static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
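
/*
 * Deferred-key usage sketch (illustrative; 'my_key' is hypothetical).
 * Rate limiting batches the disable-side text patching:
 *
 *	static struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);	// once, at init
 *	static_key_slow_inc(&my_key.key);	// patches immediately
 *	static_key_slow_dec_deferred(&my_key);	// patches ~1s later
 *
 * A dec that would drop the count to zero is pushed through
 * schedule_delayed_work() instead of patching right away, so rapid
 * inc/dec pairs do not thrash the kernel text.
 */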

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
		return 1;

	return 0;
}
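
/*
 * The overlap test, worked through with made-up numbers: the entry
 * occupies [code, code + size) while [start, end] is treated as
 * inclusive. For an entry at 0x100 with size 4 (bytes 0x100-0x103):
 *
 *	start = 0x103, end = 0x110 -> conflict (0x100 <= 0x110 and
 *					0x104 > 0x103)
 *	start = 0x104, end = 0x110 -> no conflict (0x104 > 0x104 fails)
 */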

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
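
/*
 * The packing, spelled out: 'struct jump_entry' pointers are at least
 * word aligned, so the two low bits of key->type are free for flags
 * (JUMP_TYPE_TRUE in bit 0, JUMP_TYPE_LINKED in bit 1, together
 * JUMP_TYPE_MASK):
 *
 *	key->type:  [ pointer bits .............. | LINKED | TRUE ]
 *	                                             bit 1    bit 0
 *
 * Writing key->entries through the union clobbers those bits, which is
 * why the setter above saves 'type' first and ORs it back in.
 */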

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
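
/*
 * The XOR above as a truth table (JUMP_LABEL_NOP == 0, JUMP_LABEL_JMP
 * == 1; see the table in linux/jump_label.h):
 *
 *	enabled	branch		patched to
 *	   0	unlikely (0)	NOP  -> branch evaluates false
 *	   0	likely   (1)	JMP  -> jumps over the inline code (false)
 *	   1	unlikely (0)	JMP  -> jumps to the out-of-line code (true)
 *	   1	likely   (1)	NOP  -> falls through to the inline code (true)
 */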

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif
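
/*
 * The batch variant is a queue-then-flush pattern: sites are queued and
 * applied in bulk so an arch can pay the expensive synchronization once
 * per flush rather than once per site (the motivation on x86 is fewer
 * sync IPIs). The BUG_ON() encodes the invariant that a just-flushed
 * queue must accept at least one entry; sketched control flow:
 *
 *	for each entry:
 *		if (!queue(entry))	// queue full
 *			apply()		// flush; queue is now empty
 *			queue(entry)	// must succeed
 *	apply()				// flush the tail
 */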

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}
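
/*
 * Resulting topology, sketched: once a key is LINKED, key->next heads a
 * list with one static_key_mod node per image that uses the key, e.g.
 * after modules A and B both use a builtin key:
 *
 *	key->next -> { mod = B,    entries = B's table }
 *		  -> { mod = A,    entries = A's table }
 *		  -> { mod = NULL, entries = builtin table }
 *
 * mod == NULL marks the core kernel's own entries (see the jlm2 node
 * built in jump_label_add_module()).
 */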

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
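
/*
 * Caller sketch (illustrative): text-patching machinery such as kprobes
 * consults this before touching an instruction, roughly:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(p->addr, p->addr))
 *		ret = -EBUSY;	// refuse to patch a jump_label site
 *	jump_label_unlock();
 *
 * since rewriting bytes that jump label patching also rewrites would
 * corrupt the text.
 */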

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	bool init = system_state < SYSTEM_RUNNING;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod) {
		stop = mod->jump_entries + mod->num_jump_entries;
		init = mod->state == MODULE_STATE_COMING;
	}
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */