xref: /linux-6.15/kernel/rcu/tiny.c (revision 7acc2d90)
100de9d74SPaul E. McKenney // SPDX-License-Identifier: GPL-2.0+
24102adabSPaul E. McKenney /*
34102adabSPaul E. McKenney  * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
44102adabSPaul E. McKenney  *
54102adabSPaul E. McKenney  * Copyright IBM Corporation, 2008
64102adabSPaul E. McKenney  *
700de9d74SPaul E. McKenney  * Author: Paul E. McKenney <[email protected]>
84102adabSPaul E. McKenney  *
94102adabSPaul E. McKenney  * For detailed explanation of Read-Copy Update mechanism see -
104102adabSPaul E. McKenney  *		Documentation/RCU
114102adabSPaul E. McKenney  */
124102adabSPaul E. McKenney #include <linux/completion.h>
134102adabSPaul E. McKenney #include <linux/interrupt.h>
144102adabSPaul E. McKenney #include <linux/notifier.h>
15f9411ebeSIngo Molnar #include <linux/rcupdate_wait.h>
164102adabSPaul E. McKenney #include <linux/kernel.h>
174102adabSPaul E. McKenney #include <linux/export.h>
184102adabSPaul E. McKenney #include <linux/mutex.h>
194102adabSPaul E. McKenney #include <linux/sched.h>
204102adabSPaul E. McKenney #include <linux/types.h>
214102adabSPaul E. McKenney #include <linux/init.h>
224102adabSPaul E. McKenney #include <linux/time.h>
234102adabSPaul E. McKenney #include <linux/cpu.h>
244102adabSPaul E. McKenney #include <linux/prefetch.h>
2577a40f97SJoel Fernandes (Google) #include <linux/slab.h>
2664d1d06cSUladzislau Rezki (Sony) #include <linux/mm.h>
274102adabSPaul E. McKenney 
284102adabSPaul E. McKenney #include "rcu.h"
294102adabSPaul E. McKenney 
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};

/*
 * Definition for rcupdate control block.  Both tail pointers start out
 * referencing the (empty) list head, i.e. no callbacks are queued and
 * none are "done".  The grace-period counter starts a bit below zero,
 * presumably so that counter-wrap bugs show up soon after boot rather
 * than months later -- TODO confirm intent against Tree RCU's use of
 * the same idiom.
 */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};
446d48152eSPaul E. McKenney 
/*
 * rcu_barrier - wait for previously queued RCU callbacks to be invoked.
 *
 * NOTE(review): wait_rcu_gp() is defined elsewhere; it appears to queue a
 * callback via call_rcu_hurry() and block until that callback runs, which
 * on this single-list implementation would run after all earlier
 * callbacks -- confirm wait_rcu_gp() semantics before relying on this.
 */
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu_hurry);
}
EXPORT_SYMBOL(rcu_barrier);
50f9411ebeSIngo Molnar 
/* Record an rcu quiescent state.  */
void rcu_qs(void)
{
	unsigned long flags;

	/* Irqs off: the two tail pointers and gp_seq must update atomically
	 * with respect to interrupt handlers calling call_rcu(). */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		/* All currently queued callbacks are now "done"; have the
		 * RCU softirq invoke them. */
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	/* Advance the grace-period counter by 2, keeping the low-order bit
	 * clear -- presumably matching the gp_seq encoding shared with the
	 * polled grace-period APIs; confirm against the Tree RCU header. */
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	local_irq_restore(flags);
}
644102adabSPaul E. McKenney 
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		/* We interrupted usermode execution, which is a quiescent
		 * state for the sole CPU. */
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		/* Callbacks are waiting for a grace period: request a
		 * reschedule so the interrupted kernel code eventually
		 * passes through a quiescent state. */
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}
804102adabSPaul E. McKenney 
/*
 * Invoke the specified callback function, clearing ->func first so that
 * double invocation is detectable.  Always returns false; the bool return
 * appears to be retained from an earlier version that free'd kfree-style
 * callbacks directly -- TODO confirm no caller depends on the value (the
 * one visible caller, rcu_process_callbacks(), ignores it).
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;

	/* Tell lockdep we are inside an RCU callback. */
	rcu_lock_acquire(&rcu_callback_map);

	trace_rcu_invoke_callback("", head);
	f = head->func;
	debug_rcu_head_callback(head);
	/* Clear ->func before invocation so a buggy re-queue is visible. */
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
9977a40f97SJoel Fernandes (Google) 
/* Invoke the RCU callbacks whose grace period has elapsed.  */
static __latent_entropy void rcu_process_callbacks(void)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	/* Detach the "done" segment: everything from the list head up to
	 * *donetail moves to the local list; the remainder (not-yet-done
	 * callbacks) becomes the new global list. */
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	/* If the whole list was "done", curtail must be reset to the list
	 * head too, or it would dangle into the detached segment. */
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		rcu_reclaim_tiny(list);
		list = next;
	}
}
1304102adabSPaul E. McKenney 
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	/* Advance gp_seq so the polled APIs see a completed grace period.
	 * NOTE(review): preempt_disable() appears intended to keep this
	 * read-modify-write on one CPU; rcu_qs() does its update with irqs
	 * off, so confirm an interrupt between the read and write here
	 * cannot lose an increment. */
	preempt_disable();
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
154f78f5b90SPaul E. McKenney 
/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 *
 * @head: structure embedded in the protected object, queued on the
 *	global callback list (ownership passes to RCU until @func runs).
 * @func: callback invoked (from softirq) once a grace period elapses.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	static atomic_t doublefrees;
	unsigned long flags;

	if (debug_rcu_head_queue(head)) {
		/* Double call_rcu() on the same head: report the first few
		 * occurrences, then stay silent to avoid log spam. */
		if (atomic_inc_return(&doublefrees) < 4) {
			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
			mem_dump_obj(head);
		}
		return;
	}

	head->func = func;
	head->next = NULL;

	/* Append to the global list with irqs off so interrupt handlers
	 * calling call_rcu() cannot corrupt the tail pointer. */
	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
1874102adabSPaul E. McKenney 
/*
 * Store a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	/* Mark the cookie as corresponding to an already-completed grace
	 * period, so polling on it succeeds immediately. */
	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
1975f6130faSLai Jiangshan 
/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	/* READ_ONCE() pairs with the WRITE_ONCE() updates of gp_seq in
	 * rcu_qs() and synchronize_rcu(). */
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
20791a967fdSPaul E. McKenney 
20891a967fdSPaul E. McKenney /*
20991a967fdSPaul E. McKenney  * Return a grace-period-counter "cookie" and ensure that a future grace
21091a967fdSPaul E. McKenney  * period completes.  For more information, see the Tree RCU header comment.
21191a967fdSPaul E. McKenney  */
start_poll_synchronize_rcu(void)21291a967fdSPaul E. McKenney unsigned long start_poll_synchronize_rcu(void)
2130909fc2bSPaul E. McKenney {
2140909fc2bSPaul E. McKenney 	unsigned long gp_seq = get_state_synchronize_rcu();
2150909fc2bSPaul E. McKenney 
2160909fc2bSPaul E. McKenney 	if (unlikely(is_idle_task(current))) {
2170909fc2bSPaul E. McKenney 		/* force scheduling for rcu_qs() */
2180909fc2bSPaul E. McKenney 		resched_cpu(0);
2190909fc2bSPaul E. McKenney 	}
2200909fc2bSPaul E. McKenney 	return gp_seq;
2210909fc2bSPaul E. McKenney }
2220909fc2bSPaul E. McKenney EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
2230909fc2bSPaul E. McKenney 
2240909fc2bSPaul E. McKenney /*
2250909fc2bSPaul E. McKenney  * Return true if the grace period corresponding to oldstate has completed
2260909fc2bSPaul E. McKenney  * and false otherwise.  For more information, see the Tree RCU header
2270909fc2bSPaul E. McKenney  * comment.
2280909fc2bSPaul E. McKenney  */
poll_state_synchronize_rcu(unsigned long oldstate)2290909fc2bSPaul E. McKenney bool poll_state_synchronize_rcu(unsigned long oldstate)
2300909fc2bSPaul E. McKenney {
2310909fc2bSPaul E. McKenney 	return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
2320909fc2bSPaul E. McKenney }
2330909fc2bSPaul E. McKenney EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
2340909fc2bSPaul E. McKenney 
2350909fc2bSPaul E. McKenney #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST)
rcutorture_gather_gp_seqs(void)2360909fc2bSPaul E. McKenney unsigned long long rcutorture_gather_gp_seqs(void)
2370909fc2bSPaul E. McKenney {
2380909fc2bSPaul E. McKenney 	return READ_ONCE(rcu_ctrlblk.gp_seq) & 0xffffULL;
2390909fc2bSPaul E. McKenney }
2400909fc2bSPaul E. McKenney EXPORT_SYMBOL_GPL(rcutorture_gather_gp_seqs);
2410909fc2bSPaul E. McKenney 
/* Format gathered grace-period sequence bits into cp for printing. */
void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len)
{
	unsigned long long low_bits = seqs & 0xffffULL;

	snprintf(cp, len, "g%04llx", low_bits);
}
2460909fc2bSPaul E. McKenney EXPORT_SYMBOL_GPL(rcutorture_format_gp_seqs);
2470909fc2bSPaul E. McKenney #endif
2480909fc2bSPaul E. McKenney 
/* Boot-time initialization of Tiny RCU. */
void __init rcu_init(void)
{
	/* Route RCU_SOFTIRQ to the callback-invocation handler above. */
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	/* Run any early-boot RCU self-tests. */
	rcu_early_boot_tests();
	/* Initialize the generic Tasks-RCU callback lists. */
	tasks_cblist_init_generic();
}
25504a522b7SUladzislau Rezki (Sony)