/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>

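/*
 * A reader-optimized rw_semaphore: in the common case readers only
 * increment/decrement a per-CPU count (@read_count) inside an RCU-sched
 * read-side critical section.  Only while a writer has taken @rss out
 * of its idle state and set @block do lock attempts fall back to a slow
 * path and sleep on @waiters; the writer itself waits on @writer for
 * the aggregate reader count to drain to zero.
 */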
struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rcuwait		writer;
	wait_queue_head_t	waiters;
	atomic_t		block;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	.waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),		\
	.block = ATOMIC_INIT(0),					\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}

#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)
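
/*
 * Example (a sketch; "example_sem" is a made-up name): a compile-time
 * instance with file-local scope is defined as
 *
 *	DEFINE_STATIC_PERCPU_RWSEM(example_sem);
 *
 * while DEFINE_PERCPU_RWSEM() produces one with external linkage.
 */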

extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both take sem->rss out of its idle state and start checking
	 * the reader counters while we are here. So if we see that rcu_sync
	 * is idle, we know that the writer won't be checking until we're
	 * past the preempt_enable() and that once the synchronize_rcu() is
	 * done, the writer will see anything we did within this RCU-sched
	 * read-side critical section.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}

static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
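
/*
 * An illustrative sketch (not from the kernel sources; "example_sem" is
 * a made-up name): the trylock form suits callers that cannot afford to
 * sleep waiting for an active writer.
 *
 *	if (!percpu_down_read_trylock(&example_sem))
 *		return -EBUSY;
 *	... read-side critical section ...
 *	percpu_up_read(&example_sem);
 */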

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * slowpath; reader will only ever wake a single blocked
		 * writer.
		 */
		smp_mb(); /* B matches C */
		/*
		 * In other words, if the writer sees our decrement
		 * (presumably to aggregate zero, as that is the only time it
		 * matters) it will also see our critical section.
		 */
		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
}
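
/*
 * The typical reader-side pairing, as a sketch ("example_sem" is a
 * made-up name).  In the common, writer-free case this costs one
 * per-CPU increment and one per-CPU decrement:
 *
 *	percpu_down_read(&example_sem);
 *	... read-side critical section ...
 *	percpu_up_read(&example_sem);
 */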

extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
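
/*
 * Writer-side sketch ("example_sem" is a made-up name).  Write
 * acquisition is the expensive path: it forces readers into the slow
 * path via rcu_sync and waits for all in-flight readers to drain, so
 * it fits rare, slow-path updates:
 *
 *	percpu_down_write(&example_sem);
 *	... exclusive critical section ...
 *	percpu_up_write(&example_sem);
 */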

DEFINE_GUARD(percpu_read, struct percpu_rw_semaphore *,
	     percpu_down_read(_T), percpu_up_read(_T))
DEFINE_GUARD_COND(percpu_read, _try, percpu_down_read_trylock(_T))

DEFINE_GUARD(percpu_write, struct percpu_rw_semaphore *,
	     percpu_down_write(_T), percpu_up_write(_T))
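
/*
 * With the cleanup.h guards defined above, scope-based locking is
 * available (a sketch; "example_sem" is a made-up name):
 *
 *	guard(percpu_read)(&example_sem);
 *	... read-side critical section, released at end of scope ...
 *
 * and, for the writer side:
 *
 *	scoped_guard(percpu_write, &example_sem) {
 *		... exclusive critical section ...
 *	}
 */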

static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
{
	return atomic_read(&sem->block);
}

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;			\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})
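
/*
 * Dynamic initialization sketch (made-up names).  percpu_init_rwsem()
 * allocates the per-CPU reader count and can therefore fail, so check
 * its return value:
 *
 *	struct percpu_rw_semaphore *sem = kzalloc(sizeof(*sem), GFP_KERNEL);
 *
 *	if (!sem || percpu_init_rwsem(sem))
 *		... error handling ...
 *	...
 *	percpu_free_rwsem(sem);
 *	kfree(sem);
 */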

#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
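
/*
 * percpu_rwsem_release() and percpu_rwsem_acquire() only update lockdep
 * state; they are meant for the rare pattern where a held lock is
 * handed from one context to another while remaining locked.
 * An illustrative sketch ("example_sem" is a made-up name):
 *
 *	percpu_rwsem_release(&example_sem, _RET_IP_);
 *	... ownership passes to another context ...
 *	percpu_rwsem_acquire(&example_sem, false, _RET_IP_);
 */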

#endif