xref: /linux-6.15/include/linux/rwsem.h (revision fa1f5116)
/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells ([email protected]).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline and cause
 * cacheline bouncing (see the layout sketch after the structure
 * definition below).
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners, as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the CPU.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};
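/*
 * Illustrative layout sketch only -- 'struct foo' and its fields are
 * hypothetical. When embedding an rwsem, keep other hot, frequently
 * written fields away from it so that waiters spinning on sem->owner
 * do not bounce those fields' cachelines as well:
 *
 *	struct foo {
 *		struct rw_semaphore sem;	hot: count and owner
 *		unsigned long ro_flags;		read-mostly data is fine here
 *		atomic_long_t hot_stat ____cacheline_aligned;
 *	};
 */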

#define RWSEM_UNLOCKED_VALUE		0UL
#define RWSEM_WRITER_LOCKED		(1UL << 0)
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}

static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}

static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
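/*
 * Usage sketch ('my_sem', 'struct foo' and foo_alloc() are hypothetical):
 * a file-scope rwsem can be defined statically with DECLARE_RWSEM(), while
 * an rwsem embedded in a dynamically allocated object must be initialized
 * at run time with init_rwsem() so that it gets its own lockdep class key:
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (foo)
 *			init_rwsem(&foo->sem);
 *		return foo;
 *	}
 */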
12312249b34SThomas Gleixner 
1241da177e4SLinus Torvalds /*
1254a444b1fSJosef Bacik  * This is the same regardless of which rwsem implementation that is being used.
126e2db7592SIngo Molnar  * It is just a heuristic meant to be called by somebody already holding the
1274a444b1fSJosef Bacik  * rwsem to see if somebody from an incompatible type is wanting access to the
1284a444b1fSJosef Bacik  * lock.
1294a444b1fSJosef Bacik  */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}
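/*
 * Usage sketch (the work loop is hypothetical): a long-running lock
 * holder can poll rwsem_is_contended() and briefly drop the lock when
 * other tasks are queued on it:
 *
 *	down_read(&sem);
 *	while (have_more_work()) {
 *		do_some_work();
 *		if (rwsem_is_contended(&sem)) {
 *			up_read(&sem);
 *			cond_resched();
 *			down_read(&sem);
 *		}
 *	}
 *	up_read(&sem);
 */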

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

struct rw_semaphore {
	struct rwbase_rt	rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void  __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			  struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}

static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rwsem_is_locked(sem));
}

static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */

static inline void rwsem_assert_held(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held(sem);
	else
		rwsem_assert_held_nolockdep(sem);
}

static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held_write(sem);
	else
		rwsem_assert_held_write_nolockdep(sem);
}
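/*
 * Usage sketch ('struct foo' and foo_update_locked() are hypothetical):
 * helpers that rely on the caller holding the rwsem can check that
 * requirement; with lockdep enabled this uses lockdep's owner tracking,
 * otherwise it falls back to the counter-based checks above:
 *
 *	static void foo_update_locked(struct foo *foo)
 *	{
 *		rwsem_assert_held_write(&foo->sem);
 *		foo->state++;
 *	}
 */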

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);
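/*
 * Read-side usage sketch (error codes are illustrative):
 *
 *	down_read(&sem);			sleeps, not interruptible
 *	...read shared data...
 *	up_read(&sem);
 *
 *	if (down_read_killable(&sem))		0 on success, -EINTR if killed
 *		return -EINTR;
 *	...read shared data...
 *	up_read(&sem);
 *
 *	if (!down_read_trylock(&sem))		1 on success, 0 on contention
 *		return -EAGAIN;
 *	...read shared data...
 *	up_read(&sem);
 */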

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);
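/*
 * Write-side usage sketch:
 *
 *	down_write(&sem);
 *	...modify shared data...
 *	up_write(&sem);
 *
 *	if (down_write_killable(&sem))		0 on success, -EINTR if killed
 *		return -EINTR;
 *	...modify shared data...
 *	up_write(&sem);
 */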

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)

DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
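/*
 * Scope-based locking sketch using the guard infrastructure from
 * <linux/cleanup.h> ('struct foo' is hypothetical). The rwsem is
 * released automatically when the guard goes out of scope, including
 * on early returns; the body of a conditional (_try/_intr) scoped
 * guard only runs if the lock was acquired:
 *
 *	static int foo_read_state(struct foo *foo)
 *	{
 *		guard(rwsem_read)(&foo->sem);
 *		return foo->state;
 *	}
 *
 *	scoped_guard(rwsem_write_try, &foo->sem)
 *		foo->state++;
 */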

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
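/*
 * Usage sketch: publish data under the write lock, then downgrade so
 * that other readers may proceed while this task retains read access;
 * release with up_read() afterwards:
 *
 *	down_write(&sem);
 *	...initialize or publish the data...
 *	downgrade_write(&sem);
 *	...continue using the data read-only...
 *	up_read(&sem);
 */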

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)
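/*
 * Annotation sketch (the parent/child objects are hypothetical): when
 * two rwsems of the same lock class are always taken in the same order,
 * the inner one can be annotated with a subclass; alternatively, a
 * statically nested instance can be given its own class key at init
 * time with lockdep_set_class():
 *
 *	down_write(&parent->sem);
 *	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	...
 *	up_write(&child->sem);
 *	up_write(&parent->sem);
 *
 *	static struct lock_class_key child_key;
 *	init_rwsem(&child->sem);
 *	lockdep_set_class(&child->sem, &child_key);
 */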

/*
 * Take/release a lock when the task releasing it is not the task that
 * acquired it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
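/*
 * Non-owner usage sketch (the async submission flow is hypothetical):
 * the read lock is taken in the submitting task but released from a
 * different context, e.g. a completion handler:
 *
 *	down_read_non_owner(&sem);		submitting task
 *	queue_async_work(...);
 *
 *	up_read_non_owner(&sem);		completion handler
 */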
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)	down_write(sem)
# define down_write_nested(sem, subclass)	down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)		down_read(sem)
# define up_read_non_owner(sem)			up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */