/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};

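/*
 * Illustrative sketch, not part of the kernel API: roughly how a
 * lockless fast path (as in lib/lockref.c) can use the union above.
 * A single 64-bit cmpxchg covers both the lock and the count, so the
 * increment only succeeds if the spinlock stayed unlocked across the
 * whole update.  The function name and the spinlock-internals access
 * below are simplified assumptions for illustration only.
 */
#if USE_CMPXCHG_LOCKREF
static inline bool lockref_inc_fastpath_sketch(struct lockref *lockref)
{
	struct lockref old, new;

	old.lock_count = READ_ONCE(lockref->lock_count);
	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
		new.lock_count = old.lock_count;
		new.count++;
		/* Replace lock+count in one shot; fails if either changed */
		if (try_cmpxchg64_relaxed(&lockref->lock_count,
					  &old.lock_count, new.lock_count))
			return true;
		/* old.lock_count was reloaded by the failed cmpxchg */
		cpu_relax();
	}
	return false;	/* lock was held: caller falls back to the spinlock */
}
#endif
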
/**
 * lockref_init - Initialize a lockref
 * @lockref: pointer to lockref structure
 *
 * Initializes @lockref->count to 1.
 */
static inline void lockref_init(struct lockref *lockref)
{
	spin_lock_init(&lockref->lock);
	lockref->count = 1;
}

void lockref_get(struct lockref *lockref);
int lockref_put_return(struct lockref *lockref);
bool lockref_get_not_zero(struct lockref *lockref);
bool lockref_put_or_lock(struct lockref *lockref);

void lockref_mark_dead(struct lockref *lockref);
bool lockref_get_not_dead(struct lockref *lockref);
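
/*
 * Illustrative usage sketch (hypothetical caller, not kernel code):
 * the common pattern is a lockless get on lookup, with a put that
 * only takes the spinlock when the count would drop to zero.
 */
static inline void lockref_example_get_put(struct lockref *ref)
{
	if (!lockref_get_not_dead(ref))
		return;		/* already dead: no reference was taken */

	/* ... use the refcounted object ... */

	if (!lockref_put_or_lock(ref)) {
		/*
		 * Count was 1 and ->lock is now held, with the count
		 * left undecremented: do last-reference teardown here,
		 * then drop the lock.
		 */
		spin_unlock(&ref->lock);
	}
}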

/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
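
/*
 * Illustrative sketch (hypothetical caller): the dead state is encoded
 * as a negative count, so both marking a lockref dead and testing for
 * deadness must happen under ->lock.
 */
static inline bool lockref_kill_sketch(struct lockref *ref)
{
	bool was_dead;

	spin_lock(&ref->lock);
	was_dead = __lockref_is_dead(ref);
	if (!was_dead)
		lockref_mark_dead(ref);	/* count becomes negative */
	spin_unlock(&ref->lock);
	return was_dead;
}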

#endif /* __LINUX_LOCKREF_H */