xref: /linux-6.15/include/linux/lockref.h (revision 2ada0add)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from plain atomic refcounts in that they are
 * atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually take
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
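
/*
 * The union lets USE_CMPXCHG_LOCKREF builds treat the embedded spinlock
 * and the count as one 64-bit word.  A minimal sketch of the resulting
 * lockless "get" fast path, assuming that layout (illustrative only; the
 * real loop is the CMPXCHG_LOOP macro in lib/lockref.c):
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new.lock_count = old.lock_count;
 *		new.count++;
 *		if (try_cmpxchg64_relaxed(&lockref->lock_count,
 *					  &old.lock_count, new.lock_count))
 *			return;		// reference taken without locking
 *	}
 *	// the lock is held (or we raced): take the locked slow path
 */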

/**
 * lockref_init - Initialize a lockref
 * @lockref: pointer to lockref structure
 * @count: initial count
 */
static inline void lockref_init(struct lockref *lockref, unsigned int count)
{
	spin_lock_init(&lockref->lock);
	lockref->count = count;
}
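
/*
 * Hypothetical embedding example (struct foo and foo_alloc() are made up
 * for illustration): the lockref lives inside the refcounted object and
 * is initialized before the object is published:
 *
 *	struct foo {
 *		struct lockref ref;
 *	};
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (foo)
 *			lockref_init(&foo->ref, 1);	// caller's reference
 *		return foo;
 *	}
 */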

void lockref_get(struct lockref *lockref);
int lockref_put_return(struct lockref *lockref);
bool lockref_get_not_zero(struct lockref *lockref);
bool lockref_put_or_lock(struct lockref *lockref);

void lockref_mark_dead(struct lockref *lockref);
bool lockref_get_not_dead(struct lockref *lockref);
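
/*
 * Sketch of a typical lookup/release pairing built on the helpers above
 * (this mirrors the way fs/dcache.c uses the API; foo and foo_free() are
 * hypothetical):
 *
 *	// lookup: only take a reference while the object is still alive
 *	if (!lockref_get_not_dead(&foo->ref))
 *		return NULL;
 *
 *	// release: lockless decrement unless this may be the last ref
 *	if (lockref_put_or_lock(&foo->ref))
 *		return;
 *	// put failed: the spinlock is now held and the count is unchanged
 *	lockref_mark_dead(&foo->ref);
 *	spin_unlock(&foo->ref.lock);
 *	foo_free(foo);
 */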

/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
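
/*
 * lockref_mark_dead() expects the spinlock to be held; it poisons the
 * count with a negative value (-128 in current lib/lockref.c) so that
 * the lockless fast paths refuse further gets.  A lock-held death check
 * might look like (foo as in the hypothetical examples above):
 *
 *	spin_lock(&foo->ref.lock);
 *	if (__lockref_is_dead(&foo->ref)) {
 *		spin_unlock(&foo->ref.lock);
 *		return -ENOENT;		// already being torn down
 *	}
 */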

#endif /* __LINUX_LOCKREF_H */