xref: /linux-6.15/include/linux/refcount.h (revision 6e5c8381)
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the increment; this ensures we'll never modify the
 * object if we did not in fact acquire a reference.
 *
 * The decrements provide release order, such that all prior loads and stores
 * will be issued before; they also provide a control dependency, which will
 * order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */
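
/*
 * Illustrative sketch (not part of this header; 'struct obj', obj_get(),
 * obj_put() and the kfree_rcu() teardown are assumptions): a typical get/put
 * pair relying on the rules above. The RCU dependent load orders the relaxed
 * increment; the release ordering of the final decrement orders our prior
 * stores against the eventual free().
 *
 *	struct obj {
 *		struct rcu_head rcu;
 *		refcount_t ref;
 *	};
 *
 *	static struct obj *obj_get(struct obj *o)
 *	{
 *		if (o && !refcount_inc_not_zero(&o->ref))
 *			o = NULL;
 *		return o;
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_test(&o->ref))
 *			kfree_rcu(o, rcu);
 *	}
 */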

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#ifdef CONFIG_DEBUG_REFCOUNT
#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
#define __refcount_check	__must_check
#else
#define REFCOUNT_WARN(cond, str) (void)(cond)
#define __refcount_check
#endif

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}

static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
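
/*
 * Illustrative sketch ('struct obj' and its 'ref' member are assumptions):
 * a freshly allocated object starts life with one reference owned by the
 * caller; statically allocated objects can use REFCOUNT_INIT() instead.
 *
 *	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);
 *
 *	if (!o)
 *		return NULL;
 *	refcount_set(&o->ref, 1);
 *
 * refcount_read() is a plain read; the value can change concurrently, so it
 * is only suitable for heuristics and debug output.
 */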

static inline __refcount_check
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;
		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}

static inline void refcount_add(unsigned int i, refcount_t *r)
{
	REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
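
/*
 * Illustrative sketch ('sk' and 'nr_msgs' are assumptions): taking one
 * reference per queued message in a single operation rather than calling
 * refcount_inc() in a loop.
 *
 *	if (!refcount_add_not_zero(nr_msgs, &sk->ref))
 *		return -ENOENT;
 */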

/*
 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
static inline __refcount_check
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
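
/*
 * Illustrative sketch (the list layout and 'struct obj' are assumptions):
 * lookup under RCU, where the read-side critical section keeps the object
 * memory stable while we attempt to acquire a reference.
 *
 *	static struct obj *obj_find(struct list_head *head, int id)
 *	{
 *		struct obj *o;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(o, head, node) {
 *			if (o->id == id && refcount_inc_not_zero(&o->ref)) {
 *				rcu_read_unlock();
 *				return o;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return NULL;
 *	}
 */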

/*
 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object. Will WARN when this is not so.
 */
static inline void refcount_inc(refcount_t *r)
{
	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
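
/*
 * Illustrative sketch: duplicating a pointer while the caller already holds
 * a reference, e.g. when handing the object to a second consumer ('worker'
 * is an assumption).
 *
 *	refcount_inc(&o->ref);
 *	worker->obj = o;
 */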

/*
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return !new;
}

static inline __refcount_check
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
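
/*
 * Illustrative sketch (obj_free() is a hypothetical destructor): the
 * canonical put path, freeing the object on the 1->0 transition.
 *
 *	if (refcount_dec_and_test(&o->ref))
 *		obj_free(o);
 */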

/*
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
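
/*
 * Illustrative sketch: dropping a reference that is known not to be the last
 * one, e.g. because a cache holds its own reference until teardown; hitting
 * zero here would WARN, as the object would be leaked.
 *
 *	refcount_dec(&o->ref);
 */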

/*
 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and a
 * control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg because the latter would allow implementing
 * unsafe operations.
 */
static inline __refcount_check
bool refcount_dec_if_one(refcount_t *r)
{
	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
}
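
/*
 * Illustrative sketch (obj_free() is hypothetical): a try-delete operation
 * that only tears the object down when no other references exist.
 *
 *	static bool obj_try_delete(struct obj *o)
 *	{
 *		if (!refcount_dec_if_one(&o->ref))
 *			return false;
 *		obj_free(o);
 *		return true;
 *	}
 */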

/*
 * No atomic_t counterpart; it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 */
static inline __refcount_check
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return true;
}

/*
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
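
/*
 * Illustrative sketch ('obj_list_lock' and the list membership are
 * assumptions): drop a reference and, only if it was the last one, take the
 * mutex to unlink the object before freeing it.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_mutex_lock(&o->ref, &obj_list_lock)) {
 *			list_del(&o->node);
 *			mutex_unlock(&obj_list_lock);
 *			kfree(o);
 *		}
 *	}
 */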

/*
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
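
/*
 * Illustrative sketch ('obj_hash_lock' is an assumption): the same pattern
 * as above with a spinlock, e.g. unhashing the object on the final put.
 *
 *	if (refcount_dec_and_lock(&o->ref, &obj_hash_lock)) {
 *		hlist_del(&o->hnode);
 *		spin_unlock(&obj_hash_lock);
 *		kfree(o);
 *	}
 */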

#endif /* _LINUX_REFCOUNT_H */