/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * struct refcount_struct - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
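
/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * initializing an embedded refcount. 'struct foo' and foo_create() are
 * made-up names used only for the example. Objects typically start life
 * with a count of 1, either statically via REFCOUNT_INIT() or at
 * allocation time via refcount_set():
 *
 *	struct foo {
 *		refcount_t refcount;
 *	};
 *
 *	static struct foo static_foo = {
 *		.refcount = REFCOUNT_INIT(1),
 *	};
 *
 *	struct foo *foo_create(gfp_t gfp)
 *	{
 *		struct foo *p = kzalloc(sizeof(*p), gfp);
 *
 *		if (p)
 *			refcount_set(&p->refcount, 1);	// first reference
 *		return p;
 *	}
 */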

#ifdef CONFIG_REFCOUNT_FULL
#include <linux/bug.h>

#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 * 	                           INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                     <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 * 	int old = atomic_fetch_add_relaxed(1, r);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(r, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object. Given the precise timing details involved with the
 * round-robin scheduling of each thread manipulating the refcount and the need
 * to hit the race multiple times in succession, there doesn't appear to be a
 * practical avenue of attack even if using refcount_add() operations with
 * larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release ordering, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */
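
/*
 * Illustrative sketch of the ordering rules above, using a hypothetical
 * refcounted 'struct foo' (not part of this header). CPU A's release
 * ordering on its decrement, paired with CPU B's acquire ordering on the
 * successful 1->0 transition, guarantees that CPU A's last update to the
 * object is visible before CPU B frees it:
 *
 *	// CPU A: drops its reference after a final update (count 2 -> 1)
 *	foo->data = 42;				// plain store to the object
 *	refcount_dec(&foo->refcount);		// release: store above ordered before
 *
 *	// CPU B: drops the last reference (count 1 -> 0)
 *	if (refcount_dec_and_test(&foo->refcount))	// acquire on success
 *		kfree(foo);			// ordered after CPU A's update
 *
 *	// CPU C: takes a new reference on an object it just looked up
 *	refcount_inc(&foo->refcount);		// relaxed; the lock acquire or
 *						// RCU dependent load of the
 *						// lookup provides the ordering
 */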

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (unlikely(old < 0 || old + i < 0)) {
		refcount_set(r, REFCOUNT_SATURATED);
		WARN_ONCE(1, "refcount_t: saturated; leaking memory.\n");
	}

	return old;
}
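
/*
 * Illustrative sketch (hypothetical code, not part of this header): taking
 * several references in one atomic step, e.g. when handing an object out to
 * a batch of workers. 'struct foo' and nr_workers are made-up names:
 *
 *	bool foo_get_many(struct foo *foo, int nr_workers)
 *	{
 *		// Either all nr_workers references are taken, or none are;
 *		// fails if the count has already dropped to zero.
 *		return refcount_add_not_zero(nr_workers, &foo->refcount);
 *	}
 */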

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	WARN_ONCE(!old, "refcount_t: addition on 0; use-after-free.\n");
	if (unlikely(old <= 0 || old + i <= 0)) {
		refcount_set(r, REFCOUNT_SATURATED);
		WARN_ONCE(old, "refcount_t: saturated; leaking memory.\n");
	}
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return refcount_add_not_zero(1, r);
}
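
/*
 * Illustrative sketch (hypothetical code, not part of this header): the
 * typical lockless-lookup pattern refcount_inc_not_zero() is meant for.
 * 'struct foo', foo_lookup() and foo_table are made-up names; the object
 * is assumed to be freed via RCU so its memory stays stable for the
 * duration of the read-side critical section:
 *
 *	struct foo *foo_lookup(unsigned long key)
 *	{
 *		struct foo *foo;
 *
 *		rcu_read_lock();
 *		foo = radix_tree_lookup(&foo_table, key);
 *		// The object may be on its way out; only keep it if a
 *		// reference can still be taken before the count hits zero.
 *		if (foo && !refcount_inc_not_zero(&foo->refcount))
 *			foo = NULL;
 *		rcu_read_unlock();
 *
 *		return foo;
 *	}
 */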

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	refcount_add(1, r);
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0)) {
		refcount_set(r, REFCOUNT_SATURATED);
		WARN_ONCE(1, "refcount_t: underflow; use-after-free.\n");
	}

	return false;
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
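
/*
 * Illustrative sketch (hypothetical code, not part of this header): the
 * usual "put" helper built on refcount_dec_and_test(). 'struct foo' and
 * foo_put() are made-up names; whoever observes the 1->0 transition frees
 * the object. If the object is also found via lockless lookups, as in the
 * refcount_inc_not_zero() sketch above, the free must be deferred past a
 * grace period (here via a hypothetical 'struct rcu_head rcu' member):
 *
 *	void foo_put(struct foo *foo)
 *	{
 *		if (refcount_dec_and_test(&foo->refcount))
 *			kfree_rcu(foo, rcu);	// last reference dropped
 *	}
 */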

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	int old = atomic_fetch_sub_release(1, &r->refs);

	if (unlikely(old <= 1)) {
		refcount_set(r, REFCOUNT_SATURATED);
		WARN_ONCE(1, "refcount_t: decrement hit 0; leaking memory.\n");
	}
}
#else /* CONFIG_REFCOUNT_FULL */

#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

# ifdef CONFIG_ARCH_HAS_REFCOUNT
#  include <asm/refcount.h>
# else
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return atomic_add_unless(&r->refs, i, 0);
}

static inline void refcount_add(int i, refcount_t *r)
{
	atomic_add(i, &r->refs);
}

static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return atomic_add_unless(&r->refs, 1, 0);
}

static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);
}

static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return atomic_sub_and_test(i, &r->refs);
}

static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}

static inline void refcount_dec(refcount_t *r)
{
	atomic_dec(&r->refs);
}
# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
#endif /* !CONFIG_REFCOUNT_FULL */

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags);
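
/*
 * Illustrative sketch (hypothetical code, not part of this header): the
 * lock-combining helpers above are for objects that also sit on a locked
 * data structure. 'struct foo', foo->node and foo_lock are made-up names;
 * refcount_dec_and_lock() only takes the lock when the count is about to
 * hit zero, so the unlink and the free are atomic with respect to lookups
 * done under the same lock:
 *
 *	void foo_put_locked(struct foo *foo)
 *	{
 *		if (refcount_dec_and_lock(&foo->refcount, &foo_lock)) {
 *			list_del(&foo->node);	// no new lookups can find it
 *			spin_unlock(&foo_lock);
 *			kfree(foo);
 *		}
 *	}
 */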
#endif /* _LINUX_REFCOUNT_H */