/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
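
/*
 * Usage sketch (illustrative only; 'struct foo' and its members are
 * hypothetical, not part of this header):
 *
 *	struct foo {
 *		refcount_t usage;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		refcount_set(&f->usage, 1);	// one reference for the creator
 *	}
 *
 * A statically allocated object can use the initializer instead:
 *
 *	static struct foo global_foo = {
 *		.usage = REFCOUNT_INIT(1),
 *	};
 *
 * refcount_read() is a plain read, mostly useful for diagnostics, e.g.
 *
 *	pr_debug("foo usage: %u\n", refcount_read(&f->usage));
 */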

#ifdef CONFIG_REFCOUNT_FULL
#include <linux/bug.h>

#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                            INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                     (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                     <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *	int old = atomic_fetch_add_relaxed(1, r);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(r, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object. Given the precise timing details involved with the
 * round-robin scheduling of each thread manipulating the refcount and the need
 * to hit the race multiple times in succession, there doesn't appear to be a
 * practical avenue of attack even if using refcount_add() operations with
 * larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the increment; this ensures we'll never modify the
 * object if we did not in fact acquire a reference.
 *
 * The decrements provide release ordering, such that all prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * orders us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */
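
/*
 * Putting the above together, a typical lookup/put pair might look like this
 * (illustrative sketch only; 'struct foo', its 'usage' and 'rcu' members and
 * find_foo() are hypothetical, not part of this header):
 *
 *	rcu_read_lock();
 *	f = find_foo(key);			// dependent load orders the lookup
 *	if (f && !refcount_inc_not_zero(&f->usage))
 *		f = NULL;			// object already on its way out
 *	rcu_read_unlock();
 *
 *	...
 *
 *	if (refcount_dec_and_test(&f->usage))	// release, plus acquire on 1->0
 *		kfree_rcu(f, rcu);		// 'rcu' is a struct rcu_head member
 */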

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}
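
/*
 * Illustrative sketch (hypothetical names): taking @nr references in one go,
 * but only if the object is still live:
 *
 *	if (!refcount_add_not_zero(nr, &f->usage))
 *		return -ENOENT;		// object already released
 */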

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
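
/*
 * Illustrative sketch (hypothetical names): the caller already holds a
 * reference on @f and hands out @nr additional ones in one operation:
 *
 *	refcount_add(nr, &f->usage);
 */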

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return refcount_add_not_zero(1, r);
}
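
/*
 * Illustrative sketch (hypothetical names): lookup under a lock that keeps
 * the object memory stable, failing the lookup if the count already hit 0:
 *
 *	spin_lock(&foo_lock);
 *	f = idr_find(&foo_idr, id);
 *	if (f && !refcount_inc_not_zero(&f->usage))
 *		f = NULL;
 *	spin_unlock(&foo_lock);
 */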

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	refcount_add(1, r);
}
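
/*
 * Illustrative sketch (hypothetical names): handing a second reference to a
 * worker while the caller's own reference keeps the object alive:
 *
 *	refcount_inc(&f->usage);
 *	queue_work(foo_wq, &f->work);	// the worker does the matching put
 */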

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}
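
/*
 * Illustrative sketch (hypothetical names): dropping a batch of @nr
 * references at once, tearing the object down on the final drop:
 *
 *	if (refcount_sub_and_test(nr, &f->usage))
 *		foo_destroy(f);		// hypothetical teardown helper
 */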

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
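
/*
 * Illustrative sketch (hypothetical names): the canonical put helper, freeing
 * the object on the final reference drop:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test(&f->usage))
 *			kfree(f);
 *	}
 */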

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
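
/*
 * Illustrative sketch (hypothetical names): dropping a reference that is
 * known not to be the last one, e.g. because the caller still holds another:
 *
 *	refcount_dec(&f->usage);
 */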
#else /* CONFIG_REFCOUNT_FULL */

#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

# ifdef CONFIG_ARCH_HAS_REFCOUNT
#  include <asm/refcount.h>
# else
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return atomic_add_unless(&r->refs, i, 0);
}

static inline void refcount_add(int i, refcount_t *r)
{
	atomic_add(i, &r->refs);
}

static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return atomic_add_unless(&r->refs, 1, 0);
}

static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);
}

static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return atomic_sub_and_test(i, &r->refs);
}

static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}

static inline void refcount_dec(refcount_t *r)
{
	atomic_dec(&r->refs);
}
# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
#endif /* !CONFIG_REFCOUNT_FULL */

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags);
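
/*
 * Illustrative sketch (hypothetical names): removing an object from a
 * lock-protected list only on the final reference drop, without racing
 * against concurrent lookups (the lock is held when true is returned):
 *
 *	if (refcount_dec_and_lock(&f->usage, &foo_lock)) {
 *		list_del(&f->node);
 *		spin_unlock(&foo_lock);
 *		kfree(f);
 *	}
 */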
#endif /* _LINUX_REFCOUNT_H */