/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
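
/*
 * Illustrative sketch (not part of this header): a hypothetical object
 * embedding a refcount_t, initialised either statically with REFCOUNT_INIT()
 * or at runtime with refcount_set(). 'struct foo', foo_alloc() and its
 * fields are made up for the example; kzalloc() assumes <linux/slab.h>.
 *
 *	struct foo {
 *		refcount_t ref;
 *		void *data;
 *	};
 *
 *	static struct foo static_foo = { .ref = REFCOUNT_INIT(1) };
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (f)
 *			refcount_set(&f->ref, 1);	// caller owns the initial reference
 *		return f;
 *	}
 */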

#ifdef CONFIG_REFCOUNT_FULL
#include <linux/bug.h>

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 * 	                           INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                     <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *	int old = atomic_fetch_add_relaxed(1, r);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(r, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object. Given the precise timing details involved with the
 * round-robin scheduling of each thread manipulating the refcount and the need
 * to hit the race multiple times in succession, there doesn't appear to be a
 * practical avenue of attack even if using refcount_add() operations with
 * larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return refcount_add_not_zero(1, r);
}
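
/*
 * Illustrative sketch (not part of this header): the typical lockless lookup
 * pattern this primitive is meant for. The object is found under RCU and a
 * reference is taken only if the count has not already dropped to zero.
 * 'struct foo', its 'ref' member and foo_lookup_rcu() are hypothetical.
 *
 *	rcu_read_lock();
 *	f = foo_lookup_rcu(key);
 *	if (f && !refcount_inc_not_zero(&f->ref))
 *		f = NULL;	// object is being torn down; don't use it
 *	rcu_read_unlock();
 */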

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	refcount_add(1, r);
}
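
/*
 * Illustrative sketch (not part of this header): a conventional 'get' helper
 * for a caller that already holds a reference, e.g. one obtained under a lock
 * or via the lookup pattern above. 'struct foo' is hypothetical.
 *
 *	static inline struct foo *foo_get(struct foo *f)
 *	{
 *		refcount_inc(&f->ref);
 *		return f;
 *	}
 */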

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
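
/*
 * Illustrative sketch (not part of this header): the matching 'put' helper.
 * Only the thread that observes the 1->0 transition frees the object, and the
 * acquire ordering on success ensures the free happens after all prior
 * accesses. 'struct foo' and foo_free() are hypothetical.
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test(&f->ref))
 *			foo_free(f);	// e.g. kfree() or kfree_rcu()
 *	}
 */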

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
#else /* CONFIG_REFCOUNT_FULL */
# ifdef CONFIG_ARCH_HAS_REFCOUNT
#  include <asm/refcount.h>
# else
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return atomic_add_unless(&r->refs, i, 0);
}

static inline void refcount_add(int i, refcount_t *r)
{
	atomic_add(i, &r->refs);
}

static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return atomic_add_unless(&r->refs, 1, 0);
}

static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);
}

static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return atomic_sub_and_test(i, &r->refs);
}

static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}

static inline void refcount_dec(refcount_t *r)
{
	atomic_dec(&r->refs);
}
# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
#endif /* !CONFIG_REFCOUNT_FULL */

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags);
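
/*
 * Illustrative sketch (not part of this header): refcount_dec_and_lock() is
 * useful when the final put must also unlink the object from a data structure
 * protected by a lock; the lock is taken only when the count is about to hit
 * zero. 'struct foo', its 'node' member, foo_list_lock and foo_free() are
 * hypothetical.
 *
 *	static void foo_put_locked(struct foo *f)
 *	{
 *		if (refcount_dec_and_lock(&f->ref, &foo_list_lock)) {
 *			list_del(&f->node);
 *			spin_unlock(&foo_list_lock);
 *			foo_free(f);
 *		}
 *	}
 */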
#endif /* _LINUX_REFCOUNT_H */