xref: /linux-6.15/include/linux/refcount.h (revision 77e9971c)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
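
/*
 * A minimal usage sketch (illustrative only, not part of this header): an
 * object embeds a refcount_t and starts life with a single reference held by
 * its creator. 'struct foo', foo_alloc() and the 'refs' member are
 * hypothetical, and the usual slab helpers from <linux/slab.h> are assumed;
 * statically allocated objects would use REFCOUNT_INIT() instead.
 *
 *	struct foo {
 *		refcount_t refs;
 *	};
 *
 *	static struct foo *foo_alloc(void)
 *	{
 *		struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (p)
 *			refcount_set(&p->refs, 1);
 *		return p;
 *	}
 */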

#ifdef CONFIG_REFCOUNT_FULL
#include <linux/bug.h>

#define REFCOUNT_MAX		(UINT_MAX - 1)
#define REFCOUNT_SATURATED	UINT_MAX

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at REFCOUNT_SATURATED and will not
 * move once there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 */
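
/*
 * Illustrative sketch of the "relaxed increment" rule above (not part of this
 * header): the lock that made the object reachable already provides the
 * required ordering, so the unordered refcount_inc() is sufficient.
 * 'struct foo', foo_lock, foo_list and the 'node'/'refs' members are
 * hypothetical.
 *
 *	spin_lock(&foo_lock);
 *	obj = list_first_entry_or_null(&foo_list, struct foo, node);
 *	if (obj)
 *		refcount_inc(&obj->refs);
 *	spin_unlock(&foo_lock);
 */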

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		new = val + i;
		if (new < val)
			new = REFCOUNT_SATURATED;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == REFCOUNT_SATURATED,
		  "refcount_t: saturated; leaking memory.\n");

	return true;
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
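
/*
 * Illustrative sketch (not part of this header): refcount_add() suits the
 * rarer case of taking several references in one go, e.g. just before
 * publishing one object to 'nr' consumers that will each drop their reference
 * individually later.  'obj', 'nr', publish_to_consumers() and the 'refs'
 * member are hypothetical; the caller is assumed to already hold a reference,
 * so the count cannot be 0 here.
 *
 *	refcount_add(nr, &obj->refs);
 *	publish_to_consumers(obj, nr);
 */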

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == REFCOUNT_SATURATED,
		  "refcount_t: saturated; leaking memory.\n");

	return true;
}

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
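
/*
 * Illustrative sketch (not part of this header): the typical lockless lookup
 * pattern for refcount_inc_not_zero(), where RCU keeps the object memory
 * stable while a reference is obtained; a failed increment means the object
 * is already on its way to being freed.  foo_lookup_rcu(), 'key' and the
 * 'refs' member are hypothetical.
 *
 *	rcu_read_lock();
 *	obj = foo_lookup_rcu(key);
 *	if (obj && !refcount_inc_not_zero(&obj->refs))
 *		obj = NULL;
 *	rcu_read_unlock();
 */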

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow, and it will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	if (!new) {
		smp_acquire__after_ctrl_dep();
		return true;
	}
	return false;
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
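
/*
 * Illustrative sketch (not part of this header): the usual put path, where
 * the release/acquire ordering of refcount_dec_and_test() ensures all prior
 * accesses to the object happen before it is freed.  foo_put(), foo_destroy()
 * and the 'refs' member are hypothetical.
 *
 *	static void foo_put(struct foo *obj)
 *	{
 *		if (refcount_dec_and_test(&obj->refs))
 *			foo_destroy(obj);
 *	}
 */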

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
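
/*
 * Illustrative sketch (not part of this header): refcount_dec() is for the
 * case where the caller knows another reference is still held, e.g. dropping
 * a short-lived reference while the owning data structure keeps its own.
 * Hitting 0 here would mean nobody is left to free the object, hence the
 * WARN about leaking memory.  'obj', do_something_temporary() and the 'refs'
 * member are hypothetical.
 *
 *	refcount_inc(&obj->refs);
 *	do_something_temporary(obj);
 *	refcount_dec(&obj->refs);
 */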

#else /* CONFIG_REFCOUNT_FULL */

#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

# ifdef CONFIG_ARCH_HAS_REFCOUNT
#  include <asm/refcount.h>
# else
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return atomic_add_unless(&r->refs, i, 0);
}

static inline void refcount_add(int i, refcount_t *r)
{
	atomic_add(i, &r->refs);
}

static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return atomic_add_unless(&r->refs, 1, 0);
}

static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);
}

static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return atomic_sub_and_test(i, &r->refs);
}

static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}

static inline void refcount_dec(refcount_t *r)
{
	atomic_dec(&r->refs);
}
# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
#endif /* !CONFIG_REFCOUNT_FULL */

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags);
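
/*
 * Illustrative sketch (not part of this header): refcount_dec_and_lock()
 * implements the classic "drop the last reference and unlink under a lock"
 * pattern; the lock is only taken when the count actually drops to 0.
 * foo_list_lock, foo_free() and the 'node'/'refs' members are hypothetical.
 *
 *	static void foo_release(struct foo *obj)
 *	{
 *		if (refcount_dec_and_lock(&obj->refs, &foo_list_lock)) {
 *			list_del(&obj->node);
 *			spin_unlock(&foo_list_lock);
 *			foo_free(obj);
 *		}
 *	}
 */
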
#endif /* _LINUX_REFCOUNT_H */