// SPDX-License-Identifier: GPL-2.0
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

#ifdef CONFIG_REFCOUNT_FULL

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero);

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add);
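
/*
 * Illustrative sketch, not part of this file: one legitimate use of
 * refcount_add() is taking several references in one go, e.g. when a
 * producer queues @n items that each pin the same object. All names
 * below (struct pinned_obj, pinned_obj_queue_many) are hypothetical.
 */
struct pinned_obj {
	refcount_t refs;
};

static void pinned_obj_queue_many(struct pinned_obj *obj, unsigned int n)
{
	/* caller already holds a reference, so the count cannot be 0 here */
	refcount_add(n, &obj->refs);
}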

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		/* val was UINT_MAX: new wrapped to 0, stay saturated */
		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero);
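
/*
 * Illustrative sketch of the lockless pattern described above, with
 * hypothetical names (struct cache_obj, cache_obj_lookup); it assumes
 * <linux/rculist.h> for the RCU list helpers. The RCU read-side section
 * keeps the object's memory stable, and refcount_inc_not_zero() then
 * either takes a reference or reports the object as already dying.
 */
struct cache_obj {
	refcount_t refs;
	struct hlist_node node;
	unsigned long key;
};

static struct cache_obj *cache_obj_lookup(struct hlist_head *head,
					  unsigned long key)
{
	struct cache_obj *obj;

	rcu_read_lock();
	hlist_for_each_entry_rcu(obj, head, node) {
		if (obj->key == key && refcount_inc_not_zero(&obj->refs)) {
			rcu_read_unlock();
			return obj;	/* caller now holds a reference */
		}
	}
	rcu_read_unlock();

	return NULL;	/* not found, or found but already on its way out */
}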

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc);
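
/*
 * Illustrative sketch: the typical get() helper built on refcount_inc().
 * 'struct blob' and blob_get() are hypothetical. The caller must already
 * hold a reference, so this cannot race with the final put and needs no
 * return value to signal failure.
 */
struct blob {
	refcount_t refs;
};

static struct blob *blob_get(struct blob *b)
{
	refcount_inc(&b->refs);
	return b;
}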

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		/* saturated: never decrement, never free */
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return !new;
}
EXPORT_SYMBOL(refcount_sub_and_test);

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test);
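
/*
 * Illustrative sketch: the put() helper matching the hypothetical
 * blob_get() above (assumes <linux/slab.h> for kfree()). Only the thread
 * that observes the 1->0 transition frees the object; the release
 * ordering of refcount_dec_and_test() ensures all prior stores to *b
 * happen before the free.
 */
static void blob_put(struct blob *b)
{
	if (refcount_dec_and_test(&b->refs))
		kfree(b);
}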

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec);
#endif /* CONFIG_REFCOUNT_FULL */

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
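
/*
 * Illustrative sketch of the try-delete use mentioned above, with
 * hypothetical names (struct entry, entry_try_delete) and <linux/slab.h>
 * assumed for kfree(): tear the object down only if we hold the last
 * reference, without disturbing anyone who still uses it.
 */
struct entry {
	refcount_t refs;
};

static bool entry_try_delete(struct entry *e)
{
	if (!refcount_dec_if_one(&e->refs))
		return false;	/* still referenced elsewhere */

	kfree(e);		/* we held the only reference */
	return true;
}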

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		/* saturated: report success but never reach 1, never free */
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
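
/*
 * Illustrative sketch of the pattern these helpers exist for, with
 * hypothetical names (struct item, item_put) and <linux/slab.h> assumed
 * for kfree(). The lock protects the lookup structure, so the final put
 * must unlink under it; refcount_dec_and_lock() only takes the lock on
 * the 1->0 transition, keeping the common put cheap.
 */
struct item {
	refcount_t refs;
	struct list_head node;
};

static void item_put(struct item *it, spinlock_t *list_lock)
{
	if (refcount_dec_and_lock(&it->refs, list_lock)) {
		list_del(&it->node);	/* unlink while the lock is held */
		spin_unlock(list_lock);
		kfree(it);
	}
}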