/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 John Baldwin <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <machine/atomic.h>

#if defined(_KERNEL) || defined(_STANDALONE)
#include <sys/systm.h>
#else
#include <stdbool.h>
#define	KASSERT(exp, msg)	/* */
#endif

#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
#define	REFCOUNT_SATURATION_VALUE	(3U << 30)
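
/*
 * Illustration of the constants above: with a 32-bit counter, bit 31
 * (1U << 31 == 0x80000000) acts as the saturation flag, so
 * REFCOUNT_SATURATED() is true for any value of 0x80000000 or above.
 * The saturation value 3U << 30 (0xc0000000) sits in the middle of that
 * range, so a saturated counter stays saturated even as racing acquires
 * and releases continue to nudge it by small amounts.
 */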

/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
	panic("refcount %p wraparound", count);
#else
	atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}

static __inline void
refcount_init(volatile u_int *count, u_int value)
{
	KASSERT(!REFCOUNT_SATURATED(value),
	    ("invalid initial refcount value %u", value));
	atomic_store_int(count, value);
}

static __inline u_int
refcount_load(volatile u_int *count)
{
	return (atomic_load_int(count));
}
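
/*
 * Typical usage sketch (the "struct obj" and its field names are
 * hypothetical, not part of this header):
 *
 *	struct obj {
 *		u_int	refs;
 *		...
 *	};
 *
 *	refcount_init(&obj->refs, 1);	// creator holds the first ref
 *
 * refcount_load() is a plain atomic read, suitable for assertions and
 * diagnostics; it implies no memory ordering.
 */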

static __inline u_int
refcount_acquire(volatile u_int *count)
{
	u_int old;

	old = atomic_fetchadd_int(count, 1);
	if (__predict_false(REFCOUNT_SATURATED(old)))
		_refcount_update_saturated(count);

	return (old);
}

static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
	u_int old;

	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
	    ("refcount_acquiren: n=%u too large", n));
	old = atomic_fetchadd_int(count, n);
	if (__predict_false(REFCOUNT_SATURATED(old)))
		_refcount_update_saturated(count);

	return (old);
}

static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
	u_int old;

	old = atomic_load_int(count);
	for (;;) {
		if (__predict_false(REFCOUNT_SATURATED(old + 1)))
			return (false);
		if (__predict_true(atomic_fcmpset_int(count, &old,
		    old + 1) == 1))
			return (true);
	}
}
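
/*
 * Sketch of a caller that propagates acquire failure instead of
 * saturating (names are illustrative only):
 *
 *	if (!refcount_acquire_checked(&obj->refs))
 *		return (EOVERFLOW);
 *
 * Unlike refcount_acquire(), the checked variant never modifies a
 * counter that is about to saturate; the fcmpset loop retries only
 * when another thread changed the count in between.
 */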

/*
 * This function returns true if the refcount was
 * incremented.  Otherwise false is returned.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
	u_int old;

	old = atomic_load_int(count);
	for (;;) {
		if (old <= n)
			return (false);
		if (__predict_false(REFCOUNT_SATURATED(old)))
			return (true);
		if (atomic_fcmpset_int(count, &old, old + 1))
			return (true);
	}
}

static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{

	return (refcount_acquire_if_gt(count, 0));
}
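
/*
 * refcount_acquire_if_not_zero() is the building block for lockless
 * lookups.  A hedged sketch (the lookup routine and field names are
 * hypothetical):
 *
 *	obj = lookup_unlocked(table, key);
 *	if (obj != NULL && !refcount_acquire_if_not_zero(&obj->refs))
 *		obj = NULL;	// raced with the final release
 *
 * A count of zero means a release may already be tearing the object
 * down, so taking a new reference at that point would be unsafe.
 */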

static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
	u_int old;

	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
	    ("refcount_releasen: n=%u too large", n));

	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(count, -n);
	if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
		_refcount_update_saturated(count);
		return (false);
	}
	if (old > n)
		return (false);

	/*
	 * Last reference.  Signal the user to call the destructor.
	 *
	 * Ensure that the destructor sees all updates. This synchronizes with
	 * release fences from all routines which drop the count.
	 */
	atomic_thread_fence_acq();
	return (true);
}

static __inline bool
refcount_release(volatile u_int *count)
{

	return (refcount_releasen(count, 1));
}
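
/*
 * The common release pattern, sketched with hypothetical names: a true
 * return value means this call dropped the last reference, and the
 * acquire fence above guarantees the destructor observes every store
 * made while the object was shared.
 *
 *	if (refcount_release(&obj->refs))
 *		obj_destroy(obj);
 */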

#define	_refcount_release_if_cond(cond, name)				\
static __inline __result_use_check bool					\
_refcount_release_if_##name(volatile u_int *count, u_int n)		\
{									\
	u_int old;							\
									\
	KASSERT(n > 0, ("%s: zero increment", __func__));		\
	old = atomic_load_int(count);					\
	for (;;) {							\
		if (!(cond))						\
			return (false);					\
		if (__predict_false(REFCOUNT_SATURATED(old)))		\
			return (false);					\
		if (atomic_fcmpset_rel_int(count, &old, old - 1))	\
			return (true);					\
	}								\
}
_refcount_release_if_cond(old > n, gt)
_refcount_release_if_cond(old == n, eq)
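
/*
 * The two instantiations above expand to _refcount_release_if_gt() and
 * _refcount_release_if_eq(): each decrements the counter with release
 * semantics, but only while the condition holds against a freshly
 * loaded value.  atomic_fcmpset_rel_int() refreshes "old" on failure,
 * so the condition is re-evaluated on every retry.
 */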

static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{

	return (_refcount_release_if_gt(count, n));
}

static __inline __result_use_check bool
refcount_release_if_last(volatile u_int *count)
{

	if (_refcount_release_if_eq(count, 1)) {
		/* See the comment in refcount_releasen(). */
		atomic_thread_fence_acq();
		return (true);
	}
	return (false);
}

static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{

	return (_refcount_release_if_gt(count, 1));
}
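
/*
 * A hedged sketch of how the conditional-release helpers divide the
 * work (the lock and names are illustrative only):
 * refcount_release_if_not_last() lets a caller drop a reference on the
 * fast path without being obliged to run the destructor, falling back
 * to a full release under a lock only when it would have been the last
 * holder.
 *
 *	if (!refcount_release_if_not_last(&obj->refs)) {
 *		lock_table(table);
 *		if (refcount_release(&obj->refs))
 *			obj_destroy(obj);	// last reference is gone
 *		unlock_table(table);
 *	}
 */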

#endif /* !__SYS_REFCOUNT_H__ */