1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
273a9bf95SArnaldo Carvalho de Melo #ifndef _TOOLS_LINUX_REFCOUNT_H
373a9bf95SArnaldo Carvalho de Melo #define _TOOLS_LINUX_REFCOUNT_H
473a9bf95SArnaldo Carvalho de Melo
573a9bf95SArnaldo Carvalho de Melo /*
673a9bf95SArnaldo Carvalho de Melo * Variant of atomic_t specialized for reference counts.
773a9bf95SArnaldo Carvalho de Melo *
873a9bf95SArnaldo Carvalho de Melo * The interface matches the atomic_t interface (to aid in porting) but only
973a9bf95SArnaldo Carvalho de Melo * provides the few functions one should use for reference counting.
1073a9bf95SArnaldo Carvalho de Melo *
1173a9bf95SArnaldo Carvalho de Melo * It differs in that the counter saturates at UINT_MAX and will not move once
1273a9bf95SArnaldo Carvalho de Melo * there. This avoids wrapping the counter and causing 'spurious'
1373a9bf95SArnaldo Carvalho de Melo * use-after-free issues.
1473a9bf95SArnaldo Carvalho de Melo *
1573a9bf95SArnaldo Carvalho de Melo * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
1673a9bf95SArnaldo Carvalho de Melo * and provide only what is strictly required for refcounts.
1773a9bf95SArnaldo Carvalho de Melo *
1873a9bf95SArnaldo Carvalho de Melo * The increments are fully relaxed; these will not provide ordering. The
1973a9bf95SArnaldo Carvalho de Melo * rationale is that whatever is used to obtain the object we're increasing the
2073a9bf95SArnaldo Carvalho de Melo * reference count on will provide the ordering. For locked data structures,
2173a9bf95SArnaldo Carvalho de Melo * its the lock acquire, for RCU/lockless data structures its the dependent
2273a9bf95SArnaldo Carvalho de Melo * load.
2373a9bf95SArnaldo Carvalho de Melo *
2473a9bf95SArnaldo Carvalho de Melo * Do note that inc_not_zero() provides a control dependency which will order
2573a9bf95SArnaldo Carvalho de Melo * future stores against the inc, this ensures we'll never modify the object
2673a9bf95SArnaldo Carvalho de Melo * if we did not in fact acquire a reference.
2773a9bf95SArnaldo Carvalho de Melo *
2873a9bf95SArnaldo Carvalho de Melo * The decrements will provide release order, such that all the prior loads and
2973a9bf95SArnaldo Carvalho de Melo * stores will be issued before, it also provides a control dependency, which
3073a9bf95SArnaldo Carvalho de Melo * will order us against the subsequent free().
3173a9bf95SArnaldo Carvalho de Melo *
3273a9bf95SArnaldo Carvalho de Melo * The control dependency is against the load of the cmpxchg (ll/sc) that
3373a9bf95SArnaldo Carvalho de Melo * succeeded. This means the stores aren't fully ordered, but this is fine
3473a9bf95SArnaldo Carvalho de Melo * because the 1->0 transition indicates no concurrency.
3573a9bf95SArnaldo Carvalho de Melo *
3673a9bf95SArnaldo Carvalho de Melo * Note that the allocator is responsible for ordering things between free()
3773a9bf95SArnaldo Carvalho de Melo * and alloc().
3873a9bf95SArnaldo Carvalho de Melo *
3973a9bf95SArnaldo Carvalho de Melo */
4073a9bf95SArnaldo Carvalho de Melo
4173a9bf95SArnaldo Carvalho de Melo #include <linux/atomic.h>
4273a9bf95SArnaldo Carvalho de Melo #include <linux/kernel.h>
4373a9bf95SArnaldo Carvalho de Melo
4473a9bf95SArnaldo Carvalho de Melo #ifdef NDEBUG
4573a9bf95SArnaldo Carvalho de Melo #define REFCOUNT_WARN(cond, str) (void)(cond)
4673a9bf95SArnaldo Carvalho de Melo #define __refcount_check
4773a9bf95SArnaldo Carvalho de Melo #else
4873a9bf95SArnaldo Carvalho de Melo #define REFCOUNT_WARN(cond, str) BUG_ON(cond)
4973a9bf95SArnaldo Carvalho de Melo #define __refcount_check __must_check
5073a9bf95SArnaldo Carvalho de Melo #endif
5173a9bf95SArnaldo Carvalho de Melo
5273a9bf95SArnaldo Carvalho de Melo typedef struct refcount_struct {
5373a9bf95SArnaldo Carvalho de Melo atomic_t refs;
5473a9bf95SArnaldo Carvalho de Melo } refcount_t;
5573a9bf95SArnaldo Carvalho de Melo
5673a9bf95SArnaldo Carvalho de Melo #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
5773a9bf95SArnaldo Carvalho de Melo
/* Initialize/overwrite the refcount with @n; plain store, no ordering implied. */
static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}
6273a9bf95SArnaldo Carvalho de Melo
/*
 * Set the refcount to @n.
 *
 * NOTE(review): this tools variant is a plain atomic_set() and so provides
 * no release ordering, unlike what the name suggests — presumably fine for
 * the tools build; confirm if callers rely on release semantics.
 */
static inline void refcount_set_release(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}
67*31041385SSuren Baghdasaryan
/* Return the current counter value; plain load, no ordering implied. */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
7273a9bf95SArnaldo Carvalho de Melo
7373a9bf95SArnaldo Carvalho de Melo /*
7473a9bf95SArnaldo Carvalho de Melo * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
7573a9bf95SArnaldo Carvalho de Melo *
7673a9bf95SArnaldo Carvalho de Melo * Provides no memory ordering, it is assumed the caller has guaranteed the
7773a9bf95SArnaldo Carvalho de Melo * object memory to be stable (RCU, etc.). It does provide a control dependency
7873a9bf95SArnaldo Carvalho de Melo * and thereby orders future stores. See the comment on top.
7973a9bf95SArnaldo Carvalho de Melo */
8073a9bf95SArnaldo Carvalho de Melo static inline __refcount_check
refcount_inc_not_zero(refcount_t * r)8173a9bf95SArnaldo Carvalho de Melo bool refcount_inc_not_zero(refcount_t *r)
8273a9bf95SArnaldo Carvalho de Melo {
8373a9bf95SArnaldo Carvalho de Melo unsigned int old, new, val = atomic_read(&r->refs);
8473a9bf95SArnaldo Carvalho de Melo
8573a9bf95SArnaldo Carvalho de Melo for (;;) {
8673a9bf95SArnaldo Carvalho de Melo new = val + 1;
8773a9bf95SArnaldo Carvalho de Melo
8873a9bf95SArnaldo Carvalho de Melo if (!val)
8973a9bf95SArnaldo Carvalho de Melo return false;
9073a9bf95SArnaldo Carvalho de Melo
9173a9bf95SArnaldo Carvalho de Melo if (unlikely(!new))
9273a9bf95SArnaldo Carvalho de Melo return true;
9373a9bf95SArnaldo Carvalho de Melo
9473a9bf95SArnaldo Carvalho de Melo old = atomic_cmpxchg_relaxed(&r->refs, val, new);
9573a9bf95SArnaldo Carvalho de Melo if (old == val)
9673a9bf95SArnaldo Carvalho de Melo break;
9773a9bf95SArnaldo Carvalho de Melo
9873a9bf95SArnaldo Carvalho de Melo val = old;
9973a9bf95SArnaldo Carvalho de Melo }
10073a9bf95SArnaldo Carvalho de Melo
10173a9bf95SArnaldo Carvalho de Melo REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
10273a9bf95SArnaldo Carvalho de Melo
10373a9bf95SArnaldo Carvalho de Melo return true;
10473a9bf95SArnaldo Carvalho de Melo }
10573a9bf95SArnaldo Carvalho de Melo
10673a9bf95SArnaldo Carvalho de Melo /*
10773a9bf95SArnaldo Carvalho de Melo * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
10873a9bf95SArnaldo Carvalho de Melo *
10973a9bf95SArnaldo Carvalho de Melo * Provides no memory ordering, it is assumed the caller already has a
11073a9bf95SArnaldo Carvalho de Melo * reference on the object, will WARN when this is not so.
11173a9bf95SArnaldo Carvalho de Melo */
static inline void refcount_inc(refcount_t *r)
{
	/* Failing to take a reference here means the count was 0. */
	bool taken = refcount_inc_not_zero(r);

	REFCOUNT_WARN(!taken, "refcount_t: increment on 0; use-after-free.\n");
}
11673a9bf95SArnaldo Carvalho de Melo
11773a9bf95SArnaldo Carvalho de Melo /*
11873a9bf95SArnaldo Carvalho de Melo * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
11973a9bf95SArnaldo Carvalho de Melo * decrement when saturated at UINT_MAX.
12073a9bf95SArnaldo Carvalho de Melo *
12173a9bf95SArnaldo Carvalho de Melo * Provides release memory ordering, such that prior loads and stores are done
12273a9bf95SArnaldo Carvalho de Melo * before, and provides a control dependency such that free() must come after.
12373a9bf95SArnaldo Carvalho de Melo * See the comment on top.
12473a9bf95SArnaldo Carvalho de Melo */
12573a9bf95SArnaldo Carvalho de Melo static inline __refcount_check
refcount_sub_and_test(unsigned int i,refcount_t * r)12673a9bf95SArnaldo Carvalho de Melo bool refcount_sub_and_test(unsigned int i, refcount_t *r)
12773a9bf95SArnaldo Carvalho de Melo {
12873a9bf95SArnaldo Carvalho de Melo unsigned int old, new, val = atomic_read(&r->refs);
12973a9bf95SArnaldo Carvalho de Melo
13073a9bf95SArnaldo Carvalho de Melo for (;;) {
13173a9bf95SArnaldo Carvalho de Melo if (unlikely(val == UINT_MAX))
13273a9bf95SArnaldo Carvalho de Melo return false;
13373a9bf95SArnaldo Carvalho de Melo
13473a9bf95SArnaldo Carvalho de Melo new = val - i;
13573a9bf95SArnaldo Carvalho de Melo if (new > val) {
13673a9bf95SArnaldo Carvalho de Melo REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
13773a9bf95SArnaldo Carvalho de Melo return false;
13873a9bf95SArnaldo Carvalho de Melo }
13973a9bf95SArnaldo Carvalho de Melo
14073a9bf95SArnaldo Carvalho de Melo old = atomic_cmpxchg_release(&r->refs, val, new);
14173a9bf95SArnaldo Carvalho de Melo if (old == val)
14273a9bf95SArnaldo Carvalho de Melo break;
14373a9bf95SArnaldo Carvalho de Melo
14473a9bf95SArnaldo Carvalho de Melo val = old;
14573a9bf95SArnaldo Carvalho de Melo }
14673a9bf95SArnaldo Carvalho de Melo
14773a9bf95SArnaldo Carvalho de Melo return !new;
14873a9bf95SArnaldo Carvalho de Melo }
14973a9bf95SArnaldo Carvalho de Melo
/*
 * Drop one reference; returns true on the 1->0 transition, at which point
 * the caller may free the object.  Ordering semantics are those of
 * refcount_sub_and_test().
 */
static inline __refcount_check
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
15573a9bf95SArnaldo Carvalho de Melo
15673a9bf95SArnaldo Carvalho de Melo
#endif /* _TOOLS_LINUX_REFCOUNT_H */
158