xref: /linux-6.15/include/linux/file_ref.h (revision 4d07bbf2)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef _LINUX_FILE_REF_H
3 #define _LINUX_FILE_REF_H
4 
5 #include <linux/atomic.h>
6 #include <linux/preempt.h>
7 #include <linux/types.h>
8 
9 /*
10  * file_ref is a reference count implementation specifically for use by
11  * files. It takes inspiration from rcuref but differs in key aspects
12  * such as support for SLAB_TYPESAFE_BY_RCU type caches.
13  *
14  * FILE_REF_ONEREF                FILE_REF_MAXREF
15  * 0x0000000000000000UL      0x7FFFFFFFFFFFFFFFUL
16  * <-------------------valid ------------------->
17  *
18  *                       FILE_REF_SATURATED
19  * 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL
20  * <-----------------------saturation zone---------------------->
21  *
22  * FILE_REF_RELEASED                   FILE_REF_DEAD
23  * 0xC000000000000000UL         0xE000000000000000UL
24  * <-------------------dead zone------------------->
25  *
26  * FILE_REF_NOREF
27  * 0xFFFFFFFFFFFFFFFFUL
28  */
29 
30 #ifdef CONFIG_64BIT
31 #define FILE_REF_ONEREF		0x0000000000000000UL
32 #define FILE_REF_MAXREF		0x7FFFFFFFFFFFFFFFUL
33 #define FILE_REF_SATURATED	0xA000000000000000UL
34 #define FILE_REF_RELEASED	0xC000000000000000UL
35 #define FILE_REF_DEAD		0xE000000000000000UL
36 #define FILE_REF_NOREF		0xFFFFFFFFFFFFFFFFUL
37 #else
38 #define FILE_REF_ONEREF		0x00000000U
39 #define FILE_REF_MAXREF		0x7FFFFFFFU
40 #define FILE_REF_SATURATED	0xA0000000U
41 #define FILE_REF_RELEASED	0xC0000000U
42 #define FILE_REF_DEAD		0xE0000000U
43 #define FILE_REF_NOREF		0xFFFFFFFFU
44 #endif
45 
/*
 * The counter is stored biased by -1: a stored value of 0
 * (FILE_REF_ONEREF) represents one held reference. See file_ref_init()
 * which stores 'cnt - 1' and file_ref_read() which returns 'c + 1'.
 * This lets the hot paths use a single atomic add/sub with a sign check.
 */
typedef struct {
#ifdef CONFIG_64BIT
	atomic64_t refcnt;
#else
	atomic_t refcnt;
#endif
} file_ref_t;
53 
54 /**
55  * file_ref_init - Initialize a file reference count
56  * @ref: Pointer to the reference count
57  * @cnt: The initial reference count typically '1'
58  */
59 static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
60 {
61 	atomic_long_set(&ref->refcnt, cnt - 1);
62 }
63 
64 bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt);
65 bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
66 
/**
 * file_ref_get - Acquire one reference on a file
 * @ref: Pointer to the reference count
 *
 * Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF.
 *
 * Provides full memory ordering.
 *
 * Return: False if the attempt to acquire a reference failed. This happens
 *         when the last reference has been put already. True if a reference
 *         was successfully acquired
 */
static __always_inline __must_check bool file_ref_get(file_ref_t *ref)
{
	/*
	 * Unconditionally increase the reference count with full
	 * ordering. The saturation and dead zones provide enough
	 * tolerance for this.
	 *
	 * If this indicates a negative count, the file in question may
	 * already have been freed and immediately reused due to
	 * SLAB_TYPESAFE_BY_RCU. Hence, unconditionally altering the
	 * file reference count - to e.g., reset it back to the middle
	 * of the dead zone - risks marking someone else's file as dead
	 * behind their back.
	 *
	 * It would be possible to do a careful:
	 *
	 * cnt = atomic_long_inc_return();
	 * if (likely(cnt >= 0))
	 *	return true;
	 *
	 * and then something like:
	 *
	 * if (cnt >= FILE_REF_RELEASED)
	 *	atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD),
	 *
	 * to set the value back to the middle of the dead zone. But it's
	 * practically impossible to go from FILE_REF_DEAD to
	 * FILE_REF_ONEREF. It would need 2^61 (2305843009213693952)
	 * file_ref_get()s to resurrect such a dead file.
	 */
	return !atomic_long_add_negative(1, &ref->refcnt);
}
111 
/**
 * file_ref_inc - Acquire one reference on a file
 * @ref: Pointer to the reference count
 *
 * Acquire an additional reference on a file. Warns if the caller didn't
 * already hold a reference.
 *
 * Since the caller must already hold a reference, the count cannot
 * concurrently drop to zero, so a relaxed increment is sufficient here.
 */
static __always_inline void file_ref_inc(file_ref_t *ref)
{
	/* A negative prior value means the reference was already released. */
	long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt);
	WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference");
}
124 
/**
 * file_ref_put -- Release a file reference
 * @ref:	Pointer to the reference count
 *
 * Provides release memory ordering, such that prior loads and stores
 * are done before, and provides an acquire ordering on success such
 * that free() must come after.
 *
 * Return: True if this was the last reference with no future references
 *         possible. This signals the caller that it can safely release
 *         the object which is protected by the reference counter.
 *         False if there are still active references or the put() raced
 *         with a concurrent get()/put() pair. Caller is not allowed to
 *         release the protected object.
 */
static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
{
	long cnt;

	/*
	 * While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put()
	 * calls don't risk UAFs when a file is recycled, it is still
	 * vulnerable to UAFs caused by freeing the whole slab page once
	 * it becomes unused. Preventing file_ref_put() from being
	 * preempted protects against this.
	 */
	guard(preempt)();
	/*
	 * Unconditionally decrease the reference count. The saturation
	 * and dead zones provide enough tolerance for this. If this
	 * fails then we need to handle the last reference drop and
	 * cases inside the saturation and dead zones.
	 */
	cnt = atomic_long_dec_return(&ref->refcnt);
	if (cnt >= 0)
		return false;
	return __file_ref_put(ref, cnt);
}
163 
/**
 * file_ref_put_close - drop a reference expecting it would transition to FILE_REF_NOREF
 * @ref:	Pointer to the reference count
 *
 * Semantically it is equivalent to calling file_ref_put(), but it trades lower
 * performance in face of other CPUs also modifying the refcount for higher
 * performance when this happens to be the last reference.
 *
 * For the last reference file_ref_put() issues 2 atomics. One to drop the
 * reference and another to transition it to FILE_REF_DEAD. This routine does
 * the work in one step, but in order to do it has to pre-read the variable which
 * decreases scalability.
 *
 * Use with close() et al, stick to file_ref_put() by default.
 */
static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
{
	long old, new;

	old = atomic_long_read(&ref->refcnt);
	do {
		/* Saturation or dead zone: defer to the slow path. */
		if (unlikely(old < 0))
			return __file_ref_put_badval(ref, old);

		/* Last reference: go straight to FILE_REF_DEAD in one step. */
		if (old == FILE_REF_ONEREF)
			new = FILE_REF_DEAD;
		else
			new = old - 1;
	} while (!atomic_long_try_cmpxchg(&ref->refcnt, &old, new));

	return new == FILE_REF_DEAD;
}
196 
197 /**
198  * file_ref_read - Read the number of file references
199  * @ref: Pointer to the reference count
200  *
201  * Return: The number of held references (0 ... N)
202  */
203 static inline unsigned long file_ref_read(file_ref_t *ref)
204 {
205 	unsigned long c = atomic_long_read(&ref->refcnt);
206 
207 	/* Return 0 if within the DEAD zone. */
208 	return c >= FILE_REF_RELEASED ? 0 : c + 1;
209 }
210 
211 /*
212  * __file_ref_read_raw - Return the value stored in ref->refcnt
213  * @ref: Pointer to the reference count
214  *
215  * Return: The raw value found in the counter
216  *
217  * A hack for file_needs_f_pos_lock(), you probably want to use
218  * file_ref_read() instead.
219  */
220 static inline unsigned long __file_ref_read_raw(file_ref_t *ref)
221 {
222 	return atomic_long_read(&ref->refcnt);
223 }
224 
225 #endif
226