/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <[email protected]>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref. After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()) and it's then safe to drop the initial ref with
 * percpu_ref_put().
 *
 * Note that the free path, free_ioctx(), needs to go through explicit
 * call_rcu() to synchronize with RCU protected lookup_ioctx(). percpu_ref
 * operations don't imply RCU grace periods of any kind and if a user wants to
 * combine percpu_ref with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() must be called precisely once, so if teardown can
 * be initiated from more than one place, the code needs some other mechanism
 * to synchronize it. The aio code relies on removing the kioctx from the
 * process's table first, which can only succeed once.
 */
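
/*
 * A minimal usage sketch of the pattern described above (not taken from the
 * kernel tree; struct foo, foo_release(), foo_create() and foo_shutdown()
 * are hypothetical names used only for illustration):
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		kfree(foo);
 *	}
 *
 *	int foo_create(struct foo *foo)
 *	{
 *		return percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
 *	}
 *
 *	void foo_shutdown(struct foo *foo)
 *	{
 *		percpu_ref_kill(&foo->ref);	(mark dead first ...)
 *		percpu_ref_put(&foo->ref);	(... then drop the initial ref)
 *	}
 *
 * Ordinary users bracket their access with percpu_ref_get()/percpu_ref_put();
 * foo_release() runs once the last ref is put after the kill.
 */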

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu(). If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 * Implies ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode. Must be revived with
	 * percpu_ref_reinit() before use. Implies INIT_ATOMIC and
	 * ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,

	/*
	 * Allow switching from atomic mode to percpu mode.
	 */
	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
};

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bits of the pointer hold the mode flags; if
	 * __PERCPU_REF_ATOMIC is set, get/put manipulate the atomic
	 * count instead of the percpu counters.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
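
/*
 * A sketch of a confirmed kill (hypothetical foo names as above; assumes
 * struct foo embeds a struct completion confirm_done set up with
 * init_completion()). Once foo_confirm_kill() has run,
 * percpu_ref_tryget_live() is guaranteed to fail:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *	percpu_ref_put(&foo->ref);	(drop the initial ref)
 */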

/*
 * Internal helper. Don't use outside percpu-refcount proper. The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->percpu_count_ptr carries a
 * non-NULL pointer.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
				   unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer. If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 *
	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
	 * implies ATOMIC anyway. Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->count);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed. Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
 * should be used. After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
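
/*
 * Sketch of a lookup path paired with the shutdown described at the top of
 * this file (hypothetical names; foo_idr is an assumed IDR protecting the
 * lookup table, and lookup_ioctx() in fs/aio.c follows the same pattern).
 * The RCU read section is what keeps foo from being freed between the
 * table lookup and the tryget, which is why the free path must go through
 * an RCU grace period:
 *
 *	struct foo *foo_lookup(unsigned long id)
 *	{
 *		struct foo *foo;
 *
 *		rcu_read_lock();
 *		foo = idr_find(&foo_idr, id);
 *		if (foo && !percpu_ref_tryget_live(&foo->ref))
 *			foo = NULL;
 *		rcu_read_unlock();
 *		return foo;
 *	}
 *
 * The caller drops the acquired ref with percpu_ref_put() when done.
 */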

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}

#endif /* _LINUX_PERCPU_REFCOUNT_H */
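
/*
 * Example use of the predicates above (hypothetical; assumes foo_release()
 * signals a struct completion foo->released instead of freeing the object,
 * so @ref outlives the last put and may still be inspected and exited):
 *
 *	percpu_ref_kill(&foo->ref);
 *	percpu_ref_put(&foo->ref);		(drop the initial ref)
 *	wait_for_completion(&foo->released);
 *	WARN_ON(!percpu_ref_is_zero(&foo->ref));
 *	percpu_ref_exit(&foo->ref);
 */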