/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

struct kref {
	atomic_t refcount;
};

#define KREF_INIT(n)	{ .refcount = ATOMIC_INIT(n), }

/**
 * kref_init - initialize object.
 * @kref: object in question.
 */
static inline void kref_init(struct kref *kref)
{
	atomic_set(&kref->refcount, 1);
}

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 */
static inline void kref_get(struct kref *kref)
{
	/* If refcount was 0 before incrementing then we have a race
	 * condition: this kref is being freed by some other thread right
	 * now.  In that case one should use kref_get_unless_zero().
	 */
	WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.  If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it.  You have
 *	     been warned.
 *
 * Subtract @count from the refcount, and if it reaches 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware, if this
 * function returns 0, you still can not count on the kref remaining in
 * memory.  Only use the return value to check whether the kref is now gone;
 * never use it to conclude that the object is still present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
	     void (*release)(struct kref *kref))
{
	WARN_ON(release == NULL);

	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.  If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it.  You have
 *	     been warned.
 *
 * Decrement the refcount, and if it reaches 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware, if this
 * function returns 0, you still can not count on the kref remaining in
 * memory.  Only use the return value to check whether the kref is now gone;
 * never use it to conclude that the object is still present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	return kref_sub(kref, 1, release);
}
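
/*
 * Typical usage, as a minimal sketch ("struct foo" and foo_release() are
 * illustrative only, not part of this API):
 *
 *	struct foo {
 *		struct kref refcount;
 *	};
 *
 *	static void foo_release(struct kref *kref)
 *	{
 *		struct foo *foo = container_of(kref, struct foo, refcount);
 *
 *		kfree(foo);
 *	}
 *
 *	struct foo *foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	if (!foo)
 *		return NULL;
 *	kref_init(&foo->refcount);
 *	kref_get(&foo->refcount);
 *	kref_put(&foo->refcount, foo_release);
 *	kref_put(&foo->refcount, foo_release);
 *
 * kref_init() starts the count at 1, kref_get() hands out a second
 * reference, and the second kref_put() drops the count to 0 and runs
 * foo_release().  Note that foo_release() wraps kfree() behind a
 * container_of() rather than passing kfree directly, as demanded above.
 */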

/**
 * kref_put_mutex - decrement refcount for object, taking @lock on the last put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 * @lock: mutex taken before the final decrement; it is still held when
 *	  @release is called, and @release is expected to drop it.
 *
 * Decrement the refcount, and if it reaches 0, call release() with @lock
 * held.  Return 1 if the object was removed, otherwise return 0.
 */
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	WARN_ON(release == NULL);
	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
		mutex_lock(lock);
		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
			mutex_unlock(lock);
			return 0;
		}
		release(kref);
		return 1;
	}
	return 0;
}

/**
 * kref_get_unless_zero - increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded.  Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + remove from lookup
 * structure.  Furthermore, RCU implementations become extremely tricky.
 * With a lookup followed by a kref_get_unless_zero *with return value check*,
 * locking in the kref_put path can be deferred to the actual removal from
 * the lookup structure, and RCU lookups become trivial.  A sketch of this
 * pattern appears at the end of this file.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
}
#endif /* _KREF_H_ */
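
/*
 * A sketch of the kref_get_unless_zero() lookup pattern described above
 * (foo_lookup(), foo_idr and the kref embedded in "struct foo" are
 * assumptions for illustration; any RCU-safe lookup structure works):
 *
 *	struct foo *foo_lookup(int id)
 *	{
 *		struct foo *foo;
 *
 *		rcu_read_lock();
 *		foo = idr_find(&foo_idr, id);
 *		if (foo && !kref_get_unless_zero(&foo->refcount))
 *			foo = NULL;
 *		rcu_read_unlock();
 *		return foo;
 *	}
 *
 * If kref_get_unless_zero() fails, the object is already being torn down
 * and must be treated as not found.  The write lock is then only needed
 * in the kref_put() path around the removal from foo_idr, and the release
 * function must defer the actual kfree() past an RCU grace period (for
 * example with kfree_rcu()) so the lookup above never touches freed memory.
 */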