#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them. In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>

struct lockref {
	union {
#ifdef CONFIG_CMPXCHG_LOCKREF
		/*
		 * The lock and the count viewed as one naturally-aligned
		 * 64-bit word, so a single cmpxchg can cover both at once
		 * (see the header comment above). Only meaningful when the
		 * arch/config opts in via CONFIG_CMPXCHG_LOCKREF.
		 */
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;	/* protects @count (and whatever the embedding object guards) */
			unsigned int count;	/* reference count, updated under @lock or via cmpxchg */
		};
	};
};

/* Unconditionally take a reference. */
extern void lockref_get(struct lockref *);
/*
 * NOTE(review): from the names, the *_not_zero/*_or_lock/*_not_dead
 * variants conditionally succeed and report the outcome via the int
 * return; exact success/failure convention is defined in lib/lockref.c,
 * not visible here — verify there before relying on it.
 */
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

/* Mark the lockref dead (see lockref_get_not_dead() for the paired check). */
extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);

#endif /* __LINUX_LOCKREF_H */