/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells ([email protected]).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

struct optimistic_spin_queue;
struct rw_semaphore;

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#include <linux/rwsem-spinlock.h> /* use a generic implementation */
#else
/* All arch specific implementations share the same struct */
struct rw_semaphore {
	long count;
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_SMP
	/*
	 * Write owner. Used as a speculative check to see
	 * if the owner is running on the cpu.
	 */
	struct task_struct *owner;
	struct optimistic_spin_queue *osq; /* spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/* Include the arch specific part */
#include <asm/rwsem.h>

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return sem->count != 0;
}

#endif

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#define __RWSEM_INITIALIZER(name)			\
	{ RWSEM_UNLOCKED_VALUE,				\
	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
	  LIST_HEAD_INIT((name).wait_list),		\
	  NULL, /* owner */				\
	  NULL /* mcs lock */				\
	  __RWSEM_DEP_MAP_INIT(name) }
#else
#define __RWSEM_INITIALIZER(name)			\
	{ RWSEM_UNLOCKED_VALUE,				\
	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
	  LIST_HEAD_INIT((name).wait_list)		\
	  __RWSEM_DEP_MAP_INIT(name) }
#endif

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)

/*
 * This is the same regardless of which rwsem implementation is used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody of an incompatible type wants access to the
 * lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/lockdep-design.txt for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)				\
do {									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);		\
} while (0)

/*
 * Take/release a lock when the task that releases it is not the one
 * that took it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass)	down_read(sem)
# define down_write_nest_lock(sem, nest_lock)	down_write(sem)
# define down_write_nested(sem, subclass)	down_write(sem)
# define down_read_non_owner(sem)		down_read(sem)
# define up_read_non_owner(sem)			up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */
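
/*
 * Illustrative usage sketch (not part of this interface): a hypothetical
 * "example" subsystem protecting a counter with an rwsem.  DECLARE_RWSEM()
 * builds a statically initialized semaphore; init_rwsem() is the run-time
 * equivalent for semaphores embedded in dynamically allocated objects.
 * All identifiers below (example_sem, example_count, ...) are made up for
 * the sketch.
 *
 *	static DECLARE_RWSEM(example_sem);
 *	static unsigned long example_count;
 *
 *	static unsigned long example_get(void)
 *	{
 *		unsigned long v;
 *
 *		down_read(&example_sem);	read lock: concurrent readers allowed
 *		v = example_count;
 *		up_read(&example_sem);
 *		return v;
 *	}
 *
 *	static void example_set(unsigned long v)
 *	{
 *		down_write(&example_sem);	write lock: fully exclusive
 *		example_count = v;
 *		downgrade_write(&example_sem);	keep a read lock, let readers in
 *		... continue using example_count read-only ...
 *		up_read(&example_sem);		pairs with the downgraded lock
 *	}
 *
 *	static int example_try_set(unsigned long v)
 *	{
 *		if (!down_write_trylock(&example_sem))
 *			return 0;		contended: do not touch the data
 *		example_count = v;
 *		up_write(&example_sem);
 *		return 1;
 *	}
 */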
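
/*
 * Illustrative lockdep annotation sketch (not part of this interface,
 * only meaningful with CONFIG_DEBUG_LOCK_ALLOC): two rwsems of the same
 * lock class taken in a fixed parent->child order.  Without the _nested()
 * annotation lockdep would flag the second acquisition as a potential
 * recursive deadlock.  "struct example_node" is made up for the sketch;
 * SINGLE_DEPTH_NESTING is the usual subclass from <linux/lockdep.h>.
 *
 *	struct example_node {
 *		struct rw_semaphore	sem;
 *		struct example_node	*parent;
 *	};
 *
 *	static void example_lock_pair(struct example_node *child)
 *	{
 *		down_read(&child->parent->sem);
 *		down_read_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *
 *		... both nodes are now stable ...
 *
 *		up_read(&child->sem);
 *		up_read(&child->parent->sem);
 *	}
 */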