/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>

#include <asm/atomic.h>

/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
struct mutex {
	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
	atomic_t		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
	struct thread_info	*owner;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
	const char		*name;
	void			*magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

/*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
	struct list_head	list;
	struct task_struct	*task;
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;
#endif
};

#ifdef CONFIG_DEBUG_MUTEXES
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
# define mutex_init(mutex)				\
do {							\
	static struct lock_class_key __key;		\
							\
	__mutex_init((mutex), #mutex, &__key);		\
} while (0)
# define mutex_destroy(mutex)				do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
		, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

#define __MUTEX_INITIALIZER(lockname)					\
		{ .count = ATOMIC_INIT(1)				\
		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock)	\
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list)	\
		__DEBUG_MUTEX_INITIALIZER(lockname)			\
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void __mutex_init(struct mutex *lock, const char *name,
			 struct lock_class_key *key);

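/*
 * A minimal usage sketch of the declarations above (kept out of the
 * build with #if 0; all example_* identifiers are hypothetical, not
 * part of this API).  DEFINE_MUTEX() initializes a mutex statically at
 * compile time, while mutex_init() is the run-time initializer for a
 * mutex embedded in a dynamically set-up object; mutex_lock() may
 * sleep and must be paired with mutex_unlock() by the same task.
 */
#if 0
static DEFINE_MUTEX(example_lock);	/* statically initialized, unlocked */
static int example_state;

static void example_set_state(int new_state)
{
	mutex_lock(&example_lock);	/* sleeps until the lock is acquired */
	example_state = new_state;	/* critical section */
	mutex_unlock(&example_lock);	/* only the owning task may unlock */
}

struct example_dev {
	struct mutex lock;
};

static void example_dev_setup(struct example_dev *dev)
{
	mutex_init(&dev->lock);		/* run-time initialization via the API */
}
#endif
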
/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline int mutex_is_locked(struct mutex *lock)
{
	return atomic_read(&lock->count) != 1;
}

/*
 * See kernel/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/mutex-design.txt.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
					unsigned int subclass);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
					unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);

# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
#endif

/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 *       not the down_trylock() convention!
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
 */
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);

#endif
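
/*
 * A minimal sketch of the return-value conventions of the lock calls
 * declared above (kept out of the build with #if 0; the example_*
 * identifiers are hypothetical).  mutex_trylock() never blocks and
 * returns 1 on success, 0 on contention, whereas
 * mutex_lock_interruptible() returns 0 on success and a negative errno
 * if the sleep was interrupted by a signal.
 */
#if 0
static DEFINE_MUTEX(example_lock);

static int example_poll_work(void)
{
	if (!mutex_trylock(&example_lock))	/* 0: contended, nothing acquired */
		return 0;			/* did not block, did no work */
	/* ... work done under the lock ... */
	mutex_unlock(&example_lock);
	return 1;
}

static int example_wait_for_work(void)
{
	int ret;

	ret = mutex_lock_interruptible(&example_lock);
	if (ret)
		return ret;			/* interrupted by a signal */
	/* ... work done under the lock ... */
	mutex_unlock(&example_lock);
	return 0;
}
#endif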