/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>

#include <asm/atomic.h>

/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in irq contexts
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
struct mutex {
	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
	atomic_t		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
	struct thread_info	*owner;
	struct list_head	held_list;
	unsigned long		acquire_ip;
	const char		*name;
	void			*magic;
#endif
};

/*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
	struct list_head	list;
	struct task_struct	*task;
#ifdef CONFIG_DEBUG_MUTEXES
	struct mutex		*lock;
	void			*magic;
#endif
};

#ifdef CONFIG_DEBUG_MUTEXES
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
# define mutex_init(mutex)			__mutex_init(mutex, NULL)
# define mutex_destroy(mutex)			do { } while (0)
# define mutex_debug_show_all_locks()		do { } while (0)
# define mutex_debug_show_held_locks(p)		do { } while (0)
# define mutex_debug_check_no_locks_held(task)	do { } while (0)
# define mutex_debug_check_no_locks_freed(from, len)	do { } while (0)
#endif

#define __MUTEX_INITIALIZER(lockname) \
		{ .count = ATOMIC_INIT(1) \
		, .wait_lock = SPIN_LOCK_UNLOCKED \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
		__DEBUG_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void fastcall __mutex_init(struct mutex *lock, const char *name);

/***
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline int fastcall mutex_is_locked(struct mutex *lock)
{
	return atomic_read(&lock->count) != 1;
}
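
/*
 * Illustrative usage sketch (not part of the upstream header): how a
 * mutex is typically defined, initialized and used under the rules
 * listed above. The struct and function names (my_dev, my_dev_setup,
 * my_dev_bump) are made up for this example.
 *
 *	static DEFINE_MUTEX(my_static_lock);	 - statically defined mutex
 *
 *	struct my_dev {
 *		struct mutex	lock;
 *		int		counter;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);		 - init via the API, never
 *						   via memset or copying
 *	}
 *
 *	static void my_dev_bump(struct my_dev *dev)
 *	{
 *		mutex_lock(&dev->lock);		 - may sleep: process context only
 *		dev->counter++;
 *		mutex_unlock(&dev->lock);	 - only the owner may unlock
 *	}
 */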
/*
 * See kernel/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/mutex-design.txt.
 */
extern void fastcall mutex_lock(struct mutex *lock);
extern int fastcall mutex_lock_interruptible(struct mutex *lock);

/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 *       not the down_trylock() convention!
 */
extern int fastcall mutex_trylock(struct mutex *lock);
extern void fastcall mutex_unlock(struct mutex *lock);

#endif
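
/*
 * Illustrative sketch of the trylock and interruptible variants (not
 * part of the upstream header); my_dev refers to the example structure
 * sketched earlier. mutex_trylock() follows the spin_trylock()
 * convention noted above: it returns 1 if the lock was acquired and 0
 * if it was contended. mutex_lock_interruptible() returns 0 on success
 * and a negative error if the sleep was interrupted by a signal.
 *
 *	static int my_dev_try_bump(struct my_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return -EBUSY;		 - contended, did not sleep
 *		dev->counter++;
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 *
 *	static int my_dev_bump_or_restart(struct my_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;	 - a signal interrupted the wait
 *		dev->counter++;
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */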