#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/barrier.h>

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */

#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)		((x)->slock == 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}

static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	local_irq_save(flags);
	lock->slock = 0;
	barrier();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)		({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)		do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock)	((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */

#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#define arch_read_can_lock(lock)	(((void)(lock), 1))
#define arch_write_can_lock(lock)	(((void)(lock), 1))

#endif /* __LINUX_SPINLOCK_UP_H */
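
/*
 * Illustrative sketch (not part of this header): because the debug
 * encoding above is inverted, a zero-initialized lock reads as "locked",
 * so a lock that was never set to the unlocked value (1) is caught the
 * first time it is taken.  A stand-alone user-space analogue, using a
 * hypothetical struct that mirrors ->slock, might look like this:
 *
 *	struct demo_lock { char slock; };	/- 1 = unlocked, 0 = locked -/
 *	#define DEMO_LOCK_UNLOCKED { 1 }
 *
 *	static int demo_trylock(struct demo_lock *l)
 *	{
 *		char oldval = l->slock;		/- sample the current state -/
 *		l->slock = 0;			/- mark it locked -/
 *		return oldval > 0;		/- success only if it was free -/
 *	}
 *
 *	struct demo_lock good = DEMO_LOCK_UNLOCKED;	/- trylock succeeds -/
 *	struct demo_lock bad;				/- zero-initialized: trylock
 *							   fails, exposing the
 *							   missing initializer -/
 *
 * (Nested comment markers are written as /- -/ here only so the sketch
 * fits inside this block comment.)
 */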