#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))

void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
							__releases(lock);
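
/*
 * Illustrative only: callers normally go through the wrappers in
 * <linux/spinlock.h>, which on SMP builds expand to the _spin_*()
 * entry points declared above ("my_lock" is a placeholder name):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */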

/*
 * If a CONFIG_INLINE_SPIN_* option is set, map the corresponding
 * out-of-line _spin_*() declaration above to the __spin_*() inline
 * defined below; otherwise the out-of-line version built in
 * kernel/spinlock.c is used.
 */
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif

static inline int __spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Lock not taken: drop the preemption count we speculatively took. */
	preempt_enable();
	return 0;
}
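
/*
 * Illustrative only: a non-blocking acquisition via the spin_trylock()
 * wrapper from <linux/spinlock.h> ("stats_lock" is a placeholder name):
 *
 *	if (spin_trylock(&stats_lock)) {
 *		... update the statistics ...
 *		spin_unlock(&stats_lock);
 *	} else {
 *		... contended: skip or defer the update ...
 *	}
 */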

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * With lockdep we don't want the hand-coded irq-enable in
	 * _raw_spin_lock_flags(), because lockdep assumes that
	 * interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
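
/*
 * Illustrative only: the spin_lock_irqsave() wrapper pairs with
 * spin_unlock_irqrestore() ("dev_lock" is a placeholder name):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev_lock, flags);
 *	... critical section, hardirqs disabled on this CPU ...
 *	spin_unlock_irqrestore(&dev_lock, flags);
 */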

static inline void __spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
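
/*
 * Illustrative only: spin_lock_bh() pairs with spin_unlock_bh() for
 * data shared with softirq context ("queue_lock" is a placeholder):
 *
 *	spin_lock_bh(&queue_lock);
 *	... critical section, softirqs disabled on this CPU ...
 *	spin_unlock_bh(&queue_lock);
 */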

static inline void __spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __spin_unlock_irqrestore(spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	/*
	 * local_bh_enable_ip() below runs any pending softirqs and checks
	 * for a needed reschedule, so use the _no_resched flavour of
	 * preempt_enable() here rather than rescheduling twice.
	 */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Lock not taken: undo the bh/preempt disables in reverse order. */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
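
/*
 * Illustrative only: spin_trylock_bh() pairs with spin_unlock_bh() on
 * success ("timer_lock" is a placeholder name):
 *
 *	if (spin_trylock_bh(&timer_lock)) {
 *		... touch data shared with softirq context ...
 *		spin_unlock_bh(&timer_lock);
 *	}
 */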

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */