/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
# error "Do not include directly, include linux/local_lock.h"
#endif

#include <linux/percpu-defs.h>
#include <linux/lockdep.h>

#ifndef CONFIG_PREEMPT_RT

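/*
 * A local_lock_t protects per-CPU data on the local CPU. The lock side
 * disables preemption (or interrupts, for the _irq/_irqsave variants);
 * with CONFIG_DEBUG_LOCK_ALLOC it additionally carries a lockdep map and
 * records the owning task. Without debugging the structure is empty and
 * the lock/unlock operations reduce to the context controls alone.
 */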
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
	struct task_struct	*owner;
#endif
} local_lock_t;

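/*
 * A localtry_lock_t pairs a local_lock_t with an "acquired" flag. The flag
 * lets trylock callers in contexts which can interrupt the lock holder
 * (hard IRQ, NMI) detect that the lock is already taken on this CPU and
 * back off instead of deadlocking.
 */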
typedef struct {
	local_lock_t	llock;
	unsigned int	acquired;
} localtry_lock_t;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname)		\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,		\
	},						\
	.owner = NULL,

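/*
 * Debug helpers: feed lockdep and track the owning task, so that acquiring
 * a lock which is already held, or releasing one from a different task,
 * trips DEBUG_LOCKS_WARN_ON().
 */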
static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

static inline void local_trylock_acquire(local_lock_t *l)
{
	lock_map_acquire_try(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}

static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
#define INIT_LOCALTRY_LOCK(lockname)	{ .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}

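/*
 * Minimal usage sketch (the names "my_lock" and "my_data" are hypothetical;
 * the local_lock()/local_unlock() wrappers live in linux/local_lock.h and
 * expand to the __local_lock()/__local_unlock() helpers below):
 *
 *	static DEFINE_PER_CPU(local_lock_t, my_lock) = INIT_LOCAL_LOCK(my_lock);
 *	static DEFINE_PER_CPU(struct my_data, my_data);
 *
 *	local_lock(&my_lock);
 *	... operate on this_cpu_ptr(&my_data) ...
 *	local_unlock(&my_lock);
 */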
#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)

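/*
 * Initialize the lockdep map for a lock used in nested softirq (BH)
 * sections; unlike __local_lock_init() it registers the lock as
 * LD_LOCK_NORMAL rather than LD_LOCK_PERCPU.
 */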
#define __spinlock_nested_bh_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_NORMAL);			\
	local_lock_debug_init(lock);				\
} while (0)

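/*
 * Lock side: disable the context (preemption or interrupts) first, then
 * perform the lockdep acquire on this CPU's lock instance. Unlock reverses
 * the order.
 */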
#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_unlock(lock)					\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		preempt_enable();				\
	} while (0)

#define __local_unlock_irq(lock)				\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_enable();				\
	} while (0)

#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_restore(flags);			\
	} while (0)

#define __local_lock_nested_bh(lock)				\
	do {							\
		lockdep_assert_in_softirq();			\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_unlock_nested_bh(lock)				\
	local_lock_release(this_cpu_ptr(lock))

/* localtry_lock_t variants */

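/*
 * The "acquired" flag is set with WRITE_ONCE() while the context is already
 * disabled and checked with READ_ONCE() in the trylock paths, so a context
 * that interrupts the holder (hard IRQ, NMI) observes a consistent value
 * and fails the trylock instead of deadlocking on this CPU.
 */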
#define __localtry_lock_init(lock)				\
do {								\
	__local_lock_init(&(lock)->llock);			\
	WRITE_ONCE((lock)->acquired, 0);			\
} while (0)

#define __localtry_lock(lock)					\
	do {							\
		localtry_lock_t *lt;				\
		preempt_disable();				\
		lt = this_cpu_ptr(lock);			\
		local_lock_acquire(&lt->llock);			\
		WRITE_ONCE(lt->acquired, 1);			\
	} while (0)

#define __localtry_lock_irq(lock)				\
	do {							\
		localtry_lock_t *lt;				\
		local_irq_disable();				\
		lt = this_cpu_ptr(lock);			\
		local_lock_acquire(&lt->llock);			\
		WRITE_ONCE(lt->acquired, 1);			\
	} while (0)

#define __localtry_lock_irqsave(lock, flags)			\
	do {							\
		localtry_lock_t *lt;				\
		local_irq_save(flags);				\
		lt = this_cpu_ptr(lock);			\
		local_lock_acquire(&lt->llock);			\
		WRITE_ONCE(lt->acquired, 1);			\
	} while (0)

#define __localtry_trylock(lock)				\
	({							\
		localtry_lock_t *lt;				\
		bool _ret;					\
								\
		preempt_disable();				\
		lt = this_cpu_ptr(lock);			\
		if (!READ_ONCE(lt->acquired)) {			\
			WRITE_ONCE(lt->acquired, 1);		\
			local_trylock_acquire(&lt->llock);	\
			_ret = true;				\
		} else {					\
			_ret = false;				\
			preempt_enable();			\
		}						\
		_ret;						\
	})

#define __localtry_trylock_irqsave(lock, flags)			\
	({							\
		localtry_lock_t *lt;				\
		bool _ret;					\
								\
		local_irq_save(flags);				\
		lt = this_cpu_ptr(lock);			\
		if (!READ_ONCE(lt->acquired)) {			\
			WRITE_ONCE(lt->acquired, 1);		\
			local_trylock_acquire(&lt->llock);	\
			_ret = true;				\
		} else {					\
			_ret = false;				\
			local_irq_restore(flags);		\
		}						\
		_ret;						\
	})
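/*
 * Trylock usage sketch from a context that may have interrupted the lock
 * holder ("my_lock" is hypothetical; the localtry_*() wrappers live in
 * linux/local_lock.h):
 *
 *	unsigned long flags;
 *
 *	if (localtry_trylock_irqsave(&my_lock, flags)) {
 *		... touch this CPU's data ...
 *		localtry_unlock_irqrestore(&my_lock, flags);
 *	}
 */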

#define __localtry_unlock(lock)					\
	do {							\
		localtry_lock_t *lt;				\
		lt = this_cpu_ptr(lock);			\
		WRITE_ONCE(lt->acquired, 0);			\
		local_lock_release(&lt->llock);			\
		preempt_enable();				\
	} while (0)

#define __localtry_unlock_irq(lock)				\
	do {							\
		localtry_lock_t *lt;				\
		lt = this_cpu_ptr(lock);			\
		WRITE_ONCE(lt->acquired, 0);			\
		local_lock_release(&lt->llock);			\
		local_irq_enable();				\
	} while (0)

#define __localtry_unlock_irqrestore(lock, flags)		\
	do {							\
		localtry_lock_t *lt;				\
		lt = this_cpu_ptr(lock);			\
		WRITE_ONCE(lt->acquired, 0);			\
		local_lock_release(&lt->llock);			\
		local_irq_restore(flags);			\
	} while (0)

#else /* !CONFIG_PREEMPT_RT */

/*
 * On PREEMPT_RT local_lock maps to a per-CPU spinlock_t, which protects
 * the critical section while staying preemptible.
 */
typedef spinlock_t local_lock_t;
typedef spinlock_t localtry_lock_t;

#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)

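/*
 * Note that the _irq/_irqsave variants do not disable interrupts here:
 * spinlock_t is a sleeping lock on RT, so they map to the plain lock
 * operations and "flags" is only zeroed to keep callers type-correct.
 * migrate_disable() pins the task to the current CPU instead of disabling
 * preemption.
 */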
#define __local_lock_init(l)					\
	do {							\
		local_spin_lock_init((l));			\
	} while (0)

#define __local_lock(__lock)					\
	do {							\
		migrate_disable();				\
		spin_lock(this_cpu_ptr((__lock)));		\
	} while (0)

#define __local_lock_irq(lock)			__local_lock(lock)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_lock(lock);				\
	} while (0)

#define __local_unlock(__lock)					\
	do {							\
		spin_unlock(this_cpu_ptr((__lock)));		\
		migrate_enable();				\
	} while (0)

#define __local_unlock_irq(lock)		__local_unlock(lock)

#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)

#define __local_lock_nested_bh(lock)				\
do {								\
	lockdep_assert_in_softirq_func();			\
	spin_lock(this_cpu_ptr(lock));				\
} while (0)

#define __local_unlock_nested_bh(lock)				\
do {								\
	spin_unlock(this_cpu_ptr((lock)));			\
} while (0)

/* localtry_lock_t variants */

#define __localtry_lock_init(lock)			__local_lock_init(lock)
#define __localtry_lock(lock)				__local_lock(lock)
#define __localtry_lock_irq(lock)			__local_lock(lock)
#define __localtry_lock_irqsave(lock, flags)		__local_lock_irqsave(lock, flags)
#define __localtry_unlock(lock)				__local_unlock(lock)
#define __localtry_unlock_irq(lock)			__local_unlock(lock)
#define __localtry_unlock_irqrestore(lock, flags)	__local_unlock_irqrestore(lock, flags)

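/*
 * spinlock_t on RT is rtmutex based and must not be acquired from hard
 * interrupt or NMI context, so the trylock simply reports failure there.
 */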
#define __localtry_trylock(lock)				\
	({							\
		int __locked;					\
								\
		if (in_nmi() || in_hardirq()) {			\
			__locked = 0;				\
		} else {					\
			migrate_disable();			\
			__locked = spin_trylock(this_cpu_ptr((lock)));	\
			if (!__locked)				\
				migrate_enable();		\
		}						\
		__locked;					\
	})

#define __localtry_trylock_irqsave(lock, flags)			\
	({							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__localtry_trylock(lock);			\
	})

#endif /* CONFIG_PREEMPT_RT */