xref: /linux-6.15/kernel/locking/rwsem.c (revision c0bed69d)
1 // SPDX-License-Identifier: GPL-2.0
2 /* kernel/locking/rwsem.c: R/W semaphores, public implementation
3  *
4  * Written by David Howells ([email protected]).
5  * Derived from asm-i386/semaphore.h
6  *
7  * Writer lock-stealing by Alex Shi <[email protected]>
8  * and Michel Lespinasse <[email protected]>
9  *
10  * Optimistic spinning by Tim Chen <[email protected]>
11  * and Davidlohr Bueso <[email protected]>. Based on mutexes.
12  *
13  * Rwsem count bit fields re-definition and rwsem rearchitecture by
14  * Waiman Long <[email protected]> and
15  * Peter Zijlstra <[email protected]>.
16  */
17 
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/debug.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/clock.h>
27 #include <linux/export.h>
28 #include <linux/rwsem.h>
29 #include <linux/atomic.h>
30 
31 #ifndef CONFIG_PREEMPT_RT
32 #include "lock_events.h"
33 
34 /*
35  * The least significant 2 bits of the owner value have the following
36  * meanings when set.
37  *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
38  *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
39  *
40  * When the rwsem is reader-owned and a spinning writer has timed out,
41  * the nonspinnable bit will be set to disable optimistic spinning.
42  *
43  * When a writer acquires a rwsem, it puts its task_struct pointer
44  * into the owner field. It is cleared after an unlock.
45  *
46  * When a reader acquires a rwsem, it will also put its task_struct
47  * pointer into the owner field with the RWSEM_READER_OWNED bit set.
48  * On unlock, the owner field will largely be left untouched. So
49  * for a free or reader-owned rwsem, the owner value may contain
50  * information about the last reader that acquired the rwsem.
51  *
52  * That information may be helpful in debugging cases where the system
53  * seems to hang on a reader-owned rwsem, especially if only one reader
54  * is involved. Ideally we would like to track all the readers that own
55  * a rwsem, but the overhead is simply too big.
56  *
57  * Fast-path reader optimistic lock stealing is supported when the rwsem
58  * was previously owned by a writer and the following conditions are met:
59  *  - the rwsem is not currently writer-owned
60  *  - the handoff bit isn't set.
61  */
62 #define RWSEM_READER_OWNED	(1UL << 0)
63 #define RWSEM_NONSPINNABLE	(1UL << 1)
64 #define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
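/*
 * As an illustrative sketch (not kernel code; rwsem_owner_flags() below is
 * what the implementation actually uses), the owner word of a reader-owned
 * rwsem decomposes with the masks above as:
 *
 *	unsigned long owner = atomic_long_read(&sem->owner);
 *	struct task_struct *last_reader =
 *		(struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
 *	bool reader_owned = owner & RWSEM_READER_OWNED;
 *	bool nonspinnable = owner & RWSEM_NONSPINNABLE;
 *
 * For a write-locked rwsem both flag bits are clear and the remaining bits
 * hold the task_struct pointer of the owning writer.
 */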
65 
66 #ifdef CONFIG_DEBUG_RWSEMS
67 # define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
68 	if (!debug_locks_silent &&				\
69 	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
70 		#c, atomic_long_read(&(sem)->count),		\
71 		(unsigned long) sem->magic,			\
72 		atomic_long_read(&(sem)->owner), (long)current,	\
73 		list_empty(&(sem)->wait_list) ? "" : "not "))	\
74 			debug_locks_off();			\
75 	} while (0)
76 #else
77 # define DEBUG_RWSEMS_WARN_ON(c, sem)
78 #endif
79 
80 /*
81  * On 64-bit architectures, the bit definitions of the count are:
82  *
83  * Bit  0    - writer locked bit
84  * Bit  1    - waiters present bit
85  * Bit  2    - lock handoff bit
86  * Bits 3-7  - reserved
87  * Bits 8-62 - 55-bit reader count
88  * Bit  63   - read fail bit
89  *
90  * On 32-bit architectures, the bit definitions of the count are:
91  *
92  * Bit  0    - writer locked bit
93  * Bit  1    - waiters present bit
94  * Bit  2    - lock handoff bit
95  * Bits 3-7  - reserved
96  * Bits 8-30 - 23-bit reader count
97  * Bit  31   - read fail bit
98  *
99  * It is not likely that the most significant bit (read fail bit) will ever
100  * be set. This guard bit is still checked anyway in the down_read() fastpath
101  * just in case we need to use up more of the reader bits for other purposes
102  * in the future.
103  *
104  * atomic_long_fetch_add() is used to obtain the reader lock, whereas
105  * atomic_long_cmpxchg() is used to obtain the writer lock.
106  *
107  * There are three places where the lock handoff bit may be set or cleared.
108  * 1) rwsem_mark_wake() for readers.
109  * 2) rwsem_try_write_lock() for writers.
110  * 3) Error path of rwsem_down_write_slowpath().
111  *
112  * For all the above cases, wait_lock will be held. A writer must also
113  * be the first one in the wait_list to be eligible for setting the handoff
114  * bit. So concurrent setting/clearing of the handoff bit is not possible.
115  */
116 #define RWSEM_WRITER_LOCKED	(1UL << 0)
117 #define RWSEM_FLAG_WAITERS	(1UL << 1)
118 #define RWSEM_FLAG_HANDOFF	(1UL << 2)
119 #define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))
120 
121 #define RWSEM_READER_SHIFT	8
122 #define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
123 #define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
124 #define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
125 #define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
126 #define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
127 				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
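/*
 * A minimal sketch of how these masks decompose a count value (illustration
 * only; the read-fail guard bit is assumed clear, as it normally is):
 *
 *	long count   = atomic_long_read(&sem->count);
 *	long readers = count >> RWSEM_READER_SHIFT;
 *	bool wlocked = count & RWSEM_WRITER_MASK;
 *	bool waiters = count & RWSEM_FLAG_WAITERS;
 *	bool handoff = count & RWSEM_FLAG_HANDOFF;
 *
 * A count of (2 << RWSEM_READER_SHIFT) | RWSEM_FLAG_WAITERS thus describes
 * a lock held by two readers with at least one waiter queued.
 */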
128 
129 /*
130  * All writes to owner are protected by WRITE_ONCE() to make sure that
131  * store tearing can't happen as optimistic spinners may read and use
132  * the owner value concurrently without the lock. Reads from owner, however,
133  * may not need READ_ONCE() as long as the pointer value is only used
134  * for comparison and isn't being dereferenced.
135  */
136 static inline void rwsem_set_owner(struct rw_semaphore *sem)
137 {
138 	atomic_long_set(&sem->owner, (long)current);
139 }
140 
141 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
142 {
143 	atomic_long_set(&sem->owner, 0);
144 }
145 
146 /*
147  * Test the flags in the owner field.
148  */
149 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
150 {
151 	return atomic_long_read(&sem->owner) & flags;
152 }
153 
154 /*
155  * The task_struct pointer of the last owning reader will be left in
156  * the owner field.
157  *
158  * Note that the owner value just indicates the task has owned the rwsem
159  * previously; it may not be the real owner or one of the real owners
160  * anymore when that field is examined, so take it with a grain of salt.
161  *
162  * The reader non-spinnable bit is preserved.
163  */
164 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
165 					    struct task_struct *owner)
166 {
167 	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
168 		(atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
169 
170 	atomic_long_set(&sem->owner, val);
171 }
172 
173 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
174 {
175 	__rwsem_set_reader_owned(sem, current);
176 }
177 
178 /*
179  * Return true if the rwsem is owned by a reader.
180  */
181 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
182 {
183 #ifdef CONFIG_DEBUG_RWSEMS
184 	/*
185 	 * Check the count to see if it is write-locked.
186 	 */
187 	long count = atomic_long_read(&sem->count);
188 
189 	if (count & RWSEM_WRITER_MASK)
190 		return false;
191 #endif
192 	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
193 }
194 
195 #ifdef CONFIG_DEBUG_RWSEMS
196 /*
197  * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
198  * is a task pointer in the owner field of a reader-owned rwsem, it will be the
199  * real owner or one of the real owners. The only exception is when the
200  * unlock is done by up_read_non_owner().
201  */
202 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
203 {
204 	unsigned long val = atomic_long_read(&sem->owner);
205 
206 	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
207 		if (atomic_long_try_cmpxchg(&sem->owner, &val,
208 					    val & RWSEM_OWNER_FLAGS_MASK))
209 			return;
210 	}
211 }
212 #else
213 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
214 {
215 }
216 #endif
217 
218 /*
219  * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
220  * remains set. Otherwise, the operation will be aborted.
221  */
222 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
223 {
224 	unsigned long owner = atomic_long_read(&sem->owner);
225 
226 	do {
227 		if (!(owner & RWSEM_READER_OWNED))
228 			break;
229 		if (owner & RWSEM_NONSPINNABLE)
230 			break;
231 	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
232 					  owner | RWSEM_NONSPINNABLE));
233 }
234 
235 static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
236 {
237 	*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
238 
239 	if (WARN_ON_ONCE(*cntp < 0))
240 		rwsem_set_nonspinnable(sem);
241 
242 	if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
243 		rwsem_set_reader_owned(sem);
244 		return true;
245 	}
246 
247 	return false;
248 }
249 
250 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
251 {
252 	long tmp = RWSEM_UNLOCKED_VALUE;
253 
254 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
255 		rwsem_set_owner(sem);
256 		return true;
257 	}
258 
259 	return false;
260 }
261 
262 /*
263  * Return just the real task structure pointer of the owner
264  */
265 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
266 {
267 	return (struct task_struct *)
268 		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
269 }
270 
271 /*
272  * Return the real task structure pointer of the owner and the embedded
273  * flags in the owner. pflags must be non-NULL.
274  */
275 static inline struct task_struct *
276 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
277 {
278 	unsigned long owner = atomic_long_read(&sem->owner);
279 
280 	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
281 	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
282 }
283 
284 /*
285  * Guide to the rw_semaphore's count field.
286  *
287  * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
288  * by a writer.
289  *
290  * The lock is owned by readers when
291  * (1) the RWSEM_WRITER_LOCKED isn't set in count,
292  * (2) some of the reader bits are set in count, and
293  * (3) the owner field has the RWSEM_READER_OWNED bit set.
294  *
295  * Having some reader bits set is not enough to guarantee a reader-owned
296  * lock, as the readers may be in the process of backing out from the count
297  * while a writer has just released the lock. So another writer may steal
298  * the lock immediately after that.
299  */
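/*
 * Expressed as a sketch (illustration only, not a kernel helper), the
 * reader-owned test described above combines the count and owner fields:
 *
 *	long count = atomic_long_read(&sem->count);
 *	bool reader_owned = !(count & RWSEM_WRITER_LOCKED) &&
 *			    (count & RWSEM_READER_MASK) &&
 *			    (atomic_long_read(&sem->owner) & RWSEM_READER_OWNED);
 *
 * Even a positive result is only a hint, for the reason given above.
 */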
300 
301 /*
302  * Initialize an rwsem:
303  */
304 void __init_rwsem(struct rw_semaphore *sem, const char *name,
305 		  struct lock_class_key *key)
306 {
307 #ifdef CONFIG_DEBUG_LOCK_ALLOC
308 	/*
309 	 * Make sure we are not reinitializing a held semaphore:
310 	 */
311 	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
312 	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
313 #endif
314 #ifdef CONFIG_DEBUG_RWSEMS
315 	sem->magic = sem;
316 #endif
317 	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
318 	raw_spin_lock_init(&sem->wait_lock);
319 	INIT_LIST_HEAD(&sem->wait_list);
320 	atomic_long_set(&sem->owner, 0L);
321 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
322 	osq_lock_init(&sem->osq);
323 #endif
324 }
325 EXPORT_SYMBOL(__init_rwsem);
326 
327 enum rwsem_waiter_type {
328 	RWSEM_WAITING_FOR_WRITE,
329 	RWSEM_WAITING_FOR_READ
330 };
331 
332 struct rwsem_waiter {
333 	struct list_head list;
334 	struct task_struct *task;
335 	enum rwsem_waiter_type type;
336 	unsigned long timeout;
337 };
338 #define rwsem_first_waiter(sem) \
339 	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
340 
341 enum rwsem_wake_type {
342 	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
343 	RWSEM_WAKE_READERS,	/* Wake readers only */
344 	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
345 };
346 
347 enum writer_wait_state {
348 	WRITER_NOT_FIRST,	/* Writer is not first in wait list */
349 	WRITER_FIRST,		/* Writer is first in wait list     */
350 	WRITER_HANDOFF		/* Writer is first & handoff needed */
351 };
352 
353 /*
354  * The typical HZ value is either 250 or 1000. So set the minimum waiting
355  * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
356  * queue before initiating the handoff protocol.
357  */
358 #define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)
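/*
 * Worked example of the timeout above: with HZ=1000, DIV_ROUND_UP(1000, 250)
 * is 4 jiffies (4ms); with HZ=250 it is 1 jiffy (4ms); with HZ=100 it rounds
 * up to 1 jiffy, i.e. 10ms, which is why the wait is "at least" 4ms.
 */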
359 
360 /*
361  * Magic number to batch-wakeup waiting readers, even when writers are
362  * also present in the queue. This both limits the amount of work the
363  * waking thread must do and also prevents any potential counter overflow,
364  * however unlikely.
365  */
366 #define MAX_READERS_WAKEUP	0x100
367 
368 /*
369  * handle the lock release when processes blocked on it can now run
370  * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
371  *   have been set.
372  * - there must be someone on the queue
373  * - the wait_lock must be held by the caller
374  * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
375  *   to actually wakeup the blocked task(s) and drop the reference count,
376  *   preferably when the wait_lock is released
377  * - woken process blocks are discarded from the list after having task zeroed
378  * - writers are only marked woken if downgrading is false
379  */
380 static void rwsem_mark_wake(struct rw_semaphore *sem,
381 			    enum rwsem_wake_type wake_type,
382 			    struct wake_q_head *wake_q)
383 {
384 	struct rwsem_waiter *waiter, *tmp;
385 	long oldcount, woken = 0, adjustment = 0;
386 	struct list_head wlist;
387 
388 	lockdep_assert_held(&sem->wait_lock);
389 
390 	/*
391 	 * Take a peek at the queue head waiter such that we can determine
392 	 * the wakeup(s) to perform.
393 	 */
394 	waiter = rwsem_first_waiter(sem);
395 
396 	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
397 		if (wake_type == RWSEM_WAKE_ANY) {
398 			/*
399 			 * Mark writer at the front of the queue for wakeup.
400 			 * Until the task is actually awoken later by
401 			 * the caller, other writers are able to steal it.
402 			 * Readers, on the other hand, will block as they
403 			 * will notice the queued writer.
404 			 */
405 			wake_q_add(wake_q, waiter->task);
406 			lockevent_inc(rwsem_wake_writer);
407 		}
408 
409 		return;
410 	}
411 
412 	/*
413 	 * No reader wakeup if there are too many of them already.
414 	 */
415 	if (unlikely(atomic_long_read(&sem->count) < 0))
416 		return;
417 
418 	/*
419 	 * Writers might steal the lock before we grant it to the next reader.
420 	 * We prefer to do the first reader grant before counting readers
421 	 * so we can bail out early if a writer stole the lock.
422 	 */
423 	if (wake_type != RWSEM_WAKE_READ_OWNED) {
424 		struct task_struct *owner;
425 
426 		adjustment = RWSEM_READER_BIAS;
427 		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
428 		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
429 			/*
430 			 * When we've been waiting "too" long (for writers
431 			 * to give up the lock), request a HANDOFF to
432 			 * force the issue.
433 			 */
434 			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
435 			    time_after(jiffies, waiter->timeout)) {
436 				adjustment -= RWSEM_FLAG_HANDOFF;
437 				lockevent_inc(rwsem_rlock_handoff);
438 			}
439 
440 			atomic_long_add(-adjustment, &sem->count);
441 			return;
442 		}
443 		/*
444 		 * Set it to reader-owned to give spinners an early
445 		 * indication that readers now have the lock.
446 		 * The reader nonspinnable bit seen at slowpath entry of
447 		 * the reader is copied over.
448 		 */
449 		owner = waiter->task;
450 		__rwsem_set_reader_owned(sem, owner);
451 	}
452 
453 	/*
454 	 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
455 	 * queue. We know that woken will be at least 1 as we accounted
456 	 * for above. Note we increment the 'active part' of the count by the
457 	 * number of readers before waking any processes up.
458 	 *
459 	 * This is an adaptation of the phase-fair R/W locks where at the
460 	 * reader phase (first waiter is a reader), all readers are eligible
461 	 * to acquire the lock at the same time irrespective of their order
462 	 * in the queue. The writers acquire the lock according to their
463 	 * order in the queue.
464 	 *
465 	 * We have to do wakeup in 2 passes to prevent the possibility that
466 	 * the reader count may be decremented before it is incremented. It
467 	 * is because the to-be-woken waiter may not have slept yet. So it
468 	 * may see waiter->task got cleared, finish its critical section and
469 	 * do an unlock before the reader count increment.
470 	 *
471 	 * 1) Collect the read-waiters in a separate list, count them and
472 	 *    fully increment the reader count in rwsem.
473 	 * 2) For each waiter in the new list, clear waiter->task and
474 	 *    put them into wake_q to be woken up later.
475 	 */
476 	INIT_LIST_HEAD(&wlist);
477 	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
478 		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
479 			continue;
480 
481 		woken++;
482 		list_move_tail(&waiter->list, &wlist);
483 
484 		/*
485 		 * Limit # of readers that can be woken up per wakeup call.
486 		 */
487 		if (unlikely(woken >= MAX_READERS_WAKEUP))
488 			break;
489 	}
490 
491 	adjustment = woken * RWSEM_READER_BIAS - adjustment;
492 	lockevent_cond_inc(rwsem_wake_reader, woken);
493 	if (list_empty(&sem->wait_list)) {
494 		/* hit end of list above */
495 		adjustment -= RWSEM_FLAG_WAITERS;
496 	}
497 
498 	/*
499 	 * When we've woken a reader, we no longer need to force writers
500 	 * to give up the lock and we can clear HANDOFF.
501 	 */
502 	if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
503 		adjustment -= RWSEM_FLAG_HANDOFF;
504 
505 	if (adjustment)
506 		atomic_long_add(adjustment, &sem->count);
507 
508 	/* 2nd pass */
509 	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
510 		struct task_struct *tsk;
511 
512 		tsk = waiter->task;
513 		get_task_struct(tsk);
514 
515 		/*
516 		 * Ensure calling get_task_struct() before setting the reader
517 		 * waiter to nil such that rwsem_down_read_slowpath() cannot
518 		 * race with do_exit() by always holding a reference count
519 		 * to the task to wake up.
520 		 */
521 		smp_store_release(&waiter->task, NULL);
522 		/*
523 		 * Ensure issuing the wakeup (either by us or someone else)
524 		 * after setting the reader waiter to nil.
525 		 */
526 		wake_q_add_safe(wake_q, tsk);
527 	}
528 }
529 
530 /*
531  * This function must be called with the sem->wait_lock held to prevent
532  * race conditions between checking the rwsem wait list and setting the
533  * sem->count accordingly.
534  *
535  * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
536  * bit is set or the lock is acquired with handoff bit cleared.
537  */
538 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
539 					enum writer_wait_state wstate)
540 {
541 	long count, new;
542 
543 	lockdep_assert_held(&sem->wait_lock);
544 
545 	count = atomic_long_read(&sem->count);
546 	do {
547 		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
548 
549 		if (has_handoff && wstate == WRITER_NOT_FIRST)
550 			return false;
551 
552 		new = count;
553 
554 		if (count & RWSEM_LOCK_MASK) {
555 			if (has_handoff || (wstate != WRITER_HANDOFF))
556 				return false;
557 
558 			new |= RWSEM_FLAG_HANDOFF;
559 		} else {
560 			new |= RWSEM_WRITER_LOCKED;
561 			new &= ~RWSEM_FLAG_HANDOFF;
562 
563 			if (list_is_singular(&sem->wait_list))
564 				new &= ~RWSEM_FLAG_WAITERS;
565 		}
566 	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
567 
568 	/*
569 	 * We have either acquired the lock with handoff bit cleared or
570 	 * set the handoff bit.
571 	 */
572 	if (new & RWSEM_FLAG_HANDOFF)
573 		return false;
574 
575 	rwsem_set_owner(sem);
576 	return true;
577 }
578 
579 /*
580  * The rwsem_spin_on_owner() function returns one of the following four values
581  * depending on the lock owner state.
582  *   OWNER_NULL  : owner is currently NULL
583  *   OWNER_WRITER: when owner changes and is a writer
584  *   OWNER_READER: when owner changes and the new owner may be a reader.
585  *   OWNER_NONSPINNABLE:
586  *		   when optimistic spinning has to stop because either the
587  *		   owner stops running, is unknown, or its timeslice has
588  *		   been used up.
589  */
590 enum owner_state {
591 	OWNER_NULL		= 1 << 0,
592 	OWNER_WRITER		= 1 << 1,
593 	OWNER_READER		= 1 << 2,
594 	OWNER_NONSPINNABLE	= 1 << 3,
595 };
596 
597 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
598 /*
599  * Try to acquire the write lock before the writer has been put on the wait queue.
600  */
601 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
602 {
603 	long count = atomic_long_read(&sem->count);
604 
605 	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
606 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
607 					count | RWSEM_WRITER_LOCKED)) {
608 			rwsem_set_owner(sem);
609 			lockevent_inc(rwsem_opt_lock);
610 			return true;
611 		}
612 	}
613 	return false;
614 }
615 
616 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
617 {
618 	struct task_struct *owner;
619 	unsigned long flags;
620 	bool ret = true;
621 
622 	if (need_resched()) {
623 		lockevent_inc(rwsem_opt_fail);
624 		return false;
625 	}
626 
627 	preempt_disable();
628 	/*
629 	 * Disabling preemption is equivalent to an RCU read-side critical
630 	 * section, thus the task_struct structure won't go away.
631 	 */
632 	owner = rwsem_owner_flags(sem, &flags);
633 	/*
634 	 * Don't check the read-owner as the entry may be stale.
635 	 */
636 	if ((flags & RWSEM_NONSPINNABLE) ||
637 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
638 		ret = false;
639 	preempt_enable();
640 
641 	lockevent_cond_inc(rwsem_opt_fail, !ret);
642 	return ret;
643 }
644 
645 #define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)
646 
647 static inline enum owner_state
648 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
649 {
650 	if (flags & RWSEM_NONSPINNABLE)
651 		return OWNER_NONSPINNABLE;
652 
653 	if (flags & RWSEM_READER_OWNED)
654 		return OWNER_READER;
655 
656 	return owner ? OWNER_WRITER : OWNER_NULL;
657 }
658 
659 static noinline enum owner_state
660 rwsem_spin_on_owner(struct rw_semaphore *sem)
661 {
662 	struct task_struct *new, *owner;
663 	unsigned long flags, new_flags;
664 	enum owner_state state;
665 
666 	lockdep_assert_preemption_disabled();
667 
668 	owner = rwsem_owner_flags(sem, &flags);
669 	state = rwsem_owner_state(owner, flags);
670 	if (state != OWNER_WRITER)
671 		return state;
672 
673 	for (;;) {
674 		/*
675 		 * When a waiting writer sets the handoff flag, it may spin
676 		 * on the owner as well. Once that writer acquires the lock,
677 		 * we can spin on it. So we don't need to quit even when the
678 		 * handoff bit is set.
679 		 */
680 		new = rwsem_owner_flags(sem, &new_flags);
681 		if ((new != owner) || (new_flags != flags)) {
682 			state = rwsem_owner_state(new, new_flags);
683 			break;
684 		}
685 
686 		/*
687 		 * Ensure we emit the owner->on_cpu dereference _after_
688 		 * checking that sem->owner still matches owner. If that fails,
689 		 * owner might point to free()d memory. If it still matches,
690 		 * our spinning context has already disabled preemption, which
691 		 * is equivalent to an RCU read-side critical section and
692 		 * ensures the memory stays valid.
693 		 */
694 		barrier();
695 
696 		if (need_resched() || !owner_on_cpu(owner)) {
697 			state = OWNER_NONSPINNABLE;
698 			break;
699 		}
700 
701 		cpu_relax();
702 	}
703 
704 	return state;
705 }
706 
707 /*
708  * Calculate reader-owned rwsem spinning threshold for writer
709  *
710  * The more readers own the rwsem, the longer it will take for them to
711  * wind down and free the rwsem. So the empirical formula used to
712  * determine the actual spinning time limit here is:
713  *
714  *   Spinning threshold = (10 + nr_readers/2)us
715  *
716  * The limit is capped to a maximum of 25us (30 readers). This is just
717  * a heuristic and is subject to change in the future.
718  */
719 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
720 {
721 	long count = atomic_long_read(&sem->count);
722 	int readers = count >> RWSEM_READER_SHIFT;
723 	u64 delta;
724 
725 	if (readers > 30)
726 		readers = 30;
727 	delta = (20 + readers) * NSEC_PER_USEC / 2;
728 
729 	return sched_clock() + delta;
730 }
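/*
 * Quick check of the formula above: with no readers the threshold is
 * sched_clock() + 10us, with 16 readers it is +18us, and the 30-reader cap
 * gives the maximum of +25us.
 */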
731 
732 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
733 {
734 	bool taken = false;
735 	int prev_owner_state = OWNER_NULL;
736 	int loop = 0;
737 	u64 rspin_threshold = 0;
738 
739 	preempt_disable();
740 
741 	/* sem->wait_lock should not be held when doing optimistic spinning */
742 	if (!osq_lock(&sem->osq))
743 		goto done;
744 
745 	/*
746 	 * Optimistically spin on the owner field and attempt to acquire the
747 	 * lock whenever the owner changes. Spinning will be stopped when:
748 	 *  1) the owning writer isn't running; or
749 	 *  2) readers own the lock and spinning time has exceeded the limit.
750 	 */
751 	for (;;) {
752 		enum owner_state owner_state;
753 
754 		owner_state = rwsem_spin_on_owner(sem);
755 		if (!(owner_state & OWNER_SPINNABLE))
756 			break;
757 
758 		/*
759 		 * Try to acquire the lock
760 		 */
761 		taken = rwsem_try_write_lock_unqueued(sem);
762 
763 		if (taken)
764 			break;
765 
766 		/*
767 		 * Time-based reader-owned rwsem optimistic spinning
768 		 */
769 		if (owner_state == OWNER_READER) {
770 			 * Re-initialize rspin_threshold every time
771 			 * Re-initialize rspin_threshold every time when
772 			 * the owner state changes from non-reader to reader.
773 			 * This allows a writer to steal the lock in between
774 			 * 2 reader phases and have the threshold reset at
775 			 * the beginning of the 2nd reader phase.
776 			 */
777 			if (prev_owner_state != OWNER_READER) {
778 				if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
779 					break;
780 				rspin_threshold = rwsem_rspin_threshold(sem);
781 				loop = 0;
782 			}
783 
784 			/*
785 			 * Check time threshold once every 16 iterations to
786 			 * avoid calling sched_clock() too frequently so
787 			 * as to reduce the average latency between the times
788 			 * when the lock becomes free and when the spinner
789 			 * is ready to do a trylock.
790 			 */
791 			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
792 				rwsem_set_nonspinnable(sem);
793 				lockevent_inc(rwsem_opt_nospin);
794 				break;
795 			}
796 		}
797 
798 		/*
799 		 * An RT task cannot do optimistic spinning if it cannot
800 		 * be sure the lock holder is running or live-lock may
801 		 * happen if the current task and the lock holder happen
802 		 * to run on the same CPU. However, aborting optimistic
803 		 * spinning when a NULL owner is detected may miss some
804 		 * opportunities where spinning can continue without causing
805 		 * a problem.
806 		 *
807 		 * There are 2 possible cases where an RT task may be able
808 		 * to continue spinning.
809 		 *
810 		 * 1) The lock owner is in the process of releasing the
811 		 *    lock, sem->owner is cleared but the lock has not
812 		 *    been released yet.
813 		 * 2) The lock was free and owner cleared, but another
814 		 *    task just comes in and acquires the lock before
815 		 *    we try to get it. The new owner may be a spinnable
816 		 *    writer.
817 		 *
818 		 * To take advantage of the two scenarios listed above, the RT
819 		 * task is made to retry one more time to see if it can
820 		 * acquire the lock or continue spinning on the new owning
821 		 * writer. Of course, if the time lag is long enough or the
822 		 * new owner is not a writer or spinnable, the RT task will
823 		 * quit spinning.
824 		 *
825 		 * If the owner is a writer, the need_resched() check is
826 		 * done inside rwsem_spin_on_owner(). If the owner is not
827 		 * a writer, need_resched() check needs to be done here.
828 		 */
829 		if (owner_state != OWNER_WRITER) {
830 			if (need_resched())
831 				break;
832 			if (rt_task(current) &&
833 			   (prev_owner_state != OWNER_WRITER))
834 				break;
835 		}
836 		prev_owner_state = owner_state;
837 
838 		/*
839 		 * The cpu_relax() call is a compiler barrier which forces
840 		 * everything in this loop to be re-loaded. We don't need
841 		 * memory barriers as we'll eventually observe the right
842 		 * values at the cost of a few extra spins.
843 		 */
844 		cpu_relax();
845 	}
846 	osq_unlock(&sem->osq);
847 done:
848 	preempt_enable();
849 	lockevent_cond_inc(rwsem_opt_fail, !taken);
850 	return taken;
851 }
852 
853 /*
854  * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
855  * only be called when the reader count reaches 0.
856  */
857 static inline void clear_nonspinnable(struct rw_semaphore *sem)
858 {
859 	if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
860 		atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
861 }
862 
863 #else
864 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
865 {
866 	return false;
867 }
868 
869 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
870 {
871 	return false;
872 }
873 
874 static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
875 
876 static inline enum owner_state
877 rwsem_spin_on_owner(struct rw_semaphore *sem)
878 {
879 	return OWNER_NONSPINNABLE;
880 }
881 #endif
882 
883 /*
884  * Wait for the read lock to be granted
885  */
886 static struct rw_semaphore __sched *
887 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
888 {
889 	long adjustment = -RWSEM_READER_BIAS;
890 	long rcnt = (count >> RWSEM_READER_SHIFT);
891 	struct rwsem_waiter waiter;
892 	DEFINE_WAKE_Q(wake_q);
893 	bool wake = false;
894 
895 	/*
896 	 * To prevent a constant stream of readers from starving a sleeping
897 	 * waiter, don't attempt optimistic lock stealing if the lock is
898 	 * currently owned by readers.
899 	 */
900 	if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
901 	    (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
902 		goto queue;
903 
904 	/*
905 	 * Reader optimistic lock stealing.
906 	 */
907 	if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
908 		rwsem_set_reader_owned(sem);
909 		lockevent_inc(rwsem_rlock_steal);
910 
911 		/*
912 		 * Wake up other readers in the wait queue if it is
913 		 * the first reader.
914 		 */
915 		if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
916 			raw_spin_lock_irq(&sem->wait_lock);
917 			if (!list_empty(&sem->wait_list))
918 				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
919 						&wake_q);
920 			raw_spin_unlock_irq(&sem->wait_lock);
921 			wake_up_q(&wake_q);
922 		}
923 		return sem;
924 	}
925 
926 queue:
927 	waiter.task = current;
928 	waiter.type = RWSEM_WAITING_FOR_READ;
929 	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
930 
931 	raw_spin_lock_irq(&sem->wait_lock);
932 	if (list_empty(&sem->wait_list)) {
933 		/*
934 		 * by a writer and doesn't have the handoff bit set, this reader can
935 		 * by a writer or has the handoff bit set, this reader can
936 		 * exit the slowpath and return immediately as its
937 		 * RWSEM_READER_BIAS has already been set in the count.
938 		 */
939 		if (!(atomic_long_read(&sem->count) &
940 		     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
941 			/* Provide lock ACQUIRE */
942 			smp_acquire__after_ctrl_dep();
943 			raw_spin_unlock_irq(&sem->wait_lock);
944 			rwsem_set_reader_owned(sem);
945 			lockevent_inc(rwsem_rlock_fast);
946 			return sem;
947 		}
948 		adjustment += RWSEM_FLAG_WAITERS;
949 	}
950 	list_add_tail(&waiter.list, &sem->wait_list);
951 
952 	/* we're now waiting on the lock, but no longer actively locking */
953 	count = atomic_long_add_return(adjustment, &sem->count);
954 
955 	/*
956 	 * If there are no active locks, wake the front queued process(es).
957 	 *
958 	 * If there are no writers and we are first in the queue,
959 	 * wake our own waiter to join the existing active readers!
960 	 */
961 	if (!(count & RWSEM_LOCK_MASK)) {
962 		clear_nonspinnable(sem);
963 		wake = true;
964 	}
965 	if (wake || (!(count & RWSEM_WRITER_MASK) &&
966 		    (adjustment & RWSEM_FLAG_WAITERS)))
967 		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
968 
969 	raw_spin_unlock_irq(&sem->wait_lock);
970 	wake_up_q(&wake_q);
971 
972 	/* wait to be given the lock */
973 	for (;;) {
974 		set_current_state(state);
975 		if (!smp_load_acquire(&waiter.task)) {
976 			/* Matches rwsem_mark_wake()'s smp_store_release(). */
977 			break;
978 		}
979 		if (signal_pending_state(state, current)) {
980 			raw_spin_lock_irq(&sem->wait_lock);
981 			if (waiter.task)
982 				goto out_nolock;
983 			raw_spin_unlock_irq(&sem->wait_lock);
984 			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
985 			break;
986 		}
987 		schedule();
988 		lockevent_inc(rwsem_sleep_reader);
989 	}
990 
991 	__set_current_state(TASK_RUNNING);
992 	lockevent_inc(rwsem_rlock);
993 	return sem;
994 
995 out_nolock:
996 	list_del(&waiter.list);
997 	if (list_empty(&sem->wait_list)) {
998 		atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
999 				   &sem->count);
1000 	}
1001 	raw_spin_unlock_irq(&sem->wait_lock);
1002 	__set_current_state(TASK_RUNNING);
1003 	lockevent_inc(rwsem_rlock_fail);
1004 	return ERR_PTR(-EINTR);
1005 }
1006 
1007 /*
1008  * Wait until we successfully acquire the write lock
1009  */
1010 static struct rw_semaphore *
1011 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1012 {
1013 	long count;
1014 	enum writer_wait_state wstate;
1015 	struct rwsem_waiter waiter;
1016 	struct rw_semaphore *ret = sem;
1017 	DEFINE_WAKE_Q(wake_q);
1018 
1019 	/* do optimistic spinning and steal lock if possible */
1020 	if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
1021 		/* rwsem_optimistic_spin() implies ACQUIRE on success */
1022 		return sem;
1023 	}
1024 
1025 	/*
1026 	 * Optimistic spinning failed, proceed to the slowpath
1027 	 * and block until we can acquire the sem.
1028 	 */
1029 	waiter.task = current;
1030 	waiter.type = RWSEM_WAITING_FOR_WRITE;
1031 	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1032 
1033 	raw_spin_lock_irq(&sem->wait_lock);
1034 
1035 	/* account for this before adding a new element to the list */
1036 	wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
1037 
1038 	list_add_tail(&waiter.list, &sem->wait_list);
1039 
1040 	/* we're now waiting on the lock */
1041 	if (wstate == WRITER_NOT_FIRST) {
1042 		count = atomic_long_read(&sem->count);
1043 
1044 		/*
1045 		 * If there were already threads queued before us and:
1046 		 *  1) there are no active locks, wake the front
1047 		 *     queued process(es) as the handoff bit might be set.
1048 		 *  2) there are no active writers and some readers, the lock
1049 		 *     must be read owned; so we try to wake any read lock
1050 		 *     waiters that were queued ahead of us.
1051 		 */
1052 		if (count & RWSEM_WRITER_MASK)
1053 			goto wait;
1054 
1055 		rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1056 					? RWSEM_WAKE_READERS
1057 					: RWSEM_WAKE_ANY, &wake_q);
1058 
1059 		if (!wake_q_empty(&wake_q)) {
1060 			/*
1061 			 * We want to minimize wait_lock hold time especially
1062 			 * when a large number of readers are to be woken up.
1063 			 */
1064 			raw_spin_unlock_irq(&sem->wait_lock);
1065 			wake_up_q(&wake_q);
1066 			wake_q_init(&wake_q);	/* Used again, reinit */
1067 			raw_spin_lock_irq(&sem->wait_lock);
1068 		}
1069 	} else {
1070 		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1071 	}
1072 
1073 wait:
1074 	/* wait until we successfully acquire the lock */
1075 	set_current_state(state);
1076 	for (;;) {
1077 		if (rwsem_try_write_lock(sem, wstate)) {
1078 			/* rwsem_try_write_lock() implies ACQUIRE on success */
1079 			break;
1080 		}
1081 
1082 		raw_spin_unlock_irq(&sem->wait_lock);
1083 
1084 		/*
1085 		 * After setting the handoff bit and failing to acquire
1086 		 * the lock, attempt to spin on owner to accelerate lock
1087 		 * transfer. If the previous owner is an on-cpu writer and it
1088 		 * has just released the lock, OWNER_NULL will be returned.
1089 		 * In this case, we attempt to acquire the lock again
1090 		 * without sleeping.
1091 		 */
1092 		if (wstate == WRITER_HANDOFF) {
1093 			enum owner_state owner_state;
1094 
1095 			preempt_disable();
1096 			owner_state = rwsem_spin_on_owner(sem);
1097 			preempt_enable();
1098 
1099 			if (owner_state == OWNER_NULL)
1100 				goto trylock_again;
1101 		}
1102 
1103 		/* Block until there are no active lockers. */
1104 		for (;;) {
1105 			if (signal_pending_state(state, current))
1106 				goto out_nolock;
1107 
1108 			schedule();
1109 			lockevent_inc(rwsem_sleep_writer);
1110 			set_current_state(state);
1111 			/*
1112 			 * If HANDOFF bit is set, unconditionally do
1113 			 * a trylock.
1114 			 */
1115 			if (wstate == WRITER_HANDOFF)
1116 				break;
1117 
1118 			if ((wstate == WRITER_NOT_FIRST) &&
1119 			    (rwsem_first_waiter(sem) == &waiter))
1120 				wstate = WRITER_FIRST;
1121 
1122 			count = atomic_long_read(&sem->count);
1123 			if (!(count & RWSEM_LOCK_MASK))
1124 				break;
1125 
1126 			/*
1127 			 * The setting of the handoff bit is deferred
1128 			 * until rwsem_try_write_lock() is called.
1129 			 */
1130 			if ((wstate == WRITER_FIRST) && (rt_task(current) ||
1131 			    time_after(jiffies, waiter.timeout))) {
1132 				wstate = WRITER_HANDOFF;
1133 				lockevent_inc(rwsem_wlock_handoff);
1134 				break;
1135 			}
1136 		}
1137 trylock_again:
1138 		raw_spin_lock_irq(&sem->wait_lock);
1139 	}
1140 	__set_current_state(TASK_RUNNING);
1141 	list_del(&waiter.list);
1142 	raw_spin_unlock_irq(&sem->wait_lock);
1143 	lockevent_inc(rwsem_wlock);
1144 
1145 	return ret;
1146 
1147 out_nolock:
1148 	__set_current_state(TASK_RUNNING);
1149 	raw_spin_lock_irq(&sem->wait_lock);
1150 	list_del(&waiter.list);
1151 
1152 	if (unlikely(wstate == WRITER_HANDOFF))
1153 		atomic_long_add(-RWSEM_FLAG_HANDOFF,  &sem->count);
1154 
1155 	if (list_empty(&sem->wait_list))
1156 		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
1157 	else
1158 		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1159 	raw_spin_unlock_irq(&sem->wait_lock);
1160 	wake_up_q(&wake_q);
1161 	lockevent_inc(rwsem_wlock_fail);
1162 
1163 	return ERR_PTR(-EINTR);
1164 }
1165 
1166 /*
1167  * handle waking up a waiter on the semaphore
1168  * - up_read/up_write has decremented the active part of count if we come here
1169  */
1170 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
1171 {
1172 	unsigned long flags;
1173 	DEFINE_WAKE_Q(wake_q);
1174 
1175 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
1176 
1177 	if (!list_empty(&sem->wait_list))
1178 		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1179 
1180 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1181 	wake_up_q(&wake_q);
1182 
1183 	return sem;
1184 }
1185 
1186 /*
1187  * downgrade a write lock into a read lock
1188  * - caller has converted its writer lock into a reader count and found waiters queued
1189  * - just wake up any readers at the front of the queue
1190  */
1191 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1192 {
1193 	unsigned long flags;
1194 	DEFINE_WAKE_Q(wake_q);
1195 
1196 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
1197 
1198 	if (!list_empty(&sem->wait_list))
1199 		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1200 
1201 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1202 	wake_up_q(&wake_q);
1203 
1204 	return sem;
1205 }
1206 
1207 /*
1208  * lock for reading
1209  */
1210 static inline int __down_read_common(struct rw_semaphore *sem, int state)
1211 {
1212 	long count;
1213 
1214 	if (!rwsem_read_trylock(sem, &count)) {
1215 		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
1216 			return -EINTR;
1217 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1218 	}
1219 	return 0;
1220 }
1221 
1222 static inline void __down_read(struct rw_semaphore *sem)
1223 {
1224 	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
1225 }
1226 
1227 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1228 {
1229 	return __down_read_common(sem, TASK_INTERRUPTIBLE);
1230 }
1231 
1232 static inline int __down_read_killable(struct rw_semaphore *sem)
1233 {
1234 	return __down_read_common(sem, TASK_KILLABLE);
1235 }
1236 
1237 static inline int __down_read_trylock(struct rw_semaphore *sem)
1238 {
1239 	long tmp;
1240 
1241 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1242 
1243 	/*
1244 	 * Optimize for the case when the rwsem is not locked at all.
1245 	 */
1246 	tmp = RWSEM_UNLOCKED_VALUE;
1247 	do {
1248 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1249 					tmp + RWSEM_READER_BIAS)) {
1250 			rwsem_set_reader_owned(sem);
1251 			return 1;
1252 		}
1253 	} while (!(tmp & RWSEM_READ_FAILED_MASK));
1254 	return 0;
1255 }
1256 
1257 /*
1258  * lock for writing
1259  */
1260 static inline int __down_write_common(struct rw_semaphore *sem, int state)
1261 {
1262 	if (unlikely(!rwsem_write_trylock(sem))) {
1263 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
1264 			return -EINTR;
1265 	}
1266 
1267 	return 0;
1268 }
1269 
1270 static inline void __down_write(struct rw_semaphore *sem)
1271 {
1272 	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
1273 }
1274 
1275 static inline int __down_write_killable(struct rw_semaphore *sem)
1276 {
1277 	return __down_write_common(sem, TASK_KILLABLE);
1278 }
1279 
1280 static inline int __down_write_trylock(struct rw_semaphore *sem)
1281 {
1282 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1283 	return rwsem_write_trylock(sem);
1284 }
1285 
1286 /*
1287  * unlock after reading
1288  */
1289 static inline void __up_read(struct rw_semaphore *sem)
1290 {
1291 	long tmp;
1292 
1293 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1294 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1295 
1296 	rwsem_clear_reader_owned(sem);
1297 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1298 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1299 	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1300 		      RWSEM_FLAG_WAITERS)) {
1301 		clear_nonspinnable(sem);
1302 		rwsem_wake(sem);
1303 	}
1304 }
1305 
1306 /*
1307  * unlock after writing
1308  */
1309 static inline void __up_write(struct rw_semaphore *sem)
1310 {
1311 	long tmp;
1312 
1313 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1314 	/*
1315 	 * sem->owner may differ from current if the ownership is transferred
1316 	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bit.
1317 	 */
1318 	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1319 			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1320 
1321 	rwsem_clear_owner(sem);
1322 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1323 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1324 		rwsem_wake(sem);
1325 }
1326 
1327 /*
1328  * downgrade write lock to read lock
1329  */
1330 static inline void __downgrade_write(struct rw_semaphore *sem)
1331 {
1332 	long tmp;
1333 
1334 	/*
1335 	 * When downgrading from exclusive to shared ownership,
1336 	 * anything inside the write-locked region cannot leak
1337 	 * into the read side. In contrast, anything in the
1338 	 * read-locked region is ok to be re-ordered into the
1339 	 * write side. As such, rely on RELEASE semantics.
1340 	 */
1341 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1342 	tmp = atomic_long_fetch_add_release(
1343 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1344 	rwsem_set_reader_owned(sem);
1345 	if (tmp & RWSEM_FLAG_WAITERS)
1346 		rwsem_downgrade_wake(sem);
1347 }
1348 
1349 #else /* !CONFIG_PREEMPT_RT */
1350 
1351 #define RT_MUTEX_BUILD_MUTEX
1352 #include "rtmutex.c"
1353 
1354 #define rwbase_set_and_save_current_state(state)	\
1355 	set_current_state(state)
1356 
1357 #define rwbase_restore_current_state()			\
1358 	__set_current_state(TASK_RUNNING)
1359 
1360 #define rwbase_rtmutex_lock_state(rtm, state)		\
1361 	__rt_mutex_lock(rtm, state)
1362 
1363 #define rwbase_rtmutex_slowlock_locked(rtm, state)	\
1364 	__rt_mutex_slowlock_locked(rtm, NULL, state)
1365 
1366 #define rwbase_rtmutex_unlock(rtm)			\
1367 	__rt_mutex_unlock(rtm)
1368 
1369 #define rwbase_rtmutex_trylock(rtm)			\
1370 	__rt_mutex_trylock(rtm)
1371 
1372 #define rwbase_signal_pending_state(state, current)	\
1373 	signal_pending_state(state, current)
1374 
1375 #define rwbase_schedule()				\
1376 	schedule()
1377 
1378 #include "rwbase_rt.c"
1379 
1380 void __init_rwsem(struct rw_semaphore *sem, const char *name,
1381 		  struct lock_class_key *key)
1382 {
1383 	init_rwbase_rt(&(sem)->rwbase);
1384 
1385 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1386 	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
1387 	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
1388 #endif
1389 }
1390 EXPORT_SYMBOL(__init_rwsem);
1391 
1392 static inline void __down_read(struct rw_semaphore *sem)
1393 {
1394 	rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1395 }
1396 
1397 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1398 {
1399 	return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
1400 }
1401 
1402 static inline int __down_read_killable(struct rw_semaphore *sem)
1403 {
1404 	return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
1405 }
1406 
1407 static inline int __down_read_trylock(struct rw_semaphore *sem)
1408 {
1409 	return rwbase_read_trylock(&sem->rwbase);
1410 }
1411 
1412 static inline void __up_read(struct rw_semaphore *sem)
1413 {
1414 	rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
1415 }
1416 
1417 static inline void __sched __down_write(struct rw_semaphore *sem)
1418 {
1419 	rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1420 }
1421 
1422 static inline int __sched __down_write_killable(struct rw_semaphore *sem)
1423 {
1424 	return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
1425 }
1426 
1427 static inline int __down_write_trylock(struct rw_semaphore *sem)
1428 {
1429 	return rwbase_write_trylock(&sem->rwbase);
1430 }
1431 
1432 static inline void __up_write(struct rw_semaphore *sem)
1433 {
1434 	rwbase_write_unlock(&sem->rwbase);
1435 }
1436 
1437 static inline void __downgrade_write(struct rw_semaphore *sem)
1438 {
1439 	rwbase_write_downgrade(&sem->rwbase);
1440 }
1441 
1442 /* Debug stubs for the common API */
1443 #define DEBUG_RWSEMS_WARN_ON(c, sem)
1444 
1445 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
1446 					    struct task_struct *owner)
1447 {
1448 }
1449 
1450 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
1451 {
1452 	int count = atomic_read(&sem->rwbase.readers);
1453 
1454 	return count < 0 && count != READER_BIAS;
1455 }
1456 
1457 #endif /* CONFIG_PREEMPT_RT */
1458 
1459 /*
1460  * lock for reading
1461  */
1462 void __sched down_read(struct rw_semaphore *sem)
1463 {
1464 	might_sleep();
1465 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1466 
1467 	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1468 }
1469 EXPORT_SYMBOL(down_read);
1470 
1471 int __sched down_read_interruptible(struct rw_semaphore *sem)
1472 {
1473 	might_sleep();
1474 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1475 
1476 	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1477 		rwsem_release(&sem->dep_map, _RET_IP_);
1478 		return -EINTR;
1479 	}
1480 
1481 	return 0;
1482 }
1483 EXPORT_SYMBOL(down_read_interruptible);
1484 
1485 int __sched down_read_killable(struct rw_semaphore *sem)
1486 {
1487 	might_sleep();
1488 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1489 
1490 	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1491 		rwsem_release(&sem->dep_map, _RET_IP_);
1492 		return -EINTR;
1493 	}
1494 
1495 	return 0;
1496 }
1497 EXPORT_SYMBOL(down_read_killable);
1498 
1499 /*
1500  * trylock for reading -- returns 1 if successful, 0 if contention
1501  */
1502 int down_read_trylock(struct rw_semaphore *sem)
1503 {
1504 	int ret = __down_read_trylock(sem);
1505 
1506 	if (ret == 1)
1507 		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1508 	return ret;
1509 }
1510 EXPORT_SYMBOL(down_read_trylock);
1511 
1512 /*
1513  * lock for writing
1514  */
1515 void __sched down_write(struct rw_semaphore *sem)
1516 {
1517 	might_sleep();
1518 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1519 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1520 }
1521 EXPORT_SYMBOL(down_write);
1522 
1523 /*
1524  * lock for writing
1525  */
1526 int __sched down_write_killable(struct rw_semaphore *sem)
1527 {
1528 	might_sleep();
1529 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1530 
1531 	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1532 				  __down_write_killable)) {
1533 		rwsem_release(&sem->dep_map, _RET_IP_);
1534 		return -EINTR;
1535 	}
1536 
1537 	return 0;
1538 }
1539 EXPORT_SYMBOL(down_write_killable);
1540 
1541 /*
1542  * trylock for writing -- returns 1 if successful, 0 if contention
1543  */
1544 int down_write_trylock(struct rw_semaphore *sem)
1545 {
1546 	int ret = __down_write_trylock(sem);
1547 
1548 	if (ret == 1)
1549 		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1550 
1551 	return ret;
1552 }
1553 EXPORT_SYMBOL(down_write_trylock);
1554 
1555 /*
1556  * release a read lock
1557  */
1558 void up_read(struct rw_semaphore *sem)
1559 {
1560 	rwsem_release(&sem->dep_map, _RET_IP_);
1561 	__up_read(sem);
1562 }
1563 EXPORT_SYMBOL(up_read);
1564 
1565 /*
1566  * release a write lock
1567  */
1568 void up_write(struct rw_semaphore *sem)
1569 {
1570 	rwsem_release(&sem->dep_map, _RET_IP_);
1571 	__up_write(sem);
1572 }
1573 EXPORT_SYMBOL(up_write);
1574 
1575 /*
1576  * downgrade write lock to read lock
1577  */
1578 void downgrade_write(struct rw_semaphore *sem)
1579 {
1580 	lock_downgrade(&sem->dep_map, _RET_IP_);
1581 	__downgrade_write(sem);
1582 }
1583 EXPORT_SYMBOL(downgrade_write);
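/*
 * A minimal usage sketch of the API exported above (illustration only; the
 * rwsem name and the protected data are hypothetical):
 *
 *	static DECLARE_RWSEM(foo_rwsem);
 *
 *	down_write(&foo_rwsem);
 *	... modify the protected data ...
 *	downgrade_write(&foo_rwsem);	(now shareable with other readers)
 *	... keep reading the data ...
 *	up_read(&foo_rwsem);
 *
 * Readers elsewhere simply bracket their accesses with down_read()/up_read(),
 * or use down_read_trylock() when blocking is not an option.
 */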
1584 
1585 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1586 
1587 void down_read_nested(struct rw_semaphore *sem, int subclass)
1588 {
1589 	might_sleep();
1590 	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1591 	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1592 }
1593 EXPORT_SYMBOL(down_read_nested);
1594 
1595 int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1596 {
1597 	might_sleep();
1598 	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1599 
1600 	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1601 		rwsem_release(&sem->dep_map, _RET_IP_);
1602 		return -EINTR;
1603 	}
1604 
1605 	return 0;
1606 }
1607 EXPORT_SYMBOL(down_read_killable_nested);
1608 
1609 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1610 {
1611 	might_sleep();
1612 	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1613 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1614 }
1615 EXPORT_SYMBOL(_down_write_nest_lock);
1616 
1617 void down_read_non_owner(struct rw_semaphore *sem)
1618 {
1619 	might_sleep();
1620 	__down_read(sem);
1621 	__rwsem_set_reader_owned(sem, NULL);
1622 }
1623 EXPORT_SYMBOL(down_read_non_owner);
1624 
1625 void down_write_nested(struct rw_semaphore *sem, int subclass)
1626 {
1627 	might_sleep();
1628 	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1629 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1630 }
1631 EXPORT_SYMBOL(down_write_nested);
1632 
1633 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1634 {
1635 	might_sleep();
1636 	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1637 
1638 	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1639 				  __down_write_killable)) {
1640 		rwsem_release(&sem->dep_map, _RET_IP_);
1641 		return -EINTR;
1642 	}
1643 
1644 	return 0;
1645 }
1646 EXPORT_SYMBOL(down_write_killable_nested);
1647 
1648 void up_read_non_owner(struct rw_semaphore *sem)
1649 {
1650 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1651 	__up_read(sem);
1652 }
1653 EXPORT_SYMBOL(up_read_non_owner);
1654 
1655 #endif
1656