// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <[email protected]>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
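
/*
 * For illustration only (not part of this file's build): a minimal sketch
 * of setting up a counting semaphore with sema_init().  The identifiers
 * foo_sem and foo_init() are hypothetical.
 *
 *	#include <linux/semaphore.h>
 *
 *	static struct semaphore foo_sem;
 *
 *	static int __init foo_init(void)
 *	{
 *		sema_init(&foo_sem, 2);		// allow two concurrent holders
 *		return 0;
 *	}
 */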

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
#include <trace/events/lock.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void __sched down(struct semaphore *sem)
{
	unsigned long flags;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
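
/*
 * Illustrative sketch (hypothetical caller, hypothetical foo_sem): a caller
 * that cannot back out, e.g. during initialisation, might simply do
 *
 *	down(&foo_sem);
 *	... access the protected resource ...
 *	up(&foo_sem);
 *
 * As noted above, new code should normally prefer down_interruptible() or
 * down_killable() so the sleep can be aborted by a signal.
 */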

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int __sched down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);
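
/*
 * Illustrative sketch (hypothetical caller, hypothetical foo_sem); the
 * return value must be checked, since the acquire may not have happened:
 *
 *	ret = down_interruptible(&foo_sem);
 *	if (ret)
 *		return ret;	// -EINTR: a signal arrived while sleeping
 *	...
 *	up(&foo_sem);
 */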

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int __sched down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);
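
/*
 * Usage mirrors down_interruptible(); sketch with a hypothetical foo_sem,
 * the only difference being that the sleep is broken only by fatal signals:
 *
 *	if (down_killable(&foo_sem))
 *		return -EINTR;	// killed while waiting
 *	...
 *	up(&foo_sem);
 */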

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int __sched down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
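
/*
 * Sketch of the inverted return convention (hypothetical caller and
 * foo_sem); note that a non-zero return means the semaphore was NOT taken:
 *
 *	if (down_trylock(&foo_sem))
 *		return -EBUSY;	// 1: somebody else holds it, nothing to undo
 *	...
 *	up(&foo_sem);
 */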

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int __sched down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);
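
/*
 * Sketch of a bounded wait (hypothetical caller and foo_sem); @timeout is
 * in jiffies, so callers usually convert from wall-clock units:
 *
 *	if (down_timeout(&foo_sem, msecs_to_jiffies(100)))
 *		return -ETIME;	// not released within roughly 100ms
 *	...
 *	up(&foo_sem);
 */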

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void __sched up(struct semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem, &wake_q);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
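	/*
	 * Any waiter queued by __up() is woken only after sem->lock has
	 * been dropped, so the woken task does not immediately contend
	 * on the lock.
	 */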
	if (!wake_q_empty(&wake_q))
		wake_up_q(&wake_q);
}
EXPORT_SYMBOL(up);

/* Functions for the contended case */

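/*
 * Each sleeping task keeps a semaphore_waiter on its own kernel stack and
 * links it into sem->wait_list; __up() sets ->up and queues the task for
 * wakeup.
 */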
struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched ___down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static inline int __sched __down_common(struct semaphore *sem, long state,
					long timeout)
{
	int ret;

	trace_contention_begin(sem, 0);
	ret = ___down_common(sem, state, timeout);
	trace_contention_end(sem, ret);

	return ret;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem,
				  struct wake_q_head *wake_q)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_q_add(wake_q, waiter->task);
}