/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <[email protected]>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03 /* No irq/preemption handling, caller's duty */
#define HWLOCK_IN_ATOMIC	0x04 /* Called while in atomic context */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
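
/*
 * Example (illustrative sketch, not part of this API): a board with two
 * hwspinlock devices, the first providing eight locks, could hand each
 * device a distinct base id via platform data. The device and variable
 * names below are hypothetical.
 *
 *	static struct hwspinlock_pdata bank0_pdata = { .base_id = 0 };
 *	static struct hwspinlock_pdata bank1_pdata = { .base_id = 8 };
 *
 *	static struct platform_device bank0_device = {
 *		.name = "my-hwspinlock",
 *		.id = 0,
 *		.dev.platform_data = &bank0_pdata,
 *	};
 */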

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
							unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks);
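
/*
 * Example (illustrative sketch): a hwspinlock controller driver typically
 * fills a struct hwspinlock_device (see hwspinlock_internal.h for its
 * actual layout) and registers it from probe. The ops, lock count and
 * names below are hypothetical.
 *
 *	static const struct hwspinlock_ops my_hwspinlock_ops = {
 *		.trylock = my_hwspinlock_trylock,
 *		.unlock  = my_hwspinlock_unlock,
 *	};
 *
 *	static int my_hwspinlock_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock_device *bank;
 *
 *		bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, 8),
 *				    GFP_KERNEL);
 *		if (!bank)
 *			return -ENOMEM;
 *
 *		return devm_hwspin_lock_register(&pdev->dev, bank,
 *						 &my_hwspinlock_ops, 0, 8);
 *	}
 */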

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, code that uses it will still build and work.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (no point in registering hwspinlock
 * instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still check this with IS_ERR.
 */
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
	return 0;
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (previous interrupts state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
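
/*
 * Example (illustrative sketch; @hwlock was obtained earlier, e.g. via
 * hwspin_lock_request_specific()):
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;
 *
 *	... short, non-sleeping critical section, irqs are off ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */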

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect the lock acquisition path with a mutex
 * or spinlock of its own to avoid deadlock; in return, this mode allows
 * time-consuming or sleepable operations to be performed under the
 * hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
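
/*
 * Example (illustrative sketch, hypothetical names): per the caution
 * above, a driver-local mutex serializes the acquisition path, which in
 * turn permits sleeping while the hardware lock is held.
 *
 *	static DEFINE_MUTEX(my_hwlock_mutex);
 *
 *	mutex_lock(&my_hwlock_mutex);
 *	ret = hwspin_trylock_raw(hwlock);
 *	if (!ret) {
 *		... possibly sleepable operations under the hardware lock ...
 *		hwspin_unlock_raw(hwlock);
 *	}
 *	mutex_unlock(&my_hwlock_mutex);
 */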

/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
				unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must protect the lock acquisition path with a mutex
 * or spinlock of its own to avoid deadlock; in return, this mode allows
 * time-consuming or sleepable operations to be performed under the
 * hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context and the timeout
 * value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
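
/*
 * Example (illustrative sketch, hypothetical names): taking the hardware
 * lock from a section already made atomic by a regular spinlock. The
 * timeout is kept to a few msecs, as required above.
 *
 *	spin_lock_irqsave(&my_driver_lock, flags);
 *	ret = hwspin_lock_timeout_in_atomic(hwlock, 3);
 *	if (!ret) {
 *		... brief access to the shared hardware resource ...
 *		hwspin_unlock_in_atomic(hwlock);
 *	}
 *	spin_unlock_irqrestore(&my_driver_lock, flags);
 */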

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
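
/*
 * Example (illustrative sketch, hypothetical lock id): the common
 * request/lock/unlock/free lifecycle, giving up after 100 msecs.
 *
 *	struct hwspinlock *hwlock;
 *	int ret;
 *
 *	hwlock = hwspin_lock_request_specific(MY_HWLOCK_ID);
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	ret = hwspin_lock_timeout(hwlock, 100);
 *	if (!ret) {
 *		... brief, non-sleeping access to the shared resource ...
 *		hwspin_unlock(hwlock);
 *	}
 *	hwspin_lock_free(hwlock);
 */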

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					    unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_in_atomic())
 * before calling this function: it is a bug to call unlock on a @hwlock
 * that is already unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable
 * preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */