/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/cpumask_types.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
 *                  Users will enable it explicitly by enable_irq() or enable_nmi()
 *                  later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *                 depends on IRQF_PERCPU.
 * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
 *                     interrupt.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000
#define IRQF_NO_DEBUG		0x00100000
#define IRQF_COND_ONESHOT	0x00200000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL, the default primary handler is installed.
 * @flags:	Handling flags
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev);
}
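
/*
 * Illustrative sketch (not part of this header): a typical request_irq()
 * pairing for a hypothetical shared-line device "foo"; the struct and the
 * helper functions are assumptions made up for the example.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_is_ours(foo))
 *			return IRQ_NONE;
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *	if (ret)
 *		goto err;
 *	...
 *	free_irq(foo->irq, foo);
 */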

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}
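
/*
 * Illustrative sketch: per-CPU interrupts (e.g. an architected per-CPU
 * timer) take a percpu cookie instead of a plain pointer, and each CPU
 * enables its copy of the interrupt itself; all names below are
 * hypothetical.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_data);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo-timer",
 *				 &foo_pcpu_data);
 *
 * and then, on each CPU that should receive it:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */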

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
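
/*
 * Illustrative sketch: the devm_ variant ties the handler's lifetime to
 * @dev, so no explicit free_irq() is needed on the error and remove
 * paths; "pdev", "foo" and the handler are assumptions for the example.
 *
 *	ret = devm_request_irq(&pdev->dev, irq, foo_interrupt, 0,
 *			       dev_name(&pdev->dev), foo);
 *	if (ret)
 *		return ret;
 */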

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

DEFINE_LOCK_GUARD_1(disable_irq, int,
		    disable_irq(*_T->lock), enable_irq(*_T->lock))

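/*
 * Illustrative sketch: the guard defined above disables the interrupt for
 * the current scope and re-enables it on every exit path (see
 * linux/cleanup.h); "foo" is a hypothetical driver structure.
 *
 *	int irq = foo->irq;
 *
 *	scoped_guard(disable_irq, &irq)
 *		foo_reprogram_hw(foo);
 *
 * foo_reprogram_hw() then runs with the IRQ disabled, and enable_irq() is
 * called automatically when the scope is left.
 */
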
extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

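/*
 * Illustrative sketch: embed the notify block in driver data, fill in the
 * callbacks (both run in process context) and register it; the "foo"
 * names are hypothetical.
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_device *foo = container_of(notify, struct foo_device,
 *						      affinity_notify);
 *
 *		foo_rebalance_queues(foo, mask);
 *	}
 *
 *	foo->affinity_notify.notify = foo_affinity_notify;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(irq, &foo->affinity_notify);
 */
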
#define IRQ_AFFINITY_MAX_SETS	4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};

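/*
 * Illustrative sketch: a driver that wants one unspread config vector in
 * front of its automatically spread queue vectors could pass the
 * description below to pci_alloc_irq_vectors_affinity(); names and bounds
 * are hypothetical, and vector 0 then stays unspread for admin/config use.
 *
 *	struct irq_affinity affd = { .pre_vectors = 1 };
 *
 *	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
 *					       PCI_IRQ_MSIX, &affd);
 */
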
/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
				     bool setaffinity);

/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, false);
}

/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 *			       cpumask to the interrupt
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and if @m is not NULL it applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, true);
}

/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return irq_set_affinity_and_hint(irq, m);
}

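/*
 * Illustrative sketch: a multiqueue driver publishing a preferred CPU per
 * queue IRQ without forcing the affinity; the naive queue-to-CPU mapping
 * and the "foo" names are hypothetical.
 *
 *	for (i = 0; i < foo->nr_queues; i++)
 *		irq_update_affinity_hint(foo->queue_irq[i],
 *					 cpumask_of(i % num_online_cpus()));
 */
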
extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_update_affinity_hint(unsigned int irq,
					   const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_and_hint(unsigned int irq,
					    const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and which is the only irq-context user of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
	local_irq_save(*flags);
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

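/*
 * Illustrative sketch: arming an interrupt as a system wakeup source
 * across suspend; the "foo" driver and its dev_pm_ops callbacks are
 * hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
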
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
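
/*
 * Illustrative sketch: peeking at the pending state of an interrupt, for
 * instance to detect an event that fired while the line was masked; the
 * "foo" handler is hypothetical.
 *
 *	bool pending;
 *
 *	ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!ret && pending)
 *		foo_handle_stale_event(foo);
 */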

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads()	(true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
#  define force_irqthreads()	(static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads()	(false)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE, avoid allocating new softirqs unless you _really_ need high
   frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS
};

/*
 * The following vectors can be safely ignored after ksoftirqd is parked:
 *
 * _ RCU:
 *	1) rcutree_migrate_callbacks() migrates the queue.
 *	2) rcutree_report_cpu_dead() reports the final quiescent states.
 *
 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 *
 * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
				   BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(void);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef CONFIG_PREEMPT_RT
extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
	do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

/*
 * With forced-threaded interrupts enabled a raised softirq is deferred to
 * ksoftirqd unless it can be handled within the threaded interrupt. This
 * affects timer_list timers and hrtimers which are explicitly marked with
 * HRTIMER_MODE_SOFT.
 * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing
 * which includes all timers which are not explicitly marked HRTIMER_MODE_HARD.
 * Userspace controlled timers (like the clock_nanosleep() interface) are
 * divided into two categories: tasks with an elevated scheduling policy,
 * i.e. SCHED_{FIFO|RR|DL}, and tasks with the remaining scheduling policies.
 * Tasks with an elevated scheduling policy are woken up directly from the
 * HARDIRQ while all other wake ups are delayed to softirq and so to ksoftirqd.
 *
 * The ksoftirqd runs at SCHED_OTHER policy at which it should remain since it
 * handles the softirq in an overloaded situation (not handled everything
 * within its last run).
 * If the timers are handled at SCHED_OTHER priority then they compete with all
 * other SCHED_OTHER tasks for CPU resources and are possibly delayed.
 * Moving timer softirqs to a low priority SCHED_FIFO thread instead ensures
 * that timers are handled before scheduling any SCHED_OTHER thread.
 */
DECLARE_PER_CPU(struct task_struct *, ktimerd);
DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
void raise_ktimers_thread(unsigned int nr);

static inline unsigned int local_timers_pending_force_th(void)
{
	return __this_cpu_read(pending_timer_softirq);
}

static inline void raise_timer_softirq(unsigned int nr)
{
	lockdep_assert_in_irq();
	if (force_irqthreads())
		raise_ktimers_thread(nr);
	else
		__raise_softirq_irqoff(nr);
}

static inline unsigned int local_timers_pending(void)
{
	if (force_irqthreads())
		return local_timers_pending_force_th();
	else
		return local_softirq_pending();
}

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   This API is deprecated. Please consider using threaded IRQs instead:
   https://lore.kernel.org/lkml/[email protected]

   Main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from tasklet itself), it is rescheduled for later.
   * Tasklet is strictly serialized wrt itself, but not
     wrt another tasklets. If a client needs some intertask synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};

#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

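/*
 * Illustrative sketch of the callback-style API above: embed the tasklet
 * in driver data, recover the container with from_tasklet() and schedule
 * it from the hardirq handler; the "foo" names are hypothetical.
 *
 *	struct foo_device {
 *		struct tasklet_struct tasklet;
 *		...
 *	};
 *
 *	static void foo_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct foo_device *foo = from_tasklet(foo, t, tasklet);
 *
 *		foo_process_events(foo);
 *	}
 *
 *	tasklet_setup(&foo->tasklet, foo_tasklet_fn);
 *	...
 *	tasklet_schedule(&foo->tasklet);
 */
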
#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);

#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry	__section(".irqentry.text")
#endif

#define __softirq_entry	__section(".softirqentry.text")

#endif