xref: /linux-6.15/include/linux/interrupt.h (revision 9880e71c)
/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has
 *                finished. Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.  See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices users need to implement wakeup detection in
 *                their interrupt handlers.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
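
/*
 * Illustrative sketch (not part of this header; foo_isr() and foo are
 * hypothetical): a driver sharing a falling-edge line might combine a
 * trigger flag with a handling flag when requesting its interrupt:
 *
 *	err = request_irq(irq, foo_isr,
 *			  IRQF_TRIGGER_FALLING | IRQF_SHARED,
 *			  "foo", foo);
 */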

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	per-CPU cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL, the default primary handler is installed.
 * @flags:	Handling flags
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
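
/*
 * Usage sketch (hypothetical names): a threaded request pairs a quick
 * primary handler that checks whether the device raised the interrupt
 * with a sleepable thread function; IRQF_ONESHOT keeps the line masked
 * until the thread function has run:
 *
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 *	...
 *	free_irq(irq, foo);
 */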

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}
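
/*
 * Sketch (hypothetical names): per-CPU interrupts take a __percpu
 * cookie instead of a plain pointer and are enabled on each CPU
 * individually rather than globally:
 *
 *	foo_pcpu = alloc_percpu(struct foo_pcpu_data);
 *	err = request_percpu_irq(irq, foo_percpu_isr, "foo", foo_pcpu);
 *	...then, on each target CPU...
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */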

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
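
/*
 * Sketch (assumed driver names): with the device-managed variant the
 * interrupt is released automatically when the device is unbound, so
 * no explicit free_irq() is needed in the error or remove paths:
 *
 *	err = devm_request_irq(&pdev->dev, irq, foo_isr, 0, "foo", foo);
 *	if (err)
 *		return err;
 */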

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
		 irq_handler_t handler, unsigned long irqflags,
		 const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);
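
/*
 * Sketch (hypothetical handler and cookie): per-CPU NMIs are requested
 * once and then prepared and enabled on each target CPU, typically
 * from CPU hotplug callbacks:
 *
 *	err = request_percpu_nmi(irq, foo_nmi_handler, "foo", foo_pcpu);
 *	...then, on each target CPU...
 *	err = prepare_percpu_nmi(irq);
 *	if (!err)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 */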

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#define	IRQ_AFFINITY_MAX_SETS  4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};
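
/*
 * Sketch (assumed vector layout): a PCI driver wanting one non-spread
 * vector at each end of its MSI(-X) space, e.g. an admin queue in
 * front and an error vector at the back, could describe that as
 * below and pass it to pci_alloc_irq_vectors_affinity():
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors	= 1,
 *		.post_vectors	= 1,
 *	};
 */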

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
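
/*
 * Sketch: pin an interrupt to CPU 2; this fails if the mask contains
 * no online CPU:
 *
 *	err = irq_set_affinity(irq, cpumask_of(2));
 */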

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make
 * per-CPU interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and is the only irq-context user of a lock, so that
 * it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
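
/*
 * Sketch (assumed device structure and wakeup policy): a driver whose
 * interrupt should wake the system typically brackets suspend/resume
 * with a matched enable/disable pair:
 *
 *	...in the suspend callback...
 *	if (device_may_wakeup(dev))
 *		enable_irq_wake(foo->irq);
 *
 *	...in the resume callback...
 *	if (device_may_wakeup(dev))
 *		disable_irq_wake(foo->irq);
 */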

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
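
/*
 * Sketch: query whether an interrupt is pending at the irqchip level:
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 */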

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads	(true)
# else
extern bool force_irqthreads;
# endif
#else
#define force_irqthreads	(0)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* Please avoid allocating new softirqs unless you _really_ need high
   frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
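
/*
 * Sketch (FOO_SOFTIRQ is a hypothetical enum entry): a softirq is
 * wired up once at init time and raised later, typically from
 * interrupt context; note the advice above about not adding new
 * softirqs:
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *	...
 *	raise_softirq(FOO_SOFTIRQ);
 */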

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   This API is deprecated. Please consider using threaded IRQs instead:
   https://lore.kernel.org/lkml/[email protected]

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some CPU at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some intertask synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};

#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}
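
/*
 * Sketch (hypothetical names): a statically declared tasklet using the
 * modern callback form, scheduled from a hard interrupt handler:
 *
 *	static void foo_do_work(struct tasklet_struct *t)
 *	{
 *		...deferred work, runs in softirq context...
 *	}
 *	DECLARE_TASKLET(foo_tasklet, foo_do_work);
 *	...
 *	tasklet_schedule(&foo_tasklet);
 */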

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));
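
/*
 * Sketch (assumed embedding): a tasklet embedded in driver data is
 * initialized with tasklet_setup(), and the callback recovers its
 * container via from_tasklet():
 *
 *	struct foo {
 *		struct tasklet_struct tasklet;
 *		...
 *	};
 *
 *	static void foo_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct foo *foo = from_tasklet(foo, t, tasklet);
 *		...
 *	}
 *	...
 *	tasklet_setup(&foo->tasklet, foo_tasklet_fn);
 */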

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
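
/*
 * Condensed sketch of the recipe above (the device-specific helpers
 * foo_mask_irq(), foo_trigger_irq() and foo_ack_irq() are assumed):
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_irq(foo);
 *	irqs = probe_irq_on();
 *	foo_trigger_irq(foo);
 *	udelay(100);
 *	irq = probe_irq_off(irqs);
 *	foo_ack_irq(foo);
 *	if (irq <= 0)
 *		return -ENODEV;
 */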

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entry point of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry	 __section(".irqentry.text")
#endif

#define __softirq_entry  __section(".softirqentry.text")

#endif
809