xref: /linux-6.15/include/linux/interrupt.h (revision 00a62703)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* interrupt.h */
3 #ifndef _LINUX_INTERRUPT_H
4 #define _LINUX_INTERRUPT_H
5 
6 #include <linux/kernel.h>
7 #include <linux/bitops.h>
8 #include <linux/cpumask.h>
9 #include <linux/irqreturn.h>
10 #include <linux/irqnr.h>
11 #include <linux/hardirq.h>
12 #include <linux/irqflags.h>
13 #include <linux/hrtimer.h>
14 #include <linux/kref.h>
15 #include <linux/workqueue.h>
16 
17 #include <linux/atomic.h>
18 #include <asm/ptrace.h>
19 #include <asm/irq.h>
20 #include <asm/sections.h>
21 
22 /*
23  * These correspond to the IORESOURCE_IRQ_* defines in
24  * linux/ioport.h to select the interrupt line behaviour.  When
25  * requesting an interrupt without specifying an IRQF_TRIGGER, the
26  * setting should be assumed to be "as already configured", which
27  * may be as per machine or firmware initialisation.
28  */
29 #define IRQF_TRIGGER_NONE	0x00000000
30 #define IRQF_TRIGGER_RISING	0x00000001
31 #define IRQF_TRIGGER_FALLING	0x00000002
32 #define IRQF_TRIGGER_HIGH	0x00000004
33 #define IRQF_TRIGGER_LOW	0x00000008
34 #define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
35 				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
36 #define IRQF_TRIGGER_PROBE	0x00000010
37 
38 /*
39  * These flags are used only by the kernel as part of the
40  * irq handling routines.
41  *
42  * IRQF_SHARED - allow sharing the irq among several devices
43  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
44  * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
45  * IRQF_PERCPU - Interrupt is per cpu
46  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
47  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
48  *                registered first in a shared interrupt is considered for
49  *                performance reasons)
50  * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has finished.
51  *                Used by threaded interrupts which need to keep the
52  *                irq line disabled until the threaded handler has been run.
53  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
54  *                   that this interrupt will wake the system from a suspended
55  *                   state.  See Documentation/power/suspend-and-interrupts.txt
56  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
57  * IRQF_NO_THREAD - Interrupt cannot be threaded
58  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
59  *                resume time.
60  * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
61  *                interrupt handler after suspending interrupts. For system
62  *                wakeup devices, users need to implement wakeup detection in
63  *                their interrupt handlers.
64  */
65 #define IRQF_SHARED		0x00000080
66 #define IRQF_PROBE_SHARED	0x00000100
67 #define __IRQF_TIMER		0x00000200
68 #define IRQF_PERCPU		0x00000400
69 #define IRQF_NOBALANCING	0x00000800
70 #define IRQF_IRQPOLL		0x00001000
71 #define IRQF_ONESHOT		0x00002000
72 #define IRQF_NO_SUSPEND		0x00004000
73 #define IRQF_FORCE_RESUME	0x00008000
74 #define IRQF_NO_THREAD		0x00010000
75 #define IRQF_EARLY_RESUME	0x00020000
76 #define IRQF_COND_SUSPEND	0x00040000
77 
78 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
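/*
 * Illustrative sketch (not part of this header): how a driver might combine
 * the trigger and behaviour flags above when requesting a line.  The device
 * name, handler and cookie (foo_*) are made-up placeholders.
 *
 *	ret = request_irq(irq, foo_isr,
 *			  IRQF_TRIGGER_FALLING | IRQF_SHARED,
 *			  "foo-card", foo);
 *	if (ret)
 *		dev_err(dev, "cannot request IRQ %d\n", irq);
 *
 * With IRQF_SHARED a non-NULL dev_id is required so that free_irq() and the
 * handlers can tell the sharing devices apart.
 */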
79 
80 /*
81  * These values can be returned by request_any_context_irq() and
82  * describe the context the interrupt will be run in.
83  *
84  * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
85  * IRQC_IS_NESTED - interrupt runs in a nested threaded context
86  */
87 enum {
88 	IRQC_IS_HARDIRQ	= 0,
89 	IRQC_IS_NESTED,
90 };
91 
92 typedef irqreturn_t (*irq_handler_t)(int, void *);
93 
94 /**
95  * struct irqaction - per interrupt action descriptor
96  * @handler:	interrupt handler function
97  * @name:	name of the device
98  * @dev_id:	cookie to identify the device
99  * @percpu_dev_id:	cookie to identify the device (per-CPU pointer variant)
100  * @next:	pointer to the next irqaction for shared interrupts
101  * @irq:	interrupt number
102  * @flags:	flags (see IRQF_* above)
103  * @thread_fn:	interrupt handler function for threaded interrupts
104  * @thread:	thread pointer for threaded interrupts
105  * @secondary:	pointer to secondary irqaction (force threading)
106  * @thread_flags:	flags related to @thread
107  * @thread_mask:	bitmask for keeping track of @thread activity
108  * @dir:	pointer to the proc/irq/NN/name entry
109  */
110 struct irqaction {
111 	irq_handler_t		handler;
112 	void			*dev_id;
113 	void __percpu		*percpu_dev_id;
114 	struct irqaction	*next;
115 	irq_handler_t		thread_fn;
116 	struct task_struct	*thread;
117 	struct irqaction	*secondary;
118 	unsigned int		irq;
119 	unsigned int		flags;
120 	unsigned long		thread_flags;
121 	unsigned long		thread_mask;
122 	const char		*name;
123 	struct proc_dir_entry	*dir;
124 } ____cacheline_internodealigned_in_smp;
125 
126 extern irqreturn_t no_action(int cpl, void *dev_id);
127 
128 /*
129  * If a (PCI) device interrupt is not connected, we set dev->irq to
130  * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
131  * can distinguish that case from other error returns.
132  *
133  * 0x80000000 is guaranteed to be outside the available range of interrupts
134  * and easy to distinguish from other possible incorrect values.
135  */
136 #define IRQ_NOTCONNECTED	(1U << 31)
137 
138 extern int __must_check
139 request_threaded_irq(unsigned int irq, irq_handler_t handler,
140 		     irq_handler_t thread_fn,
141 		     unsigned long flags, const char *name, void *dev);
142 
143 static inline int __must_check
144 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
145 	    const char *name, void *dev)
146 {
147 	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
148 }
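/*
 * Illustrative sketch (not part of this header): the usual split between a
 * quick hardirq check and a sleeping thread handler.  All foo_* identifiers
 * and the "foo" name are made up.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_is_ours(foo))
 *			return IRQ_NONE;
 *		foo_ack_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_handle_event(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 *
 * foo_handle_event() may sleep because it runs in thread context.
 * IRQF_ONESHOT keeps the line masked until foo_thread_fn() has run; it is
 * normally mandatory when the primary handler is left NULL.
 */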
149 
150 extern int __must_check
151 request_any_context_irq(unsigned int irq, irq_handler_t handler,
152 			unsigned long flags, const char *name, void *dev_id);
153 
154 extern int __must_check
155 __request_percpu_irq(unsigned int irq, irq_handler_t handler,
156 		     unsigned long flags, const char *devname,
157 		     void __percpu *percpu_dev_id);
158 
159 static inline int __must_check
160 request_percpu_irq(unsigned int irq, irq_handler_t handler,
161 		   const char *devname, void __percpu *percpu_dev_id)
162 {
163 	return __request_percpu_irq(irq, handler, 0,
164 				    devname, percpu_dev_id);
165 }
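/*
 * Illustrative sketch (not part of this header): per-CPU interrupts such as
 * local timers are requested once and then enabled/disabled on each CPU
 * individually.  foo_timer_isr and foo_percpu_data (a per-CPU pointer, e.g.
 * from alloc_percpu()) are made up; IRQ_TYPE_NONE comes from linux/irq.h.
 *
 *	ret = request_percpu_irq(irq, foo_timer_isr, "foo-timer",
 *				 foo_percpu_data);
 *
 * and then, on each CPU (typically from a CPU hotplug callback):
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_irq(irq);
 */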
166 
167 extern const void *free_irq(unsigned int, void *);
168 extern void free_percpu_irq(unsigned int, void __percpu *);
169 
170 struct device;
171 
172 extern int __must_check
173 devm_request_threaded_irq(struct device *dev, unsigned int irq,
174 			  irq_handler_t handler, irq_handler_t thread_fn,
175 			  unsigned long irqflags, const char *devname,
176 			  void *dev_id);
177 
178 static inline int __must_check
179 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
180 		 unsigned long irqflags, const char *devname, void *dev_id)
181 {
182 	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
183 					 devname, dev_id);
184 }
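/*
 * Illustrative sketch (not part of this header): with the devm_* variants the
 * IRQ is released automatically when the device is unbound, so no explicit
 * free_irq() is needed in the error or remove paths.  The probe function and
 * foo_* identifiers are made up.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *		return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
 *					dev_name(&pdev->dev), foo);
 *	}
 */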
185 
186 extern int __must_check
187 devm_request_any_context_irq(struct device *dev, unsigned int irq,
188 		 irq_handler_t handler, unsigned long irqflags,
189 		 const char *devname, void *dev_id);
190 
191 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
192 
193 /*
194  * On lockdep we don't want to enable hardirqs in hardirq
195  * context. Use local_irq_enable_in_hardirq() to annotate
196  * kernel code that has to do this nevertheless (pretty much
197  * the only valid case is for old/broken hardware that is
198  * insanely slow).
199  *
200  * NOTE: in theory this might break fragile code that relies
201  * on hardirq delivery - in practice we don't seem to have such
202  * places left. So the only effect should be slightly increased
203  * irqs-off latencies.
204  */
205 #ifdef CONFIG_LOCKDEP
206 # define local_irq_enable_in_hardirq()	do { } while (0)
207 #else
208 # define local_irq_enable_in_hardirq()	local_irq_enable()
209 #endif
210 
211 extern void disable_irq_nosync(unsigned int irq);
212 extern bool disable_hardirq(unsigned int irq);
213 extern void disable_irq(unsigned int irq);
214 extern void disable_percpu_irq(unsigned int irq);
215 extern void enable_irq(unsigned int irq);
216 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
217 extern bool irq_percpu_is_enabled(unsigned int irq);
218 extern void irq_wake_thread(unsigned int irq, void *dev_id);
219 
220 /* The following two functions are for core kernel use only. */
221 extern void suspend_device_irqs(void);
222 extern void resume_device_irqs(void);
223 
224 /**
225  * struct irq_affinity_notify - context for notification of IRQ affinity changes
226  * @irq:		Interrupt to which notification applies
227  * @kref:		Reference count, for internal use
228  * @work:		Work item, for internal use
229  * @notify:		Function to be called on change.  This will be
230  *			called in process context.
231  * @release:		Function to be called on release.  This will be
232  *			called in process context.  Once registered, the
233  *			structure must only be freed when this function is
234  *			called or later.
235  */
236 struct irq_affinity_notify {
237 	unsigned int irq;
238 	struct kref kref;
239 	struct work_struct work;
240 	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
241 	void (*release)(struct kref *ref);
242 };
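/*
 * Illustrative sketch (not part of this header): a driver that caches the CPU
 * its queue interrupt is affine to can register a notifier; both callbacks run
 * in process context via the embedded work item.  The foo_* identifiers are
 * made up.
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_queue *q = container_of(notify, struct foo_queue,
 *						   affinity_notify);
 *
 *		q->home_cpu = cpumask_first(mask);
 *	}
 *
 *	q->affinity_notify.notify = foo_affinity_notify;
 *	q->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(irq, &q->affinity_notify);
 *
 * Passing a NULL notify pointer to irq_set_affinity_notifier() removes a
 * previously registered notifier.
 */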
243 
244 /**
245  * struct irq_affinity - Description for automatic irq affinity assignments
246  * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
247  *			the MSI(-X) vector space
248  * @post_vectors:	Don't apply affinity to @post_vectors at end of
249  *			the MSI(-X) vector space
250  */
251 struct irq_affinity {
252 	int	pre_vectors;
253 	int	post_vectors;
254 };
255 
256 #if defined(CONFIG_SMP)
257 
258 extern cpumask_var_t irq_default_affinity;
259 
260 /* Internal implementation. Use the helpers below */
261 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
262 			      bool force);
263 
264 /**
265  * irq_set_affinity - Set the irq affinity of a given irq
266  * @irq:	Interrupt to set affinity
267  * @cpumask:	cpumask
268  *
269  * Fails if cpumask does not contain an online CPU
270  */
271 static inline int
272 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
273 {
274 	return __irq_set_affinity(irq, cpumask, false);
275 }
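/*
 * Illustrative sketch (not part of this header): pinning a queue interrupt to
 * the CPU that consumes its completions.  The irq and cpu values are made up.
 *
 *	ret = irq_set_affinity(irq, cpumask_of(cpu));
 *	if (ret)
 *		pr_warn("IRQ %d cannot be moved to CPU %u\n", irq, cpu);
 */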
276 
277 /**
278  * irq_force_affinity - Force the irq affinity of a given irq
279  * @irq:	Interrupt to set affinity
280  * @cpumask:	cpumask
281  *
282  * Same as irq_set_affinity, but without checking the mask against
283  * online cpus.
284  *
285  * Solely for low level cpu hotplug code, where we need to make per
286  * cpu interrupts affine before the cpu becomes online.
287  */
288 static inline int
289 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
290 {
291 	return __irq_set_affinity(irq, cpumask, true);
292 }
293 
294 extern int irq_can_set_affinity(unsigned int irq);
295 extern int irq_select_affinity(unsigned int irq);
296 
297 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
298 
299 extern int
300 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
301 
302 struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
303 int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
304 
305 #else /* CONFIG_SMP */
306 
307 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
308 {
309 	return -EINVAL;
310 }
311 
312 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
313 {
314 	return 0;
315 }
316 
317 static inline int irq_can_set_affinity(unsigned int irq)
318 {
319 	return 0;
320 }
321 
322 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
323 
324 static inline int irq_set_affinity_hint(unsigned int irq,
325 					const struct cpumask *m)
326 {
327 	return -EINVAL;
328 }
329 
330 static inline int
331 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
332 {
333 	return 0;
334 }
335 
336 static inline struct cpumask *
337 irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
338 {
339 	return NULL;
340 }
341 
342 static inline int
343 irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
344 {
345 	return maxvec;
346 }
347 
348 #endif /* CONFIG_SMP */
349 
350 /*
351  * Special lockdep variants of irq disabling/enabling.
352  * These should be used for locking constructs that
353  * know that a particular irq context is disabled
354  * and is the only irq-context user of a lock, so
355  * it is safe to take the lock in the irq-disabled
356  * section without disabling hardirqs.
357  *
358  * On !CONFIG_LOCKDEP they are equivalent to the normal
359  * irq disable/enable methods.
360  */
361 static inline void disable_irq_nosync_lockdep(unsigned int irq)
362 {
363 	disable_irq_nosync(irq);
364 #ifdef CONFIG_LOCKDEP
365 	local_irq_disable();
366 #endif
367 }
368 
369 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
370 {
371 	disable_irq_nosync(irq);
372 #ifdef CONFIG_LOCKDEP
373 	local_irq_save(*flags);
374 #endif
375 }
376 
377 static inline void disable_irq_lockdep(unsigned int irq)
378 {
379 	disable_irq(irq);
380 #ifdef CONFIG_LOCKDEP
381 	local_irq_disable();
382 #endif
383 }
384 
385 static inline void enable_irq_lockdep(unsigned int irq)
386 {
387 #ifdef CONFIG_LOCKDEP
388 	local_irq_enable();
389 #endif
390 	enable_irq(irq);
391 }
392 
393 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
394 {
395 #ifdef CONFIG_LOCKDEP
396 	local_irq_restore(*flags);
397 #endif
398 	enable_irq(irq);
399 }
400 
401 /* IRQ wakeup (PM) control: */
402 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
403 
404 static inline int enable_irq_wake(unsigned int irq)
405 {
406 	return irq_set_irq_wake(irq, 1);
407 }
408 
409 static inline int disable_irq_wake(unsigned int irq)
410 {
411 	return irq_set_irq_wake(irq, 0);
412 }
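/*
 * Illustrative sketch (not part of this header): a device that should wake the
 * system arms its IRQ as a wakeup source on suspend and disarms it on resume.
 * The foo_* identifiers are made up; device_may_wakeup() is the usual
 * gatekeeper from linux/pm_wakeup.h.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */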
413 
414 /*
415  * irq_get_irqchip_state/irq_set_irqchip_state specific flags
416  */
417 enum irqchip_irq_state {
418 	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
419 	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
420 	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
421 	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
422 };
423 
424 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
425 				 bool *state);
426 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
427 				 bool state);
428 
429 #ifdef CONFIG_IRQ_FORCED_THREADING
430 extern bool force_irqthreads;
431 #else
432 #define force_irqthreads	(0)
433 #endif
434 
435 #ifndef __ARCH_SET_SOFTIRQ_PENDING
436 #define set_softirq_pending(x) (local_softirq_pending() = (x))
437 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
438 #endif
439 
440 /* Some architectures might implement lazy enabling/disabling of
441  * interrupts. In some cases, such as stop_machine, we might want
442  * to ensure that after a local_irq_disable(), interrupts have
443  * really been disabled in hardware. Such architectures need to
444  * implement the following hook.
445  */
446 #ifndef hard_irq_disable
447 #define hard_irq_disable()	do { } while(0)
448 #endif
449 
450 /* PLEASE avoid allocating new softirqs unless you _really_ need
451    high-frequency threaded job scheduling. For almost all purposes
452    tasklets are more than enough. E.g. all serial device BHs et
453    al. should be converted to tasklets, not to softirqs.
454  */
455 
456 enum
457 {
458 	HI_SOFTIRQ=0,
459 	TIMER_SOFTIRQ,
460 	NET_TX_SOFTIRQ,
461 	NET_RX_SOFTIRQ,
462 	BLOCK_SOFTIRQ,
463 	IRQ_POLL_SOFTIRQ,
464 	TASKLET_SOFTIRQ,
465 	SCHED_SOFTIRQ,
466 	HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
467 			    numbering. Sigh! */
468 	RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */
469 
470 	NR_SOFTIRQS
471 };
472 
473 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
474 
475 /* Map softirq index to softirq name. Update 'softirq_to_name' in
476  * kernel/softirq.c when adding a new softirq.
477  */
478 extern const char * const softirq_to_name[NR_SOFTIRQS];
479 
480 /* softirq mask and active fields moved to irq_cpustat_t in
481  * asm/hardirq.h to get better cache usage.  KAO
482  */
483 
484 struct softirq_action
485 {
486 	void	(*action)(struct softirq_action *);
487 };
488 
489 asmlinkage void do_softirq(void);
490 asmlinkage void __do_softirq(void);
491 
492 #ifdef __ARCH_HAS_DO_SOFTIRQ
493 void do_softirq_own_stack(void);
494 #else
495 static inline void do_softirq_own_stack(void)
496 {
497 	__do_softirq();
498 }
499 #endif
500 
501 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
502 extern void softirq_init(void);
503 extern void __raise_softirq_irqoff(unsigned int nr);
504 
505 extern void raise_softirq_irqoff(unsigned int nr);
506 extern void raise_softirq(unsigned int nr);
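/*
 * Illustrative sketch (core kernel only, not part of this header): a softirq
 * is registered once at boot with open_softirq() and later triggered with
 * raise_softirq(), or raise_softirq_irqoff() when interrupts are already
 * disabled.  FOO_SOFTIRQ and foo_softirq_action() are made up; see the note
 * above about not adding new softirqs.
 *
 *	static void foo_softirq_action(struct softirq_action *h)
 *	{
 *		(drain the per-CPU work queued for this softirq)
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *	...
 *	raise_softirq(FOO_SOFTIRQ);
 */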
507 
508 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
509 
510 static inline struct task_struct *this_cpu_ksoftirqd(void)
511 {
512 	return this_cpu_read(ksoftirqd);
513 }
514 
515 /* Tasklets --- multithreaded analogue of BHs.
516 
517    Main feature distinguishing them from generic softirqs: a tasklet
518    runs on only one CPU at a time.
519 
520    Main feature distinguishing them from BHs: different tasklets
521    may run simultaneously on different CPUs.
522 
523    Properties:
524    * If tasklet_schedule() is called, then tasklet is guaranteed
525      to be executed on some cpu at least once after this.
526    * If the tasklet is already scheduled, but its execution is still not
527      started, it will be executed only once.
528    * If this tasklet is already running on another CPU (or schedule is called
529      from tasklet itself), it is rescheduled for later.
530    * A tasklet is strictly serialized with respect to itself, but not
531      with respect to other tasklets. If the client needs inter-tasklet
532      synchronization, it must provide it with spinlocks.
533  */
534 
535 struct tasklet_struct
536 {
537 	struct tasklet_struct *next;
538 	unsigned long state;
539 	atomic_t count;
540 	void (*func)(unsigned long);
541 	unsigned long data;
542 };
543 
544 #define DECLARE_TASKLET(name, func, data) \
545 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
546 
547 #define DECLARE_TASKLET_DISABLED(name, func, data) \
548 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
549 
550 
551 enum
552 {
553 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
554 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
555 };
556 
557 #ifdef CONFIG_SMP
558 static inline int tasklet_trylock(struct tasklet_struct *t)
559 {
560 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
561 }
562 
563 static inline void tasklet_unlock(struct tasklet_struct *t)
564 {
565 	smp_mb__before_atomic();
566 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
567 }
568 
569 static inline void tasklet_unlock_wait(struct tasklet_struct *t)
570 {
571 	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
572 }
573 #else
574 #define tasklet_trylock(t) 1
575 #define tasklet_unlock_wait(t) do { } while (0)
576 #define tasklet_unlock(t) do { } while (0)
577 #endif
578 
579 extern void __tasklet_schedule(struct tasklet_struct *t);
580 
581 static inline void tasklet_schedule(struct tasklet_struct *t)
582 {
583 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
584 		__tasklet_schedule(t);
585 }
586 
587 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
588 
589 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
590 {
591 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
592 		__tasklet_hi_schedule(t);
593 }
594 
595 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
596 {
597 	atomic_inc(&t->count);
598 	smp_mb__after_atomic();
599 }
600 
601 static inline void tasklet_disable(struct tasklet_struct *t)
602 {
603 	tasklet_disable_nosync(t);
604 	tasklet_unlock_wait(t);
605 	smp_mb();
606 }
607 
608 static inline void tasklet_enable(struct tasklet_struct *t)
609 {
610 	smp_mb__before_atomic();
611 	atomic_dec(&t->count);
612 }
613 
614 extern void tasklet_kill(struct tasklet_struct *t);
615 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
616 extern void tasklet_init(struct tasklet_struct *t,
617 			 void (*func)(unsigned long), unsigned long data);
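/*
 * Illustrative sketch (not part of this header): the common pattern is to
 * acknowledge the device in the hardirq handler and defer the bulk of the
 * work to a tasklet.  The foo_* identifiers are made up.
 *
 *	static void foo_do_tasklet(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *
 *		foo_process_completions(foo);
 *	}
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_ack_irq(foo);
 *		tasklet_schedule(&foo->tasklet);
 *		return IRQ_HANDLED;
 *	}
 *
 * and during setup:
 *
 *	tasklet_init(&foo->tasklet, foo_do_tasklet, (unsigned long)foo);
 *
 * tasklet_kill() must be called (e.g. in the remove path) before the memory
 * backing the tasklet or its data goes away.
 */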
618 
619 struct tasklet_hrtimer {
620 	struct hrtimer		timer;
621 	struct tasklet_struct	tasklet;
622 	enum hrtimer_restart	(*function)(struct hrtimer *);
623 };
624 
625 extern void
626 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
627 		     enum hrtimer_restart (*function)(struct hrtimer *),
628 		     clockid_t which_clock, enum hrtimer_mode mode);
629 
630 static inline
631 void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
632 			   const enum hrtimer_mode mode)
633 {
634 	hrtimer_start(&ttimer->timer, time, mode);
635 }
636 
637 static inline
638 void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
639 {
640 	hrtimer_cancel(&ttimer->timer);
641 	tasklet_kill(&ttimer->tasklet);
642 }
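/*
 * Illustrative sketch (not part of this header): a tasklet_hrtimer fires the
 * hrtimer in hard interrupt context and runs the supplied function from
 * tasklet (softirq) context.  foo_poll_fn and the 10 ms period are made up.
 *
 *	tasklet_hrtimer_init(&foo->poll_timer, foo_poll_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&foo->poll_timer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 *
 * foo_poll_fn() has the normal hrtimer prototype and returns HRTIMER_RESTART
 * to rearm the timer or HRTIMER_NORESTART to stop it.
 */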
643 
644 /*
645  * Autoprobing for irqs:
646  *
647  * probe_irq_on() and probe_irq_off() provide robust primitives
648  * for accurate IRQ probing during kernel initialization.  They are
649  * reasonably simple to use, are not "fooled" by spurious interrupts,
650  * and, unlike other attempts at IRQ probing, they do not get hung on
651  * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
652  *
653  * For reasonably foolproof probing, use them as follows (see the sketch after the declarations below):
654  *
655  * 1. clear and/or mask the device's internal interrupt.
656  * 2. sti();
657  * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
658  * 4. enable the device and cause it to trigger an interrupt.
659  * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
660  * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
661  * 7. service the device to clear its pending interrupt.
662  * 8. loop again if paranoia is required.
663  *
664  * probe_irq_on() returns a mask of allocated IRQs.
665  *
666  * probe_irq_off() takes the mask as a parameter,
667  * and returns the irq number which occurred,
668  * or zero if none occurred, or a negative irq number
669  * if more than one irq occurred.
670  */
671 
672 #if !defined(CONFIG_GENERIC_IRQ_PROBE)
673 static inline unsigned long probe_irq_on(void)
674 {
675 	return 0;
676 }
677 static inline int probe_irq_off(unsigned long val)
678 {
679 	return 0;
680 }
681 static inline unsigned int probe_irq_mask(unsigned long val)
682 {
683 	return 0;
684 }
685 #else
686 extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
687 extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
688 extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
689 #endif
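/*
 * Illustrative sketch (not part of this header): the probing steps documented
 * above expressed as code.  foo_trigger_interrupt() and the 20 ms delay are
 * made up; msleep() comes from linux/delay.h.
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();
 *	foo_trigger_interrupt(foo);	step 4: make the device raise its IRQ
 *	msleep(20);			step 5: give it time to arrive
 *	irq = probe_irq_off(mask);	step 6: 0 = none, negative = multiple
 *	if (irq <= 0)
 *		return -ENODEV;
 */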
690 
691 #ifdef CONFIG_PROC_FS
692 /* Initialize /proc/irq/ */
693 extern void init_irq_proc(void);
694 #else
695 static inline void init_irq_proc(void)
696 {
697 }
698 #endif
699 
700 #ifdef CONFIG_IRQ_TIMINGS
701 void irq_timings_enable(void);
702 void irq_timings_disable(void);
703 u64 irq_timings_next_event(u64 now);
704 #endif
705 
706 struct seq_file;
707 int show_interrupts(struct seq_file *p, void *v);
708 int arch_show_interrupts(struct seq_file *p, int prec);
709 
710 extern int early_irq_init(void);
711 extern int arch_probe_nr_irqs(void);
712 extern int arch_early_irq_init(void);
713 
714 /*
715  * We want to know which function is an entrypoint of a hardirq or a softirq.
716  */
717 #define __irq_entry		 __attribute__((__section__(".irqentry.text")))
718 #define __softirq_entry  \
719 	__attribute__((__section__(".softirqentry.text")))
720 
721 #endif
722