/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
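
/*
 * For illustration only, a minimal sketch of requesting an edge-triggered
 * line. The irq number, "mydev" name and my_dev cookie are hypothetical;
 * a driver would normally take the trigger type from firmware description,
 * or simply omit IRQF_TRIGGER_* to keep the preconfigured setting.
 *
 *	ret = request_irq(irq, my_handler, IRQF_TRIGGER_RISING,
 *			  "mydev", my_dev);
 *	if (ret)
 *		return ret;
 */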

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed.
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
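
/*
 * A hedged example of a common flag combination: a threaded handler for a
 * level-triggered line needs IRQF_ONESHOT so the line stays masked until
 * the thread has run. The handler name and "mydev" cookie below are made
 * up for illustration; NULL as the primary handler installs the default.
 *
 *	ret = request_threaded_irq(irq, NULL, my_thread_fn,
 *				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 *				   "mydev", my_dev);
 */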

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);
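
/*
 * A minimal sketch of a handler matching irq_handler_t; my_device and the
 * my_device_* helpers are hypothetical. On a shared line the handler must
 * check whether its device raised the interrupt and return IRQ_NONE if not.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_device *dev = dev_id;
 *
 *		if (!my_device_irq_pending(dev))
 *			return IRQ_NONE;
 *		my_device_ack_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 */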

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	per-cpu cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
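
/*
 * Typical use, sketched with hypothetical names: request the line in the
 * probe path, check the __must_check result, and pass the same cookie to
 * free_irq() on teardown (the cookie also disambiguates shared lines).
 *
 *	ret = request_irq(irq, my_handler, IRQF_SHARED, "mydev", my_dev);
 *	if (ret)
 *		return ret;
 *	// ... and on teardown, with the same cookie:
 *	free_irq(irq, my_dev);
 */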

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);
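
/*
 * A sketch of the per-CPU variant, with hypothetical my_* names: the
 * cookie is a __percpu pointer, and each CPU must enable its own copy of
 * the line, e.g. via enable_percpu_irq() (declared below) from CPU-local
 * context. IRQ_TYPE_NONE here comes from linux/irq.h.
 *
 *	ret = request_percpu_irq(irq, my_percpu_handler, "mydev",
 *				 my_percpu_dev);
 *	if (!ret)
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);	// on each CPU
 */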
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
}
#endif

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
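
/*
 * Device-managed variant, sketched with hypothetical names: the IRQ is
 * released automatically when the driver detaches, so no explicit
 * free_irq() is needed unless the release ordering matters (then use
 * devm_free_irq() below).
 *
 *	ret = devm_request_irq(&pdev->dev, irq, my_handler, 0,
 *			       "mydev", my_dev);
 *	if (ret)
 *		return ret;
 */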

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
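
/*
 * An illustrative pairing (names hypothetical): disable_irq() waits for
 * any running handler to finish, so it must not be called while holding a
 * lock the handler also takes. Disable/enable calls nest and must balance.
 *
 *	disable_irq(irq);		// line masked, handlers drained
 *	my_device_reprogram(dev);
 *	enable_irq(irq);
 */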

/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
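
/*
 * A hedged sketch of affinity-change notification; the my_* names are
 * hypothetical. Both callbacks run in process context, and the structure
 * must only be freed after its release() callback has run. The kref and
 * work fields are initialised by the registration call itself.
 *
 *	static struct irq_affinity_notify my_notify;
 *
 *	my_notify.notify  = my_notify_fn;	// e.g. retarget queues
 *	my_notify.release = my_release_fn;	// may free the structure
 *	ret = irq_set_affinity_notifier(irq, &my_notify);
 */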

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and that are the only irq-context users of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures that want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
#  define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
#endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */


#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs unless you _really_ need
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* The softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
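
/*
 * For illustration, the open-coded softirq pattern, assuming a new entry
 * MY_SOFTIRQ had been added to the enum above (which the comment above
 * discourages) and that the my_* names are hypothetical: register an
 * action once at init time, then raise it from interrupt context.
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		// runs with hardirqs enabled, in softirq context
 *		my_process_queue();
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *	raise_softirq(MY_SOFTIRQ);
 */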

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   Main property distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main property distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or tasklet_schedule()
     is called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
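
/*
 * Minimal tasklet usage, with hypothetical my_* names: declare, schedule
 * from the hardirq handler, and kill on teardown. The func argument
 * receives the data word given at declaration/init time.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_device *dev = (struct my_device *)data;
 *
 *		my_device_do_deferred_work(dev);	// softirq context
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn,
 *			       (unsigned long)&my_device);
 *
 *	tasklet_schedule(&my_tasklet);	// from the irq handler
 *	tasklet_kill(&my_tasklet);	// from module exit
 */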


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
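
/*
 * A sketch with hypothetical names: a tasklet_hrtimer arms an hrtimer
 * whose expiry work is completed in softirq context via the embedded
 * tasklet, here firing once, one millisecond in the future.
 *
 *	tasklet_hrtimer_init(&my_th, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_th, ktime_set(0, NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */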

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * A rough sketch of these steps in code follows this comment.
 *
 * probe_irq_on() returns a mask of allocated IRQs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
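
/*
 * The recipe above, roughly in code; the my_device_* helpers are
 * hypothetical stand-ins for device-specific operations.
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	my_device_mask_irq(dev);
 *	irqs = probe_irq_on();
 *	my_device_trigger_irq(dev);
 *	udelay(100);			// give the line time to fire
 *	irq = probe_irq_off(irqs);	// 0 = none, negative = multiple
 *	my_device_ack_irq(dev);
 */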

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif
695