/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <trace/events/irq.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

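/*
 * Usage sketch (illustrative, not part of the original header): a driver
 * for a device on an active-low, level-triggered line might request its
 * interrupt as below; foo_isr and foo_dev are hypothetical names.
 *
 *	ret = request_irq(irq, foo_isr, IRQF_TRIGGER_LOW | IRQF_SHARED,
 *			  "foo", foo_dev);
 *	if (ret)
 *		return ret;
 *
 * Passing no IRQF_TRIGGER_* bit keeps whatever trigger type the firmware
 * or platform code has already configured.
 */
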
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

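/*
 * Usage sketch (illustrative, not part of the original header): callers
 * of request_any_context_irq() can use the return value to learn which
 * context the handler will run in; foo_isr and foo_dev are hypothetical.
 *
 *	ret = request_any_context_irq(irq, foo_isr, 0, "foo", foo_dev);
 *	if (ret < 0)
 *		return ret;
 *	pr_info("foo: handler runs in %s context\n",
 *		ret == IRQC_IS_NESTED ? "threaded" : "hardirq");
 */
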
typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	void *dev_id;
	struct irqaction *next;
	int irq;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
	unsigned long thread_mask;
	const char *name;
	struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

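/*
 * Usage sketch (illustrative, not part of the original header): a common
 * split for slow devices is a minimal hardirq handler that merely checks
 * whether the device raised the interrupt, plus a threaded handler that
 * does the sleeping work; all foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_check_irq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 * The thread function runs in process context and may sleep:
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		foo_handle_slow_work(dev_id);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, foo_check_irq, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 *
 * IRQF_ONESHOT keeps the line masked until foo_thread_fn() has run.
 */
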
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

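/*
 * Usage sketch (illustrative, not part of the original header): with the
 * devm_* variants the interrupt is freed automatically when the device
 * is unbound, so no explicit free_irq() is needed on the error and
 * remove paths; foo_* names are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *		return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
 *					"foo", pdev);
 *	}
 */
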
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

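/*
 * Usage sketch (illustrative, not part of the original header):
 * disable_irq() waits for any handler on this line to complete, so it
 * must not be called from that handler itself; disable_irq_nosync()
 * only masks the line and returns immediately.
 *
 *	disable_irq(irq);
 *	foo_reprogram_hw(foo);
 *	enable_irq(irq);
 *
 * Between the two calls the handler is guaranteed not to be running;
 * foo_reprogram_hw() is a hypothetical helper.
 */
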
/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

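/*
 * Usage sketch (illustrative, not part of the original header): pin an
 * interrupt to CPU 2 and publish the same mask as a hint for user-space
 * balancers; the CPU number is arbitrary.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(2));
 *	irq_set_affinity_hint(irq, cpumask_of(2));
 */
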
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

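/*
 * Usage sketch (illustrative, not part of the original header): embed
 * the notify structure in a driver-private object, fill in the two
 * callbacks and register it; both run in process context and all foo_*
 * names are hypothetical.
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *n,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo_device *foo =
 *			container_of(n, struct foo_device, affinity_notify);
 *
 *		foo_retarget_queues(foo, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *	}
 *
 *	foo->affinity_notify.notify = foo_affinity_changed;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(irq, &foo->affinity_notify);
 */
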
static inline void irq_run_affinity_notifiers(void)
{
	flush_scheduled_work();
}

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that a
 * particular irq context is disabled, and which are the only
 * irq-context users of a lock, so that it's safe to take the lock
 * in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
/* Please do not use: Use the replacement functions instead */
static inline int set_irq_wake(unsigned int irq, unsigned int on)
{
	return irq_set_irq_wake(irq, on);
}
#endif

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

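/*
 * Usage sketch (illustrative, not part of the original header): a driver
 * whose interrupt should wake the system from suspend arms the line in
 * its suspend callback and disarms it on resume; foo_* names are
 * hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
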
#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
#  define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */


#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE avoid allocating new softirqs unless you _really_ need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* Map softirq index to softirq name.  Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
static inline void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

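/*
 * Usage sketch (illustrative, not part of the original header): a
 * softirq is registered once at init time and raised later, typically
 * from interrupt context.  FOO_SOFTIRQ and foo_* are hypothetical; per
 * the comment above, new softirqs should almost never be added.
 *
 *	static void foo_softirq_action(struct softirq_action *a)
 *	{
 *		foo_process_pending_work();
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *
 * and later, to trigger it:
 *
 *	raise_softirq(FOO_SOFTIRQ);
 */
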
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If the client needs inter-task
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

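/*
 * Usage sketch (illustrative, not part of the original header): a
 * tasklet is declared statically or initialized per device, scheduled
 * from the hard interrupt handler, and killed on teardown; foo_* names
 * are hypothetical.
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *
 *		foo_push_completions(foo);
 *	}
 *
 * Per-device initialization, scheduling and teardown:
 *
 *	tasklet_init(&foo->tasklet, foo_tasklet_fn, (unsigned long)foo);
 *	...
 *	tasklet_schedule(&foo->tasklet);
 *	...
 *	tasklet_kill(&foo->tasklet);
 *
 * A statically declared equivalent would use
 * DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn, 0).
 */
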
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

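/*
 * Usage sketch (illustrative, not part of the original header): the
 * function passed to tasklet_hrtimer_init() is invoked from the tasklet,
 * i.e. softirq context, rather than from hard hrtimer context; foo_*
 * names are hypothetical.
 *
 *	static enum hrtimer_restart foo_poll(struct hrtimer *t)
 *	{
 *		foo_poll_hardware();
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&foo->ttimer, foo_poll,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&foo->ttimer,
 *			      ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */
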
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

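/*
 * Usage sketch (illustrative, not part of the original header): the
 * probing recipe above translates roughly into the following; the
 * foo_* helpers are hypothetical.
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	foo_mask_device_irq(foo);
 *	mask = probe_irq_on();
 *	foo_trigger_test_interrupt(foo);
 *	mdelay(10);
 *	irq = probe_irq_off(mask);
 *	foo_ack_device_irq(foo);
 *	if (irq <= 0)
 *		return -ENODEV;
 *
 * A return of zero from probe_irq_off() means no interrupt fired;
 * a negative value means more than one did.
 */
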
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif