/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

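/*
 * Example: a driver that knows its line is falling-edge triggered can
 * pass an explicit trigger flag when requesting the interrupt.  A
 * minimal sketch; "my_handler", "mydev" and "dev" are hypothetical
 * driver-local names, not part of this header.
 *
 *	int ret;
 *
 *	ret = request_irq(irq, my_handler, IRQF_TRIGGER_FALLING,
 *			  "mydev", dev);
 *	if (ret)
 *		return ret;	// line unavailable or trigger rejected
 */
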
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.  See Documentation/power/suspend-and-interrupts.txt
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices, users need to implement wakeup detection in
 *                their interrupt handlers.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

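/*
 * Example: the minimal shape of an irq_handler_t.  A sketch only;
 * "my_dev" and its helper functions are hypothetical.  A handler must
 * return IRQ_HANDLED if its device raised the interrupt, or IRQ_NONE
 * so that shared-line accounting and spurious-irq detection work.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		if (!my_dev_irq_pending(md))	// not ours on a shared line
 *			return IRQ_NONE;
 *		my_dev_ack_irq(md);
 *		return IRQ_HANDLED;
 *	}
 */
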
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	per-cpu cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

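/*
 * Example: splitting work between a hardirq quick check and a threaded
 * handler.  A sketch with hypothetical names; the pattern itself
 * (IRQ_WAKE_THREAD deferring to @thread_fn, IRQF_ONESHOT keeping the
 * line masked until the thread finishes) is what this header
 * documents above.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		if (!my_dev_irq_pending(dev_id))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;	// defer to my_thread_fn
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		my_dev_do_slow_work(dev_id);	// may sleep here
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_ONESHOT, "mydev", dev);
 */
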
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

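/*
 * Example: the devm_ variant ties the irq's lifetime to a struct
 * device, so no explicit free_irq() is needed on the error or remove
 * paths.  A sketch assuming a platform-driver probe; the names are
 * hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		...
 *		ret = devm_request_irq(&pdev->dev, irq, my_handler,
 *				       IRQF_SHARED, "mydev", priv);
 *		if (ret)
 *			return ret;	// no cleanup needed, devm frees it
 *		...
 *	}
 */
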
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
		 irq_handler_t handler, unsigned long irqflags,
		 const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif


extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* The following functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

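/*
 * Example: watching affinity changes.  A sketch; the callbacks and the
 * "notify" variable are hypothetical.  Note the release() requirement
 * in the kernel-doc above: the object must stay allocated until
 * release() has run.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		// runs in process context; re-steer per-cpu resources
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *notify =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		// safe to free the embedding object from here on
 *	}
 *
 *	notify.notify = my_notify;
 *	notify.release = my_release;
 *	irq_set_affinity_notifier(irq, &notify);
 */
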
#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

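/*
 * Example: pinning an interrupt to one CPU, e.g. to keep a NIC queue's
 * irq on the core that consumes its data.  A sketch; error handling
 * and the source of "irq" are up to the caller.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(2));	// CPU 2 only
 */
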
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low-level cpu hotplug code, where we need to make per-cpu
 * interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}
#endif /* CONFIG_SMP */


/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled, and is the only
 * irq-context user of a lock, so that it's safe to take
 * the lock in the irq-disabled section without disabling
 * hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

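/*
 * Example: the save/restore pair above is used like a
 * spin_lock_irqsave() bracket around a lock whose only irq-context
 * user is this irq.  A sketch with a hypothetical lock:
 *
 *	unsigned long flags;
 *
 *	disable_irq_nosync_lockdep_irqsave(irq, &flags);
 *	spin_lock(&my_lock);		// safe: our irq can't run now
 *	...
 *	spin_unlock(&my_lock);
 *	enable_irq_lockdep_irqrestore(irq, &flags);
 */
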
/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

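/*
 * Example: a driver whose device is a wakeup source typically brackets
 * its suspend/resume callbacks with these.  A sketch; "my_irq" and the
 * callback names are hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(my_irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(my_irq);
 *		return 0;
 *	}
 */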

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

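/*
 * Example: how the core wires up a softirq (this API is for the fixed
 * set of softirqs in the enum above, not for drivers).  A sketch;
 * "my_softirq_action" is hypothetical and MY_SOFTIRQ stands in for one
 * of the enum values:
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		// runs in softirq context with hardirqs enabled
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *	...
 *	raise_softirq(MY_SOFTIRQ);	// mark pending on this cpu
 */
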
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If the tasklet is already running on another CPU (or schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs inter-tasklet
     synchronization, it must provide it itself, e.g. with spinlocks.

   (See the example sketch after the declarations below.)
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

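/*
 * Example: a typical tasklet, declared statically and scheduled from a
 * hardirq handler so the bulk of the work runs later in softirq
 * context.  A sketch; the names are hypothetical.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *md = (struct my_dev *)data;
 *		// deferred work: runs on one cpu at a time, can't sleep
 *	}
 *
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn,
 *			       (unsigned long)&my_dev_instance);
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		my_dev_ack_irq(dev_id);
 *		tasklet_schedule(&my_tasklet);	// coalesces if already pending
 *		return IRQ_HANDLED;
 *	}
 */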

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

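/*
 * Example: a tasklet_hrtimer fires its hrtimer callback in hard irq
 * context and defers the heavy work to the embedded tasklet.  A
 * sketch; "my_ttimer" and "my_timer_fn" are hypothetical.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		// keep this light: runs in hrtimer (hardirq) context
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_ttimer, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_ttimer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 */
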
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * (A code sketch of this sequence follows below.)
 *
 * probe_irq_on() returns a mask of allocated IRQs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

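/*
 * Example: the probing sequence above as code.  A sketch with
 * hypothetical device helpers; a real driver would also retry on a
 * multiple-irq (negative) result.
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	my_dev_mask_irq(dev);			// step 1
 *	irqs = probe_irq_on();			// step 3
 *	my_dev_unmask_and_trigger_irq(dev);	// step 4
 *	mdelay(20);				// step 5
 *	irq = probe_irq_off(irqs);		// step 6
 *	my_dev_ack_irq(dev);			// step 7
 *	if (irq <= 0)
 *		return -ENODEV;			// none or multiple seen
 */
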
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif