/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
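
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * passing a trigger flag to request_irq(), which is declared later in this
 * file. The device, handler and "button" name are hypothetical.
 */
#if 0
static irqreturn_t button_isr(int irq, void *dev_id)
{
	/* acknowledge the (hypothetical) device here */
	return IRQ_HANDLED;
}

static int button_setup(unsigned int irq)
{
	/* fire on the falling edge; we configure the line ourselves */
	return request_irq(irq, button_isr, IRQF_TRIGGER_FALLING,
			   "button", NULL);
}
#endif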

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed.
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
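
/*
 * Example (editor's illustrative sketch): combining flags for a threaded
 * handler via request_threaded_irq(), declared later in this file.
 * IRQF_ONESHOT keeps the line masked until the thread finishes, which is
 * typically what slow-bus (e.g. I2C) devices need. All sensor_* names are
 * hypothetical.
 */
#if 0
static irqreturn_t sensor_hardirq(int irq, void *dev_id)
{
	/* quick check in hardirq context, then defer to the thread */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t sensor_thread_fn(int irq, void *dev_id)
{
	/* sleepable work, e.g. reading registers over a slow bus */
	return IRQ_HANDLED;
}

static int sensor_setup(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, sensor_hardirq, sensor_thread_fn,
				    IRQF_ONESHOT, "sensor", dev);
}
#endif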

/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED      - handler thread died
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
};

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};
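
/*
 * Example (editor's illustrative sketch): on success,
 * request_any_context_irq(), declared later in this file, returns one of
 * the IRQC_* values above, so callers can tell whether their handler will
 * run in hardirq or threaded context. The codec_* names are hypothetical.
 */
#if 0
static irqreturn_t codec_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int codec_setup(unsigned int irq, void *dev)
{
	int ret = request_any_context_irq(irq, codec_isr, 0, "codec", dev);

	if (ret < 0)
		return ret;		/* request failed */
	if (ret == IRQC_IS_NESTED)
		/* handler will run in a nested threaded context */;
	return 0;
}
#endif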

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
};

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
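
/*
 * Example (editor's illustrative sketch): the canonical request_irq()/
 * free_irq() pairing. With IRQF_SHARED, dev_id must be unique and is used
 * both to dispatch to the right device and to identify the action to
 * remove. struct foo_priv and foo_irq_is_ours() are hypothetical.
 */
#if 0
static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_priv *foo = dev_id;

	if (!foo_irq_is_ours(foo))	/* shared line: is it our device? */
		return IRQ_NONE;
	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}

static int foo_open(struct foo_priv *foo)
{
	return request_irq(foo->irq, foo_isr, IRQF_SHARED, "foo", foo);
}

static void foo_close(struct foo_priv *foo)
{
	free_irq(foo->irq, foo);	/* dev_id must match request_irq() */
}
#endif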

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
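
/*
 * Example (editor's illustrative sketch): with the device-managed variant
 * the irq is released automatically when @dev is unbound, so no explicit
 * free_irq() is needed in the error or remove paths. The foo_* names are
 * hypothetical.
 */
#if 0
static int foo_probe(struct device *dev, struct foo_priv *foo)
{
	int ret = devm_request_irq(dev, foo->irq, foo_isr, 0, "foo", foo);

	if (ret)
		return ret;	/* nothing to unwind, devres owns the irq */
	return 0;
}
#endif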

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

/* The following three functions are for core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
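
/*
 * Example (editor's illustrative sketch): steering an irq to a single CPU
 * after checking that the architecture allows it. cpumask_of() builds a
 * one-CPU mask; the irq number and helper name are hypothetical.
 */
#if 0
static void pin_irq_to_cpu(unsigned int irq, int cpu)
{
	if (irq_can_set_affinity(irq))
		irq_set_affinity(irq, cpumask_of(cpu));
}
#endif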

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know
 * that a particular irq context is disabled and is the
 * only irq-context user of a lock, so that it's safe to
 * take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
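
/*
 * Example (editor's illustrative sketch): marking an irq as a wakeup
 * source across suspend/resume. The calls nest and must be balanced; the
 * foo_* driver fields are hypothetical.
 */
#if 0
static int foo_suspend(struct foo_priv *foo)
{
	if (foo->wakeup_enabled)
		enable_irq_wake(foo->irq);
	return 0;
}

static int foo_resume(struct foo_priv *foo)
{
	if (foo->wakeup_enabled)
		disable_irq_wake(foo->irq);	/* balance the enable */
	return 0;
}
#endif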

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
#  define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
#endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);
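
/*
 * Example (editor's illustrative sketch): how a subsystem wires up its
 * softirq. open_softirq() registers the action at init time;
 * raise_softirq() marks it pending so it runs on irq exit or in ksoftirqd.
 * Real users own one of the fixed NR_SOFTIRQS slots above; FOO_SOFTIRQ and
 * the foo_* names here are hypothetical.
 */
#if 0
static void foo_softirq_action(struct softirq_action *a)
{
	/* per-cpu deferred work runs here, with hardirqs enabled */
}

static int __init foo_init(void)
{
	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
	return 0;
}

static void foo_kick(void)	/* typically called from a hardirq handler */
{
	raise_softirq(FOO_SOFTIRQ);
}
#endif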

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once afterwards.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not with
     respect to other tasklets. If the caller needs intertask
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
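
/*
 * Example (editor's illustrative sketch): a statically declared tasklet
 * scheduled from a hardirq handler; func runs later in softirq context on
 * the same CPU. struct foo_priv and the foo_* names are hypothetical.
 */
#if 0
static struct foo_priv { int pending; } foo_priv;	/* hypothetical state */

static void foo_tasklet_fn(unsigned long data)
{
	struct foo_priv *foo = (struct foo_priv *)data;
	/* deferred work; serialized against other runs of this tasklet */
}

static DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn, (unsigned long)&foo_priv);

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	tasklet_schedule(&foo_tasklet);	/* no-op if already scheduled */
	return IRQ_HANDLED;
}
#endif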


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}
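
/*
 * Example (editor's illustrative sketch): tasklet_disable() waits until a
 * running instance has finished, so the section between disable and enable
 * can assume the tasklet is quiescent. Disable counts nest and must be
 * balanced. The foo_* names are hypothetical.
 */
#if 0
static void foo_reconfigure(struct foo_priv *foo)
{
	tasklet_disable(&foo->tasklet);	/* blocks until not running */
	/* safe to touch state shared with the tasklet here */
	tasklet_enable(&foo->tasklet);
}
#endif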

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
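
/*
 * Example (editor's illustrative sketch): a tasklet_hrtimer fires its
 * hrtimer in hard irq context and defers @function into tasklet (softirq)
 * context. The one-millisecond period and foo_* names are hypothetical.
 */
#if 0
static enum hrtimer_restart foo_tick(struct hrtimer *t)
{
	/* runs in tasklet context via the tasklet_hrtimer machinery */
	return HRTIMER_NORESTART;
}

static void foo_start(struct foo_priv *foo)
{
	tasklet_hrtimer_init(&foo->ttimer, foo_tick,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&foo->ttimer, ktime_set(0, NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
}
#endif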

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows (a sketch of
 * this sequence follows below):
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. local_irq_enable();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
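
/*
 * Example (editor's illustrative sketch of the recipe above): probing a
 * hypothetical device's irq line. foo_mask_device_irq(),
 * foo_trigger_irq() and foo_ack_irq() stand in for the device-specific
 * steps 1, 4 and 7; mdelay() needs linux/delay.h.
 */
#if 0
static int foo_probe_irq(struct foo_priv *foo)
{
	unsigned long mask;
	int irq;

	foo_mask_device_irq(foo);	/* 1: quiesce the device       */
	local_irq_enable();		/* 2                           */
	mask = probe_irq_on();		/* 3: grab idle irq lines      */
	foo_trigger_irq(foo);		/* 4: make the device fire     */
	mdelay(10);			/* 5: give it time             */
	irq = probe_irq_off(mask);	/* 6: 0=none, negative=multiple */
	foo_ack_irq(foo);		/* 7: clear pending interrupt  */
	return irq;
}
#endif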

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);

#endif