/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

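/*
 * Example (an illustrative sketch, not part of this header): a driver
 * that knows its line is falling-edge triggered can say so when
 * requesting it; the irq, foo_interrupt and foo names below are
 * hypothetical.
 *
 *	ret = request_irq(irq, foo_interrupt, IRQF_TRIGGER_FALLING,
 *			  "foo", foo);
 *	if (ret)
 *		return ret;
 *
 * Passing no IRQF_TRIGGER_* bit keeps whatever trigger type the machine
 * or firmware initialisation already configured, as described above.
 */
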
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (for performance reasons,
 *                only the interrupt registered first on a shared line is
 *                considered)
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000

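/*
 * Example (an illustrative sketch): a handler on a line requested with
 * IRQF_SHARED must check whether its own device raised the interrupt
 * and return IRQ_NONE otherwise, so the other actions sharing the line
 * get to run.  struct foo_dev and the foo_* helpers are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_pending(foo))
 *			return IRQ_NONE;
 *		foo_handle_and_ack(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *
 * With IRQF_SHARED the dev_id cookie must be non-NULL, since free_irq()
 * uses it to pick the right action on the line.
 */
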
typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @mask:	no comment as it is useless and about to be removed
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id);
extern int __must_check request_irq(unsigned int, irq_handler_t handler,
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
			    irq_handler_t handler, unsigned long irqflags,
			    const char *devname, void *dev_id);
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

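/*
 * Example (an illustrative sketch): with the device-managed variant the
 * irq is released automatically when the driver detaches, so the error
 * and remove paths need no explicit free_irq().  foo_probe() and the
 * other names are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		...
 *		ret = devm_request_irq(dev, irq, foo_interrupt, 0,
 *				       "foo", foo);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 *
 * devm_free_irq() is only needed in the rare case where the irq must be
 * released before the device itself goes away.
 */
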
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

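/*
 * Example (an illustrative sketch): disable_irq() waits for a running
 * handler to complete, so a driver can fence its handler out while
 * reconfiguring state shared with it.  The foo names are hypothetical.
 *
 *	disable_irq(foo->irq);
 *	foo_reprogram_rings(foo);
 *	enable_irq(foo->irq);
 *
 * Use disable_irq_nosync() when the caller might itself be running in
 * the handler's context, where waiting for completion would deadlock.
 */
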
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

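/*
 * Example (an illustrative sketch): pinning an interrupt to a single
 * CPU where the architecture allows it; irq and cpu are hypothetical.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(cpu));
 *
 * On !SMP builds the stubs above make irq_can_set_affinity() return 0,
 * so the irq_set_affinity() call is simply skipped.
 */
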
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and is the only irq-context user of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

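/*
 * Example (an illustrative sketch): a lock shared with exactly one irq
 * handler can be taken with just that irq fenced off; under lockdep the
 * variants above additionally disable local irqs so the validator sees
 * a consistent picture.  The foo names are hypothetical.
 *
 *	unsigned long flags;
 *
 *	disable_irq_nosync_lockdep_irqsave(foo->irq, &flags);
 *	spin_lock(&foo->lock);
 *	...
 *	spin_unlock(&foo->lock);
 *	enable_irq_lockdep_irqrestore(foo->irq, &flags);
 */
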
/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}

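/*
 * Example (an illustrative sketch): a driver whose interrupt should be
 * able to wake the system typically flips the wake state from its
 * suspend/resume callbacks, gated on device_may_wakeup().  The foo
 * names are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
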
#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
#  define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
#endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

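/*
 * Example (an illustrative sketch): a subsystem that genuinely needs
 * its own softirq registers a handler for one of the enum slots above
 * at init time and raises it whenever there is work.  FOO_SOFTIRQ
 * stands in for a real slot such as NET_TX_SOFTIRQ, and the foo names
 * are hypothetical.
 *
 *	static void foo_softirq_action(struct softirq_action *h)
 *	{
 *		...drain the per-cpu work queued for this softirq...
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *
 * Then, with irqs already disabled (e.g. from irq context):
 *
 *	raise_softirq_irqoff(FOO_SOFTIRQ);
 *
 * or from any other context:
 *
 *	raise_softirq(FOO_SOFTIRQ);
 */
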
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

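/*
 * Example (a rough sketch, assuming the caller owns a call_single_data
 * slot, here req->csd, for the lifetime of the request): completing
 * block I/O work on the cpu that submitted it.  req is hypothetical.
 *
 *	send_remote_softirq(&req->csd, req->cpu, BLOCK_SOFTIRQ);
 *
 * If the remote cpu cannot be reached, the work is queued on the local
 * cpu instead, so the softirq handler must not assume which cpu it
 * ends up running on.
 */
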
/* Tasklets --- multithreaded analogue of BHs.

   Main feature differentiating them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differentiating them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some intertask synchronization,
     it has to provide it with spinlocks.

   A usage sketch follows the declarations below.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

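/*
 * Example (an illustrative sketch): a statically declared tasklet
 * scheduled from an interrupt handler, deferring the bulk of the work
 * to softirq context.  The foo names are hypothetical.
 *
 *	static void foo_do_tasklet(unsigned long data)
 *	{
 *		...the deferrable part of the interrupt work...
 *	}
 *
 *	DECLARE_TASKLET(foo_tasklet, foo_do_tasklet, 0);
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		...the urgent hardware handling...
 *		tasklet_schedule(&foo_tasklet);
 *		return IRQ_HANDLED;
 *	}
 *
 * DECLARE_TASKLET_DISABLED() starts with count set to 1, so the tasklet
 * cannot run until a matching tasklet_enable().
 */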

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

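/*
 * Example (an illustrative sketch): per-device tasklets are typically
 * set up with tasklet_init() and torn down with tasklet_kill(), which
 * waits for a scheduled instance to finish before returning.  The foo
 * names are hypothetical.
 *
 *	tasklet_init(&foo->tasklet, foo_do_tasklet, (unsigned long)foo);
 *	...
 *	tasklet_schedule(&foo->tasklet);
 *	...
 *	tasklet_kill(&foo->tasklet);
 *
 * tasklet_kill() must be called before freeing the structure, or the
 * softirq code may walk freed memory.
 */
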
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows (sketched
 * below this comment):
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

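/*
 * Example (an illustrative sketch of the recipe above) for a
 * hypothetical device that can be told to generate a test interrupt;
 * the foo helpers are made up for illustration, and local interrupts
 * are assumed to be enabled already (step 2).
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	foo_mask_interrupts(foo);		step 1
 *	mask = probe_irq_on();			step 3
 *	foo_trigger_test_interrupt(foo);	step 4
 *	udelay(100);				step 5
 *	irq = probe_irq_off(mask);		step 6
 *	foo_ack_interrupt(foo);			step 7
 *
 * A result of zero means no interrupt was seen; a negative value means
 * several lines fired and the probe should be retried (step 8).
 */
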
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
extern void debug_poll_all_shared_irqs(void);
#else
static inline void debug_poll_all_shared_irqs(void) { }
#endif

int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int cpu);

#endif