xref: /linux-6.15/include/linux/msi.h (revision 288c81ce)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef LINUX_MSI_H
3 #define LINUX_MSI_H
4 
5 #include <linux/cpumask.h>
6 #include <linux/list.h>
7 #include <asm/msi.h>
8 
9 /* Dummy (fallback) shadow structures, used when the architecture does not define its own in <asm/msi.h> */
10 #ifndef arch_msi_msg_addr_lo
11 typedef struct arch_msi_msg_addr_lo {
12 	u32	address_lo;
13 } __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
14 #endif
15 
16 #ifndef arch_msi_msg_addr_hi
17 typedef struct arch_msi_msg_addr_hi {
18 	u32	address_hi;
19 } __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
20 #endif
21 
22 #ifndef arch_msi_msg_data
23 typedef struct arch_msi_msg_data {
24 	u32	data;
25 } __attribute__ ((packed)) arch_msi_msg_data_t;
26 #endif
27 
28 /**
29  * struct msi_msg - Representation of an MSI message
30  * @address_lo:		Low 32 bits of msi message address
31  * @arch_addr_lo:	Architecture specific shadow of @address_lo
32  * @address_hi:		High 32 bits of msi message address
33  *			(only used when device supports it)
34  * @arch_addr_hi:	Architecture specific shadow of @address_hi
35  * @data:		MSI message data (usually 16 bits)
36  * @arch_data:		Architecture specific shadow of @data
37  */
38 struct msi_msg {
39 	union {
40 		u32			address_lo;
41 		arch_msi_msg_addr_lo_t	arch_addr_lo;
42 	};
43 	union {
44 		u32			address_hi;
45 		arch_msi_msg_addr_hi_t	arch_addr_hi;
46 	};
47 	union {
48 		u32			data;
49 		arch_msi_msg_data_t	arch_data;
50 	};
51 };
52 
53 extern int pci_msi_ignore_mask;
54 /* Helper functions */
55 struct irq_data;
56 struct msi_desc;
57 struct pci_dev;
58 struct platform_msi_priv_data;
59 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
60 #ifdef CONFIG_GENERIC_MSI_IRQ
61 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
62 #else
/* Stub: without generic MSI support there is no cached message to fetch */
63 static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
64 {
65 }
66 #endif
67 
/* Callback type used to write an MSI message for a descriptor */
68 typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
69 				    struct msi_msg *msg);
70 
71 /**
72  * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
73  *
74  * @msi_mask:	[PCI MSI]   MSI cached mask bits
75  * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
76  * @is_msix:	[PCI MSI/X] True if MSI-X
77  * @multiple:	[PCI MSI/X] log2 num of messages allocated
78  * @multi_cap:	[PCI MSI/X] log2 num of messages supported
79  * @can_mask:	[PCI MSI/X] Masking supported?
80  * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI-X] True if the entry is virtual
 *		(NOTE(review): member was undocumented; confirm exact semantics)
81  * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
82  * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
83  * @mask_pos:	[PCI MSI]   Mask register position
84  * @mask_base:	[PCI MSI-X] Mask register base address
85  */
86 struct pci_msi_desc {
87 	union {
88 		u32 msi_mask;
89 		u32 msix_ctrl;
90 	};
91 	struct {
92 		u8	is_msix		: 1;
93 		u8	multiple	: 3;
94 		u8	multi_cap	: 3;
95 		u8	can_mask	: 1;
96 		u8	is_64		: 1;
97 		u8	is_virtual	: 1;
98 		u16	entry_nr;
99 		unsigned default_irq;
100 	} msi_attrib;
101 	union {
102 		u8	mask_pos;
103 		void __iomem *mask_base;
104 	};
105 };
106 
107 /**
108  * struct platform_msi_desc - Platform device specific msi descriptor data
109  * @msi_priv_data:	Pointer to platform private data
110  * @msi_index:		The index of the MSI descriptor for multi MSI
111  */
112 struct platform_msi_desc {
113 	struct platform_msi_priv_data	*msi_priv_data;
114 	u16				msi_index;
115 };
116 
117 /**
118  * struct fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
119  * @msi_index:		The index of the MSI descriptor
120  */
121 struct fsl_mc_msi_desc {
122 	u16				msi_index;
123 };
124 
125 /**
126  * struct ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
127  * @dev_index: TISCI device index
128  */
129 struct ti_sci_inta_msi_desc {
130 	u16	dev_index;
131 };
132 
133 /**
134  * struct msi_desc - Descriptor structure for MSI based interrupts
135  * @list:	List head for management
136  * @irq:	The base interrupt number
137  * @nvec_used:	The number of vectors used
138  * @dev:	Pointer to the device which uses this descriptor
139  * @msg:	The last set MSI message cached for reuse
140  * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_cookie: Optional opaque IOMMU data (only with CONFIG_IRQ_MSI_IOMMU)
141  *
142  * @write_msi_msg:	Callback that may be called when the MSI message
143  *			address or data changes
144  * @write_msi_msg_data:	Data parameter for the callback.
145  *
146  * @pci:	[PCI]	    PCI specific msi descriptor data
147  * @platform:	[platform]  Platform device specific msi descriptor data
148  * @fsl_mc:	[fsl-mc]    FSL MC device specific msi descriptor data
149  * @inta:	[INTA]	    TISCI based INTA specific msi descriptor data
150  */
151 struct msi_desc {
152 	/* Shared device/bus type independent data */
153 	struct list_head		list;
154 	unsigned int			irq;
155 	unsigned int			nvec_used;
156 	struct device			*dev;
157 	struct msi_msg			msg;
158 	struct irq_affinity_desc	*affinity;
159 #ifdef CONFIG_IRQ_MSI_IOMMU
160 	const void			*iommu_cookie;
161 #endif
162 
163 	void (*write_msi_msg)(struct msi_desc *entry, void *data);
164 	void *write_msi_msg_data;
165 
166 	union {
167 		struct pci_msi_desc		pci;
168 		struct platform_msi_desc	platform;
169 		struct fsl_mc_msi_desc		fsl_mc;
170 		struct ti_sci_inta_msi_desc	inta;
171 	};
172 };
173 
174 /* Helpers to hide struct msi_desc implementation details */
175 #define msi_desc_to_dev(desc)		((desc)->dev)
176 #define dev_to_msi_list(dev)		(&(dev)->msi_list)
177 #define first_msi_entry(dev)		\
178 	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
179 #define for_each_msi_entry(desc, dev)	\
180 	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
181 #define for_each_msi_entry_safe(desc, tmp, dev)	\
182 	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
/* Iterate over each allocated Linux irq number of every active descriptor */
183 #define for_each_msi_vector(desc, __irq, dev)				\
184 	for_each_msi_entry((desc), (dev))				\
185 		if ((desc)->irq)					\
186 			for (__irq = (desc)->irq;			\
187 			     __irq < ((desc)->irq + (desc)->nvec_used);	\
188 			     __irq++)
189 
/* Accessors for msi_desc::iommu_cookie; no-op stubs when the option is off */
190 #ifdef CONFIG_IRQ_MSI_IOMMU
191 static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
192 {
193 	return desc->iommu_cookie;
194 }
195 
196 static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
197 					     const void *iommu_cookie)
198 {
199 	desc->iommu_cookie = iommu_cookie;
200 }
201 #else
202 static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
203 {
204 	return NULL;
205 }
206 
207 static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
208 					     const void *iommu_cookie)
209 {
210 }
211 #endif
212 
/* PCI specific descriptor iteration and message write helpers */
213 #ifdef CONFIG_PCI_MSI
214 #define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
215 #define for_each_pci_msi_entry(desc, pdev)	\
216 	for_each_msi_entry((desc), &(pdev)->dev)
217 
218 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
219 void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
220 #else /* CONFIG_PCI_MSI */
/* Stub: without PCI/MSI support writing an MSI message is a no-op */
221 static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
222 {
223 }
224 #endif /* CONFIG_PCI_MSI */
225 
/* Descriptor allocation/teardown and low level PCI message/mask access */
226 struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
227 				 const struct irq_affinity_desc *affinity);
228 void free_msi_entry(struct msi_desc *entry);
229 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
230 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
231 
232 void pci_msi_mask_irq(struct irq_data *data);
233 void pci_msi_unmask_irq(struct irq_data *data);
234 
/* Sysfs representation of MSI irqs; stubs when sysfs is disabled */
235 #ifdef CONFIG_SYSFS
236 const struct attribute_group **msi_populate_sysfs(struct device *dev);
237 void msi_destroy_sysfs(struct device *dev,
238 		       const struct attribute_group **msi_irq_groups);
239 #else
240 static inline const struct attribute_group **msi_populate_sysfs(struct device *dev)
241 {
242 	return NULL;
243 }
244 static inline void msi_destroy_sysfs(struct device *dev, const struct attribute_group **msi_irq_groups)
245 {
246 }
247 #endif
248 
249 /*
250  * The arch hooks to set up msi irqs. Default functions are implemented
251  * as weak symbols so that they /can/ be overridden by architecture specific
252  * code if needed. These hooks can only be enabled by the architecture.
253  *
254  * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
255  * stubs with warnings.
256  */
257 #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
258 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
259 void arch_teardown_msi_irq(unsigned int irq);
260 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
261 void arch_teardown_msi_irqs(struct pci_dev *dev);
262 #else
/* Stubs: reaching these without the fallback config is a bug, hence the WARN */
263 static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
264 {
265 	WARN_ON_ONCE(1);
266 	return -ENODEV;
267 }
268 
269 static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
270 {
271 	WARN_ON_ONCE(1);
272 }
273 #endif
274 
275 /*
276  * The restore hook is still available even for fully irq domain based
277  * setups. Courtesy to XEN/X86.
278  */
279 bool arch_restore_msi_irqs(struct pci_dev *dev);
280 
281 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
282 
283 #include <linux/irqhandler.h>
284 
285 struct irq_domain;
286 struct irq_domain_ops;
287 struct irq_chip;
288 struct device_node;
289 struct fwnode_handle;
290 struct msi_domain_info;
291 
292 /**
293  * struct msi_domain_ops - MSI interrupt domain callbacks
294  * @get_hwirq:		Retrieve the resulting hw irq number
295  * @msi_init:		Domain specific init function for MSI interrupts
296  * @msi_free:		Domain specific function to free an MSI interrupt
297  * @msi_check:		Callback for verification of the domain/info/dev data
298  * @msi_prepare:	Prepare the allocation of the interrupts in the domain
299  * @set_desc:		Set the msi descriptor for an interrupt
300  * @handle_error:	Optional error handler if the allocation fails
301  * @domain_alloc_irqs:	Optional function to override the default allocation
302  *			function.
303  * @domain_free_irqs:	Optional function to override the default free
304  *			function.
305  *
306  * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
307  * irqdomain.
308  *
309  * @msi_check, @msi_prepare, @handle_error and @set_desc are callbacks used by
310  * msi_domain_alloc/free_irqs().
311  *
312  * @domain_alloc_irqs, @domain_free_irqs can be used to override the
313  * default allocation/free functions (__msi_domain_alloc/free_irqs). This
314  * is initially for a wrapper around XEN's separate MSI universe which can't
315  * be wrapped into the regular irq domains concepts by mere mortals.  This
316  * allows to universally use msi_domain_alloc/free_irqs without having to
317  * special case XEN all over the place.
318  *
319  * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs
320  * are set to the default implementation if NULL and even when
321  * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and
322  * because these callbacks are obviously mandatory.
323  *
324  * This is NOT meant to be abused, but it can be useful to build wrappers
325  * for specialized MSI irq domains which need extra work before and after
326  * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
327  */
328 struct msi_domain_ops {
329 	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
330 				     msi_alloc_info_t *arg);
331 	int		(*msi_init)(struct irq_domain *domain,
332 				    struct msi_domain_info *info,
333 				    unsigned int virq, irq_hw_number_t hwirq,
334 				    msi_alloc_info_t *arg);
335 	void		(*msi_free)(struct irq_domain *domain,
336 				    struct msi_domain_info *info,
337 				    unsigned int virq);
338 	int		(*msi_check)(struct irq_domain *domain,
339 				     struct msi_domain_info *info,
340 				     struct device *dev);
341 	int		(*msi_prepare)(struct irq_domain *domain,
342 				       struct device *dev, int nvec,
343 				       msi_alloc_info_t *arg);
344 	void		(*set_desc)(msi_alloc_info_t *arg,
345 				    struct msi_desc *desc);
346 	int		(*handle_error)(struct irq_domain *domain,
347 					struct msi_desc *desc, int error);
348 	int		(*domain_alloc_irqs)(struct irq_domain *domain,
349 					     struct device *dev, int nvec);
350 	void		(*domain_free_irqs)(struct irq_domain *domain,
351 					    struct device *dev);
352 };
353 
354 /**
355  * struct msi_domain_info - MSI interrupt domain data
356  * @flags:		Flags to describe features and capabilities
357  * @ops:		The callback data structure
358  * @chip:		Optional: associated interrupt chip
359  * @chip_data:		Optional: associated interrupt chip data
360  * @handler:		Optional: associated interrupt flow handler
361  * @handler_data:	Optional: associated interrupt flow handler data
362  * @handler_name:	Optional: associated interrupt flow handler name
363  * @data:		Optional: domain specific data
364  */
365 struct msi_domain_info {
366 	u32			flags;
367 	struct msi_domain_ops	*ops;
368 	struct irq_chip		*chip;
369 	void			*chip_data;
370 	irq_flow_handler_t	handler;
371 	void			*handler_data;
372 	const char		*handler_name;
373 	void			*data;
374 };
375 
376 /* Flags for msi_domain_info */
377 enum {
378 	/*
379 	 * Init non implemented ops callbacks with default MSI domain
380 	 * callbacks.
381 	 */
382 	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
383 	/*
384 	 * Init non implemented chip callbacks with default MSI chip
385 	 * callbacks.
386 	 */
387 	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
388 	/* Support multiple PCI MSI interrupts */
389 	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
390 	/* Support PCI MSI-X interrupts */
391 	MSI_FLAG_PCI_MSIX		= (1 << 3),
392 	/* Needs early activate, required for PCI */
393 	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
394 	/*
395 	 * Must reactivate when irq is started even when
396 	 * MSI_FLAG_ACTIVATE_EARLY has been set.
397 	 */
398 	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
399 	/* Is level-triggered capable, using two messages */
400 	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
401 };
402 
403 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
404 			    bool force);
405 
/* Generic MSI irq domain creation and interrupt allocation/free */
406 struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
407 					 struct msi_domain_info *info,
408 					 struct irq_domain *parent);
409 int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
410 			    int nvec);
411 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
412 			  int nvec);
413 void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
414 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
415 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
416 
/* Platform (non-PCI) device MSI interfaces */
417 struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
418 						  struct msi_domain_info *info,
419 						  struct irq_domain *parent);
420 int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
421 				   irq_write_msi_msg_t write_msi_msg);
422 void platform_msi_domain_free_irqs(struct device *dev);
423 
424 /* When an MSI domain is used as an intermediate domain */
425 int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
426 			    int nvec, msi_alloc_info_t *args);
427 int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
428 			     int virq, int nvec, msi_alloc_info_t *args);
429 struct irq_domain *
430 __platform_msi_create_device_domain(struct device *dev,
431 				    unsigned int nvec,
432 				    bool is_tree,
433 				    irq_write_msi_msg_t write_msi_msg,
434 				    const struct irq_domain_ops *ops,
435 				    void *host_data);
436 
437 #define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
438 	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
439 #define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
440 	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
441 
442 int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
443 			      unsigned int nr_irqs);
444 void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
445 			      unsigned int nvec);
446 void *platform_msi_get_host_data(struct irq_domain *domain);
447 #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
448 
449 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
450 struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
451 					     struct msi_domain_info *info,
452 					     struct irq_domain *parent);
453 int pci_msi_domain_check_cap(struct irq_domain *domain,
454 			     struct msi_domain_info *info, struct device *dev);
455 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
456 struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
457 bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
458 #else
/* Stub: without PCI/MSI irq domain support there is no device domain */
459 static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
460 {
461 	return NULL;
462 }
463 #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
464 
465 #endif /* LINUX_MSI_H */
466