/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>

struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};

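/*
 * Illustrative sketch (not part of this header): composing a struct
 * msi_msg from a 64-bit doorbell address and a data value. The names
 * "doorbell" and "event_id" are invented for the example; real values
 * come from the interrupt controller driver.
 *
 *	struct msi_msg msg;
 *
 *	msg.address_lo = lower_32_bits(doorbell);
 *	msg.address_hi = upper_32_bits(doorbell);
 *	msg.data = event_id;
 */
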
extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};

/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};

/**
 * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index: TISCI device index
 */
struct ti_sci_inta_msi_desc {
	u16	dev_index;
};

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform]  Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc]    FSL MC device specific msi descriptor data
 * @inta:	[INTA]	    TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	maskbit		: 1;
				u8	is_64		: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev)	\
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)

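/*
 * Illustrative sketch (not part of this header): walking the MSI
 * descriptors of a device with the iterators above. "dev" is assumed
 * to be a struct device whose descriptors have already been set up,
 * e.g. by msi_domain_alloc_irqs().
 *
 *	struct msi_desc *desc;
 *
 *	for_each_msi_entry(desc, dev)
 *		pr_debug("virq %u uses %u vector(s)\n",
 *			 desc->irq, desc->nvec_used);
 */
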
#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif

#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */

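/*
 * Illustrative sketch (not part of this header): the PCI flavoured
 * iterators above operate on a struct pci_dev once MSI or MSI-X has
 * been enabled. "pdev" is an assumed, already configured PCI device.
 *
 *	struct msi_desc *desc;
 *
 *	for_each_pci_msi_entry(desc, pdev)
 *		pci_info(msi_desc_to_pci_dev(desc), "entry %u -> virq %u\n",
 *			 desc->msi_attrib.entry_nr, desc->irq);
 */
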
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

/*
 * The arch hooks to set up msi irqs. These functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture
 * specific code if needed.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void arch_restore_msi_irqs(struct pci_dev *dev);

void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

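/*
 * Illustrative sketch (not part of this header): an architecture that
 * only needs to add its own teardown step can override the weak symbol
 * and fall back to the default implementation for the common work.
 * foo_arch_quiesce_msi() is a hypothetical, architecture specific helper.
 *
 *	void arch_teardown_msi_irqs(struct pci_dev *dev)
 *	{
 *		foo_arch_quiesce_msi(dev);
 *		default_teardown_msi_irqs(dev);
 *	}
 */
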
struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};

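/*
 * Illustrative sketch (not part of this header): a legacy PCI host
 * bridge driver supplying an msi_controller. The foo_* callbacks are
 * invented for the example; a real driver programs its hardware in the
 * setup hook and associates the controller with its PCI bus.
 *
 *	static struct msi_controller foo_msi_chip = {
 *		.setup_irq	= foo_msi_setup_irq,
 *		.teardown_irq	= foo_msi_teardown_irq,
 *	};
 */
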
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>
#include <asm/msi.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

/* Flags for msi_domain_info */
enum {
	/*
	 * Init non-implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non-implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

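/*
 * Illustrative sketch (not part of this header): an irqchip driver
 * building an MSI domain on top of its parent domain and allocating
 * vectors for a device. foo_msi_ops, foo_msi_chip, fwnode and parent
 * are assumed to exist; the flags fill in default callbacks for
 * anything the driver leaves unimplemented.
 *
 *	static struct msi_domain_info foo_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.ops	= &foo_msi_ops,
 *		.chip	= &foo_msi_chip,
 *	};
 *
 *	domain = msi_create_irq_domain(fwnode, &foo_msi_info, parent);
 *	if (domain)
 *		ret = msi_domain_alloc_irqs(domain, dev, nvec);
 */
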
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);

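/*
 * Illustrative sketch (not part of this header): a platform device
 * driver allocating MSIs backed by a device specific doorbell. The
 * register offsets (FOO_MSI_ADDR_LO/HI, FOO_MSI_DATA) and struct
 * foo_priv are invented for the example.
 *
 *	static void foo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel_relaxed(msg->address_lo, priv->base + FOO_MSI_ADDR_LO);
 *		writel_relaxed(msg->address_hi, priv->base + FOO_MSI_ADDR_HI);
 *		writel_relaxed(msg->data, priv->base + FOO_MSI_DATA);
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(dev, nvec, foo_write_msi_msg);
 */
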
/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
					  struct msi_desc *desc);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
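
/*
 * Illustrative sketch (not part of this header): a PCI host bridge or
 * irqchip driver creating a PCI/MSI domain stacked on its parent
 * domain. foo_pci_msi_info is assumed to be a struct msi_domain_info
 * set up by the driver, with MSI_FLAG_PCI_MSIX included when MSI-X
 * support is wanted.
 *
 *	pci_domain = pci_msi_create_irq_domain(fwnode, &foo_pci_msi_info,
 *					       parent);
 *	if (!pci_domain)
 *		return -ENOMEM;
 */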

#endif /* LINUX_MSI_H */