/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

/*
 * This header file contains MSI data structures and functions which are
 * only relevant for:
 *	- Interrupt core code
 *	- PCI/MSI core code
 *	- MSI interrupt domain implementations
 *	- IOMMU, low level VFIO, NTB and other justified exceptions
 *	  dealing with low level MSI details.
 *
 * Regular device drivers have no business with any of these functions and
 * especially storing MSI descriptor pointers in random code is considered
 * abuse.
 *
 * Device driver relevant functions are available in <linux/msi_api.h>
 */

#include <linux/irqdomain_defs.h>
#include <linux/cpumask_types.h>
#include <linux/msi_api.h>
#include <linux/irq.h>

#include <asm/msi.h>

/* Dummy shadow structures if an architecture does not define them */
#ifndef arch_msi_msg_addr_lo
typedef struct arch_msi_msg_addr_lo {
	u32	address_lo;
} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
#endif

#ifndef arch_msi_msg_addr_hi
typedef struct arch_msi_msg_addr_hi {
	u32	address_hi;
} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
#endif

#ifndef arch_msi_msg_data
typedef struct arch_msi_msg_data {
	u32	data;
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif

#ifndef arch_is_isolated_msi
#define arch_is_isolated_msi() false
#endif

/**
 * struct msi_msg - Representation of an MSI message
 * @address_lo:		Low 32 bits of MSI message address
 * @arch_addr_lo:	Architecture specific shadow of @address_lo
 * @address_hi:		High 32 bits of MSI message address
 *			(only used when device supports it)
 * @arch_addr_hi:	Architecture specific shadow of @address_hi
 * @data:		MSI message data (usually 16 bits)
 * @arch_data:		Architecture specific shadow of @data
 */
struct msi_msg {
	union {
		u32			address_lo;
		arch_msi_msg_addr_lo_t	arch_addr_lo;
	};
	union {
		u32			address_hi;
		arch_msi_msg_addr_hi_t	arch_addr_hi;
	};
	union {
		u32			data;
		arch_msi_msg_data_t	arch_data;
	};
};

/* Helper functions */
struct msi_desc;
struct pci_dev;
struct device_attribute;
struct irq_domain;
struct irq_affinity_desc;

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);
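
/*
 * Illustrative sketch (not part of this header): an irq_write_msi_msg_t
 * callback as a platform MSI consumer might hand to
 * platform_device_msi_init_and_alloc_irqs() (declared further down). The
 * driver structure, register offsets and the dev_get_drvdata() usage are
 * assumptions made up for the example:
 *
 *	static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct my_device *mydev = dev_get_drvdata(msi_desc_to_dev(desc));
 *		unsigned int idx = desc->msi_index;
 *
 *		// Program the doorbell address and payload for this vector
 *		writel_relaxed(msg->address_lo, mydev->base + MY_MSI_ADDR_LO(idx));
 *		writel_relaxed(msg->address_hi, mydev->base + MY_MSI_ADDR_HI(idx));
 *		writel_relaxed(msg->data, mydev->base + MY_MSI_DATA(idx));
 *	}
 */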

/**
 * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
 *
 * @msi_mask:	[PCI MSI]   MSI cached mask bits
 * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @can_mask:	[PCI MSI/X] Masking supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI-X] True for a virtual vector which is not backed by
 *		a hardware MSI-X table entry
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 */
struct pci_msi_desc {
	union {
		u32 msi_mask;
		u32 msix_ctrl;
	};
	struct {
		u8	is_msix		: 1;
		u8	multiple	: 3;
		u8	multi_cap	: 3;
		u8	can_mask	: 1;
		u8	is_64		: 1;
		u8	is_virtual	: 1;
		unsigned default_irq;
	} msi_attrib;
	union {
		u8	mask_pos;
		void __iomem *mask_base;
	};
};

/**
 * union msi_domain_cookie - Opaque MSI domain specific data
 * @value:	u64 value store
 * @ptr:	Pointer to domain specific data
 * @iobase:	Domain specific IOMEM pointer
 *
 * The content of this data is implementation defined and used by the MSI
 * domain to store domain specific information which is required for
 * interrupt chip callbacks.
 */
union msi_domain_cookie {
	u64		value;
	void		*ptr;
	void __iomem	*iobase;
};

/**
 * struct msi_desc_data - Generic MSI descriptor data
 * @dcookie:	Cookie for MSI domain specific data which is required
 *		for irq_chip callbacks
 * @icookie:	Cookie for the MSI interrupt instance provided by
 *		the usage site to the allocation function
 *
 * The content of this data is implementation defined, e.g. PCI/IMS
 * implementations define the meaning of the data. The MSI core ignores
 * this data completely.
 */
struct msi_desc_data {
	union msi_domain_cookie		dcookie;
	union msi_instance_cookie	icookie;
};

#define MSI_MAX_INDEX		((unsigned int)USHRT_MAX)

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_cookie: Optional cookie set by the IOMMU code
 *		(only with CONFIG_IRQ_MSI_IOMMU)
 * @sysfs_attrs: Pointer to sysfs device attribute
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @msi_index:	Index of the msi descriptor
 * @pci:	PCI specific msi descriptor data
 * @data:	Generic MSI descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif
#ifdef CONFIG_SYSFS
	struct device_attribute		*sysfs_attrs;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	u16				msi_index;
	union {
		struct pci_msi_desc	pci;
		struct msi_desc_data	data;
	};
};
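
/*
 * Illustrative sketch (not part of this header): how a per device MSI
 * domain's interrupt chip might use the cookies stored in msi_desc::data.
 * The chip callback, the register offset and the decision to keep the
 * doorbell base in dcookie.iobase are assumptions made up for the example:
 *
 *	static void my_chip_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
 *	{
 *		struct msi_desc *desc = irq_data_get_msi_desc(d);
 *		void __iomem *doorbell = desc->data.dcookie.iobase;
 *
 *		// The instance cookie was supplied by the allocation site
 *		u64 instance = desc->data.icookie.value;
 *
 *		writel_relaxed(msg->data, doorbell + MY_DOORBELL(instance));
 *	}
 */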

/*
 * Filter values for the MSI descriptor iterators and accessor functions.
 */
enum msi_desc_filter {
	/* All descriptors */
	MSI_DESC_ALL,
	/* Descriptors which have no interrupt associated */
	MSI_DESC_NOTASSOCIATED,
	/* Descriptors which have an interrupt associated */
	MSI_DESC_ASSOCIATED,
};

/**
 * struct msi_dev_domain - The internals of MSI domain info per device
 * @store:	Xarray for storing MSI descriptor pointers
 * @domain:	Pointer to a per device interrupt domain
 */
struct msi_dev_domain {
	struct xarray		store;
	struct irq_domain	*domain;
};

int msi_setup_device_data(struct device *dev);

void msi_lock_descs(struct device *dev);
void msi_unlock_descs(struct device *dev);

struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
				       enum msi_desc_filter filter);

/**
 * msi_first_desc - Get the first MSI descriptor of the default irqdomain
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
static inline struct msi_desc *msi_first_desc(struct device *dev,
					      enum msi_desc_filter filter)
{
	return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
}

struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
			       enum msi_desc_filter filter);

/**
 * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
 *
 * @desc:	struct msi_desc pointer used as iterator
 * @dev:	struct device pointer - device to iterate
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Filter for descriptor selection
 *
 * Notes:
 *  - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
 *    pair.
 *  - It is safe to remove a retrieved MSI descriptor in the loop.
 */
#define msi_domain_for_each_desc(desc, dev, domid, filter)			\
	for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc);	\
	     (desc) = msi_next_desc((dev), (domid), (filter)))

/**
 * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
 *
 * @desc:	struct msi_desc pointer used as iterator
 * @dev:	struct device pointer - device to iterate
 * @filter:	Filter for descriptor selection
 *
 * Notes:
 *  - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
 *    pair.
 *  - It is safe to remove a retrieved MSI descriptor in the loop.
 */
#define msi_for_each_desc(desc, dev, filter)					\
	msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
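
/*
 * Illustrative sketch (not part of this header): walking the descriptors of
 * the default irqdomain under the required descriptor lock. Only the filter
 * choice and the loop body are made up for the example:
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
 *		// desc->irq is the Linux interrupt number of this entry
 *		do_something(dev, desc->irq);
 *	}
 *	msi_unlock_descs(dev);
 */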

#define msi_desc_to_dev(desc)		((desc)->dev)

#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif

int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
			       struct msi_desc *init_desc);
/**
 * msi_insert_msi_desc - Allocate and initialize an MSI descriptor in the
 *			 default irqdomain and insert it at @init_desc->msi_index
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
}

void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
				     unsigned int first, unsigned int last);

/**
 * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
 *			      in the default irqdomain
 *
 * @dev:	Device for which to free the descriptors
 * @first:	Index to start freeing from (inclusive)
 * @last:	Last index to be freed (inclusive)
 */
static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
					    unsigned int last)
{
	msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
}

/**
 * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
 * @dev:	Device for which to free the descriptors
 */
static inline void msi_free_msi_descs(struct device *dev)
{
	msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
}
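
/*
 * Illustrative sketch (not part of this header): a non-PCI MSI user
 * pre-populating descriptors before allocating interrupts. The vector count
 * is made up for the example and the MSI core's descriptor locking rules
 * still apply to the calling context:
 *
 *	struct msi_desc desc = { };
 *	unsigned int i;
 *	int ret;
 *
 *	for (i = 0; i < MY_NUM_VECTORS; i++) {
 *		desc.msi_index = i;
 *		ret = msi_insert_msi_desc(dev, &desc);
 *		if (ret)
 *			return ret;
 *	}
 */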

/*
 * The arch hooks to set up MSI irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks can only be enabled by the architecture.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */

/*
 * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
 * entries of MSI IRQs.
 */
#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
#ifdef CONFIG_SYSFS
int msi_device_populate_sysfs(struct device *dev);
void msi_device_destroy_sysfs(struct device *dev);
#else /* CONFIG_SYSFS */
static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
static inline void msi_device_destroy_sysfs(struct device *dev) { }
#endif /* !CONFIG_SYSFS */
#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */

/*
 * The restore hook is still available even for fully irq domain based
 * setups. Courtesy to XEN/X86.
 */
bool arch_restore_msi_irqs(struct pci_dev *dev);

#ifdef CONFIG_GENERIC_MSI_IRQ

#include <linux/irqhandler.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct irq_fwspec;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @prepare_desc:	Optional function to prepare the allocated MSI descriptor
 *			in the domain
 * @set_desc:		Set the msi descriptor for an interrupt
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 * @msi_post_free:	Optional function which is invoked after freeing
 *			all interrupts.
 * @msi_translate:	Optional translate callback to support the odd wire to
 *			MSI bridges, e.g. MBIGEN
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
 * irqdomain.
 *
 * @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
 * msi_domain_alloc/free_irqs*() variants.
 *
 * @domain_alloc_irqs, @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * is initially for a wrapper around XEN's separate MSI universe which can't
 * be wrapped into the regular irq domain concepts by mere mortals. This
 * allows msi_domain_alloc/free_irqs to be used universally without having
 * to special case XEN all over the place.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
					struct msi_desc *desc);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					    struct device *dev);
	void		(*msi_post_free)(struct irq_domain *domain,
					 struct device *dev);
	int		(*msi_translate)(struct irq_domain *domain, struct irq_fwspec *fwspec,
					 irq_hw_number_t *hwirq, unsigned int *type);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @bus_token:		The domain bus token
 * @hwsize:		The hardware table size or the software index limit.
 *			If 0 then the size is considered unlimited and
 *			gets initialized to the maximum software index limit
 *			by the domain creation code.
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32				flags;
	enum irq_domain_bus_token	bus_token;
	unsigned int			hwsize;
	struct msi_domain_ops		*ops;
	struct irq_chip			*chip;
	void				*chip_data;
	irq_flow_handler_t		handler;
	void				*handler_data;
	const char			*handler_name;
	void				*data;
};

/**
 * struct msi_domain_template - Template for MSI device domains
 * @name:	Storage for the resulting name. Filled in by the core.
 * @chip:	Interrupt chip for this domain
 * @ops:	MSI domain ops
 * @info:	MSI domain info data
 */
struct msi_domain_template {
	char			name[48];
	struct irq_chip		chip;
	struct msi_domain_ops	ops;
	struct msi_domain_info	info;
};
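
/*
 * Illustrative sketch (not part of this header): a driver-private template
 * for a per device MSI domain created with msi_create_device_irq_domain()
 * (declared further down). The chip callbacks, the flag set, the vector
 * count and the bus token choice are assumptions made up for the example:
 *
 *	static const struct msi_domain_template my_msi_template = {
 *		.chip = {
 *			.name			= "my-msi",
 *			.irq_mask		= my_msi_mask,
 *			.irq_unmask		= my_msi_unmask,
 *			.irq_write_msi_msg	= my_msi_write_msg,
 *		},
 *		.info = {
 *			.flags		= MSI_FLAG_USE_DEF_DOM_OPS |
 *					  MSI_FLAG_USE_DEF_CHIP_OPS |
 *					  MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
 *					  MSI_FLAG_FREE_MSI_DESCS,
 *			.bus_token	= DOMAIN_BUS_DEVICE_MSI,
 *		},
 *	};
 *
 *	if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
 *					  &my_msi_template, MY_MSI_VECTORS,
 *					  NULL, NULL))
 *		return -ENODEV;
 */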

/*
 * Flags for msi_domain_info
 *
 * Bit 0-15:	Generic MSI functionality which is not subject to restriction
 *		by parent domains
 *
 * Bit 16-31:	Functionality which depends on the underlying parent domain and
 *		can be masked out by msi_parent_ops::init_dev_msi_info() when
 *		a device MSI domain is initialized.
 */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 2),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 3),
	/* Populate sysfs on alloc() and destroy it on free() */
	MSI_FLAG_DEV_SYSFS		= (1 << 4),
	/* Allocate simple MSI descriptors */
	MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS	= (1 << 5),
	/* Free MSI descriptors */
	MSI_FLAG_FREE_MSI_DESCS		= (1 << 6),
	/* Use dev->fwnode for MSI device domain creation */
	MSI_FLAG_USE_DEV_FWNODE		= (1 << 7),
	/* Set parent->dev into domain->pm_dev on device domain creation */
	MSI_FLAG_PARENT_PM_DEV		= (1 << 8),
	/* Support for parent mask/unmask */
	MSI_FLAG_PCI_MSI_MASK_PARENT	= (1 << 9),

	/* Mask for the generic functionality */
	MSI_GENERIC_FLAGS_MASK		= GENMASK(15, 0),

	/* Mask for the domain specific functionality */
	MSI_DOMAIN_FLAGS_MASK		= GENMASK(31, 16),

	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 16),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 17),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 18),
	/* MSI-X entries must be contiguous */
	MSI_FLAG_MSIX_CONTIGUOUS	= (1 << 19),
	/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
	MSI_FLAG_PCI_MSIX_ALLOC_DYN	= (1 << 20),
	/* PCI MSIs cannot be steered separately to CPU cores */
	MSI_FLAG_NO_AFFINITY		= (1 << 21),
	/* Inhibit usage of entry masking */
	MSI_FLAG_NO_MASK		= (1 << 22),
};
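
/*
 * Illustrative sketch (not part of this header): a global PCI/MSI domain as
 * an irqchip driver might stack it on top of its parent domain with
 * pci_msi_create_irq_domain() (declared further down). The interrupt chip,
 * the fwnode and the parent domain variables are assumptions made up for
 * the example:
 *
 *	static struct msi_domain_info my_pci_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 *			  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
 *		.chip	= &my_pci_msi_chip,
 *	};
 *
 *	msi_domain = pci_msi_create_irq_domain(fwnode, &my_pci_msi_domain_info,
 *					       parent_domain);
 *	if (!msi_domain)
 *		return -ENOMEM;
 */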

/**
 * struct msi_parent_ops - MSI parent domain callbacks and configuration info
 *
 * @supported_flags:	Required: The supported MSI flags of the parent domain
 * @required_flags:	Optional: The required MSI flags of the parent MSI domain
 * @bus_select_token:	Optional: The bus token of the real parent domain for
 *			irq_domain::select()
 * @bus_select_mask:	Optional: A mask of supported BUS_DOMAINs for
 *			irq_domain::select()
 * @prefix:		Optional: Prefix for the domain and chip name
 * @init_dev_msi_info:	Required: Callback for MSI parent domains to setup parent
 *			domain specific domain flags, domain ops and interrupt chip
 *			callbacks when a per device domain is created.
 */
struct msi_parent_ops {
	u32		supported_flags;
	u32		required_flags;
	u32		bus_select_token;
	u32		bus_select_mask;
	const char	*prefix;
	bool		(*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
					     struct irq_domain *msi_parent_domain,
					     struct msi_domain_info *msi_child_info);
};

bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *msi_parent_domain,
				  struct msi_domain_info *msi_child_info);

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);

bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
				  const struct msi_domain_template *template,
				  unsigned int hwsize, void *domain_data,
				  void *chip_data);
void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);

bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
				 enum irq_domain_bus_token bus_token);

int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last);
int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last);
int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);

struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
				       const struct irq_affinity_desc *affdesc,
				       union msi_instance_cookie *cookie);

void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
				       unsigned int first, unsigned int last);
void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
				unsigned int first, unsigned int last);
void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);

struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

/* Per device platform MSI */
int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
					    irq_write_msi_msg_t write_msi_msg);
void platform_device_msi_free_irqs_all(struct device *dev);

bool msi_device_has_isolated_msi(struct device *dev);

static inline int msi_domain_alloc_irqs(struct device *dev, unsigned int domid, int nirqs)
{
	return msi_domain_alloc_irqs_range(dev, domid, 0, nirqs - 1);
}
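
/*
 * Illustrative sketch (not part of this header): dynamically allocating a
 * single interrupt at an arbitrary free index with an instance cookie, as an
 * IMS-like user of a per device MSI domain might do it. The cookie value and
 * the error handling convention shown here are assumptions of the sketch:
 *
 *	union msi_instance_cookie icookie = { .value = my_queue_id };
 *	struct msi_map map;
 *
 *	map = msi_domain_alloc_irq_at(dev, MSI_DEFAULT_DOMAIN, MSI_ANY_INDEX,
 *				      NULL, &icookie);
 *	if (map.index < 0)
 *		return map.index;
 *
 *	// map.virq is the Linux interrupt number, e.g. for request_irq()
 */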

#else /* CONFIG_GENERIC_MSI_IRQ */
static inline bool msi_device_has_isolated_msi(struct device *dev)
{
	/*
	 * Arguably if the platform does not enable MSI support then it has
	 * "isolated MSI", as an interrupt controller that cannot receive MSIs
	 * is inherently isolated by our definition. The default definition for
	 * arch_is_isolated_msi() is conservative and returns false anyhow.
	 */
	return arch_is_isolated_msi();
}
#endif /* CONFIG_GENERIC_MSI_IRQ */

/* PCI specific interfaces */
#ifdef CONFIG_PCI_MSI
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif /* !CONFIG_PCI_MSI */

#endif /* LINUX_MSI_H */