/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	pci.h
 *
 *	PCI defines and function prototypes
 *	Copyright 1994, Drew Eckhardt
 *	Copyright 1997--1999 Martin Mares <[email protected]>
 *
 *	PCI Express ASPM defines and function prototypes
 *	Copyright (c) 2007 Intel Corp.
 *		Zhang Yanmin ([email protected])
 *		Shaohua Li ([email protected])
 *
 *	For more information, please consult the following manuals (look at
 *	http://www.pcisig.com/ for how to get them):
 *
 *	PCI BIOS Specification
 *	PCI Local Bus Specification
 *	PCI to PCI Bridge Specification
 *	PCI Express Specification
 *	PCI System Design Guide
 */
#ifndef LINUX_PCI_H
#define LINUX_PCI_H

#include <linux/args.h>
#include <linux/mod_devicetable.h>

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <linux/msi_api.h>
#include <uapi/linux/pci.h>

#include <linux/pci_ids.h>

#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
			       PCI_STATUS_SIG_SYSTEM_ERROR | \
			       PCI_STATUS_REC_MASTER_ABORT | \
			       PCI_STATUS_REC_TARGET_ABORT | \
			       PCI_STATUS_SIG_TARGET_ABORT | \
			       PCI_STATUS_PARITY)

/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
#define PCI_NUM_RESET_METHODS 7

#define PCI_RESET_PROBE		true
#define PCI_RESET_DO_RESET	false

/*
 * The PCI interface treats multi-function devices as independent
 * devices.  The slot/function address of each device is encoded
 * in a single byte as follows:
 *
 *	7:3 = slot
 *	2:0 = function
 *
 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
 * In the interest of not exposing interfaces to user-space unnecessarily,
 * the following kernel-only defines are being added here.
 */
#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)
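
/*
 * Illustrative example (not part of the kernel API): composing and
 * decomposing the 16-bit devid built by PCI_DEVID().  The upper byte is the
 * bus number, the lower byte the devfn encoded as described above.
 *
 *	u16 devid = PCI_DEVID(pdev->bus->number, pdev->devfn);
 *	u8 busnr  = PCI_BUS_NUM(devid);
 *	u8 slot   = PCI_SLOT(devid & 0xff);
 *	u8 func   = PCI_FUNC(devid & 0xff);
 */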

/* pci_slot represents a physical slot */
struct pci_slot {
	struct pci_bus		*bus;		/* Bus this slot is on */
	struct list_head	list;		/* Node in list of slots */
	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
	struct kobject		kobj;
};

static inline const char *pci_slot_name(const struct pci_slot *slot)
{
	return kobject_name(&slot->kobj);
}

/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
	pci_mmap_io,
	pci_mmap_mem
};

/* For PCI devices, the region numbers are assigned this way: */
enum {
	/* #0-5: standard PCI resources */
	PCI_STD_RESOURCES,
	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,

	/* #6: expansion ROM resource */
	PCI_ROM_RESOURCE,

	/* Device-specific resources */
#ifdef CONFIG_PCI_IOV
	PCI_IOV_RESOURCES,
	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

/* PCI-to-PCI (P2P) bridge windows */
#define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
#define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
#define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)

/* CardBus bridge windows */
#define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
#define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
#define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
#define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)

/* Total number of bridge resources for P2P and CardBus */
#define PCI_BRIDGE_RESOURCE_NUM 4

	/* Resources assigned to buses behind the bridge */
	PCI_BRIDGE_RESOURCES,
	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
				  PCI_BRIDGE_RESOURCE_NUM - 1,

	/* Total resources associated with a PCI device */
	PCI_NUM_RESOURCES,

	/* Preserve this for compatibility */
	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};

/**
 * enum pci_interrupt_pin - PCI INTx interrupt values
 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
 * @PCI_INTERRUPT_INTA: PCI INTA pin
 * @PCI_INTERRUPT_INTB: PCI INTB pin
 * @PCI_INTERRUPT_INTC: PCI INTC pin
 * @PCI_INTERRUPT_INTD: PCI INTD pin
 *
 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
 * PCI_INTERRUPT_PIN register.
 */
enum pci_interrupt_pin {
	PCI_INTERRUPT_UNKNOWN,
	PCI_INTERRUPT_INTA,
	PCI_INTERRUPT_INTB,
	PCI_INTERRUPT_INTC,
	PCI_INTERRUPT_INTD,
};

/* The number of legacy PCI INTx interrupts */
#define PCI_NUM_INTX	4

/*
 * Reading from a device that doesn't respond typically returns ~0.  A
 * successful read from a device may also return ~0, so you need additional
 * information to reliably identify errors.
 */
#define PCI_ERROR_RESPONSE		(~0ULL)
#define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
#define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
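
/*
 * Illustrative example: PCI_POSSIBLE_ERROR() flags a value that looks like
 * the all-ones error response.  The config accessors used here are declared
 * further down in this header.
 *
 *	u32 val;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *	if (PCI_POSSIBLE_ERROR(val))
 *		return -ENODEV;		(device may have been removed)
 */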

/*
 * pci_power_t values must match the bits in the Capabilities PME_Support
 * and Control/Status PowerState fields in the Power Management capability.
 */
typedef int __bitwise pci_power_t;

#define PCI_D0		((pci_power_t __force) 0)
#define PCI_D1		((pci_power_t __force) 1)
#define PCI_D2		((pci_power_t __force) 2)
#define PCI_D3hot	((pci_power_t __force) 3)
#define PCI_D3cold	((pci_power_t __force) 4)
#define PCI_UNKNOWN	((pci_power_t __force) 5)
#define PCI_POWER_ERROR	((pci_power_t __force) -1)

/* Remember to update this when the list above changes! */
extern const char *pci_power_names[];

static inline const char *pci_power_name(pci_power_t state)
{
	return pci_power_names[1 + (__force int) state];
}

/**
 * typedef pci_channel_state_t
 *
 * The pci_channel state describes connectivity between the CPU and
 * the PCI device.  If some PCI bus between here and the PCI device
 * has crashed or locked up, this info is reflected here.
 */
typedef unsigned int __bitwise pci_channel_state_t;

enum {
	/* I/O channel is in normal state */
	pci_channel_io_normal = (__force pci_channel_state_t) 1,

	/* I/O to channel is blocked */
	pci_channel_io_frozen = (__force pci_channel_state_t) 2,

	/* PCI card is dead */
	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};

typedef unsigned int __bitwise pcie_reset_state_t;

enum pcie_reset_state {
	/* Reset is NOT asserted (Use to deassert reset) */
	pcie_deassert_reset = (__force pcie_reset_state_t) 1,

	/* Use #PERST to reset PCIe device */
	pcie_warm_reset = (__force pcie_reset_state_t) 2,

	/* Use PCIe Hot Reset to reset device */
	pcie_hot_reset = (__force pcie_reset_state_t) 3
};

typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
	/* Device configuration is irrevocably lost if disabled into D3 */
	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
	/* Provide indication device is assigned by a Virtual Machine Manager */
	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
	/* Flag for quirk use to store if quirk-specific ACS is enabled */
	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
	/* Do not use bus resets for device */
	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
	/* Do not use PM reset even if device advertises NoSoftRst- */
	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
	/* Get VPD from function 0 VPD */
	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
	/* A non-root bridge where translation occurs, stop alias search here */
	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
	/* Do not use FLR even if device advertises PCI_AF_CAP */
	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
	/* Don't use Relaxed Ordering for TLPs directed at this device */
	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
	/* Device does honor MSI masking despite saying otherwise */
	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};

enum pci_irq_reroute_variant {
	INTEL_IRQ_REROUTE_VARIANT = 1,
	MAX_IRQ_REROUTE_VARIANTS = 3
};

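/*
 * Illustrative example: pci_power_name() maps a pci_power_t to a printable
 * string, e.g. for diagnostics (dev_info() comes via <linux/device.h>, and
 * current_state is a struct pci_dev field defined below):
 *
 *	dev_info(&pdev->dev, "power state: %s\n",
 *		 pci_power_name(pdev->current_state));
 */
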
typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
};

/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV	= 0x00,
	PCIE_LNK_X1		= 0x01,
	PCIE_LNK_X2		= 0x02,
	PCIE_LNK_X4		= 0x04,
	PCIE_LNK_X8		= 0x08,
	PCIE_LNK_X12		= 0x0c,
	PCIE_LNK_X16		= 0x10,
	PCIE_LNK_X32		= 0x20,
	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
};

/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
	PCI_SPEED_33MHz			= 0x00,
	PCI_SPEED_66MHz			= 0x01,
	PCI_SPEED_66MHz_PCIX		= 0x02,
	PCI_SPEED_100MHz_PCIX		= 0x03,
	PCI_SPEED_133MHz_PCIX		= 0x04,
	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
	PCI_SPEED_66MHz_PCIX_266	= 0x09,
	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
	AGP_UNKNOWN			= 0x0c,
	AGP_1X				= 0x0d,
	AGP_2X				= 0x0e,
	AGP_4X				= 0x0f,
	AGP_8X				= 0x10,
	PCI_SPEED_66MHz_PCIX_533	= 0x11,
	PCI_SPEED_100MHz_PCIX_533	= 0x12,
	PCI_SPEED_133MHz_PCIX_533	= 0x13,
	PCIE_SPEED_2_5GT		= 0x14,
	PCIE_SPEED_5_0GT		= 0x15,
	PCIE_SPEED_8_0GT		= 0x16,
	PCIE_SPEED_16_0GT		= 0x17,
	PCIE_SPEED_32_0GT		= 0x18,
	PCIE_SPEED_64_0GT		= 0x19,
	PCI_SPEED_UNKNOWN		= 0xff,
};

enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);

struct pci_vpd {
	struct mutex	lock;
	unsigned int	len;
	u8		cap;
};

struct irq_affinity;
struct pcie_link_state;
struct pci_sriov;
struct pci_p2pdma;
struct rcec_ea;

/* The pci_dev structure describes PCI devices */
struct pci_dev {
	struct list_head bus_list;	/* Node in per-bus list */
	struct pci_bus	*bus;		/* Bus this device is on */
	struct pci_bus	*subordinate;	/* Bus this device bridges to */

	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
	struct pci_slot	*slot;		/* Physical slot this device is in */

	unsigned int	devfn;		/* Encoded device & function index */
	unsigned short	vendor;
	unsigned short	device;
	unsigned short	subsystem_vendor;
	unsigned short	subsystem_device;
	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
	u8		revision;	/* PCI revision, low byte of class word */
	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
	u16		aer_cap;	/* AER capability offset */
	struct aer_stats *aer_stats;	/* AER stats for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
	struct pci_dev	*rcec;		/* Associated RCEC device */
#endif
	u32		devcap;		/* PCIe Device Capabilities */
	u8		pcie_cap;	/* PCIe capability offset */
	u8		msi_cap;	/* MSI capability offset */
	u8		msix_cap;	/* MSI-X capability offset */
	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
	u8		rom_base_reg;	/* Config register controlling ROM */
	u8		pin;		/* Interrupt pin this device uses */
	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */

	struct pci_driver *driver;	/* Driver bound to this device */
	u64		dma_mask;	/* Mask of the bits of bus address this
					   device implements.  Normally this is
					   0xffffffff.  You only need to change
					   this if your device has broken DMA
					   or supports 64-bit transfers.  */

	struct device_dma_parameters dma_parms;

	pci_power_t	current_state;	/* Current operating state.  In ACPI,
					   this is D0-D3, D0 being fully
					   functional, and D3 being off. */
	u8		pm_cap;		/* PM capability offset */
	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
					   can be generated */
	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
	unsigned int	d1_support:1;	/* Low power state D1 is supported */
	unsigned int	d2_support:1;	/* Low power state D2 is supported */
	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
						   decoding during BAR sizing */
	unsigned int	wakeup_prepared:1;
	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
						      controlled exclusively by
						      user sysfs */
	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
						   bit manually */
	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */

#ifdef CONFIG_PCIEASPM
	struct pcie_link_state	*link_state;	/* ASPM link state */
	u16		l1ss;		/* L1SS Capability pointer */
	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
					   supported from root to here */
#endif
	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
	unsigned int	eetlp_prefix_path:1;	/* End-to-End TLP Prefix */

	pci_channel_state_t error_state;	/* Current connectivity state */
	struct device	dev;			/* Generic device interface */

	int		cfg_size;		/* Size of config space */

	/*
	 * Instead of touching interrupt line and base address registers
	 * directly, use the values stored here.  They might be different!
	 */
	unsigned int	irq;
	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */

	bool		match_driver;		/* Skip attaching driver */

	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	io_window:1;		/* Bridge has I/O window */
	unsigned int	pref_window:1;		/* Bridge has pref mem window */
	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
	unsigned int	block_cfg_access:1;	/* Config space access blocked */
	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
	unsigned int	msi_enabled:1;
	unsigned int	msix_enabled:1;
	unsigned int	ari_enabled:1;		/* ARI forwarding */
	unsigned int	ats_enabled:1;		/* Address Translation Svc */
	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
	unsigned int	pri_enabled:1;		/* Page Request Interface */
	unsigned int	is_managed:1;		/* Managed via devres */
	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
	unsigned int	needs_freset:1;		/* Requires fundamental reset */
	unsigned int	state_saved:1;
	unsigned int	is_physfn:1;
	unsigned int	is_virtfn:1;
	unsigned int	is_hotplug_bridge:1;
	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
	/*
	 * Devices marked being untrusted are the ones that can potentially
	 * execute DMA attacks and similar.  They are typically connected
	 * through external ports such as Thunderbolt but not limited to
	 * that.  When an IOMMU is enabled they should be getting full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	untrusted:1;
	/*
	 * Info from the platform, e.g., ACPI or device tree, may mark a
	 * device as "external-facing".  An external-facing device is
	 * itself internal but devices downstream from it are external.
	 */
	unsigned int	external_facing:1;
	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
	unsigned int	irq_managed:1;
	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
	unsigned int	is_probed:1;		/* Device probing in progress */
	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
	pci_dev_flags_t dev_flags;
	atomic_t	enable_cnt;	/* pci_enable_device has been called */

	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
	u32		saved_config_space[16]; /* Config space saved at suspend time */
	struct hlist_head saved_cap_space;
	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE];	   /* sysfs file for resources */
	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
	u16		ptm_cap;		/* PTM Capability */
	unsigned int	ptm_root:1;
	unsigned int	ptm_enabled:1;
	u8		ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
	void __iomem	*msix_base;
	raw_spinlock_t	msi_lock;
#endif
	struct pci_vpd	vpd;
#ifdef CONFIG_PCIE_DPC
	u16		dpc_cap;
	unsigned int	dpc_rp_extensions:1;
	u8		dpc_rp_log_size;
#endif
#ifdef CONFIG_PCI_ATS
	union {
		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
		struct pci_dev		*physfn;	/* VF: related PF */
	};
	u16		ats_cap;	/* ATS Capability offset */
	u8		ats_stu;	/* ATS Smallest Translation Unit */
#endif
#ifdef CONFIG_PCI_PRI
	u16		pri_cap;	/* PRI Capability offset */
	u32		pri_reqs_alloc;	/* Number of PRI requests allocated */
	unsigned int	pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
	u16		pasid_cap;	/* PASID Capability offset */
	u16		pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
	struct pci_p2pdma __rcu *p2pdma;
#endif
#ifdef CONFIG_PCI_DOE
	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
#endif
	u16		acs_cap;	/* ACS Capability offset */
	phys_addr_t	rom;		/* Physical address if not from BAR */
	size_t		romlen;		/* Length if not from BAR */
	/*
	 * Driver name to force a match.  Do not set directly, because core
	 * frees it.  Use driver_set_override() to set or clear it.
	 */
	const char	*driver_override;

	unsigned long	priv_flags;	/* Private flags for the PCI driver */

	/* These methods index pci_reset_fn_methods[] */
	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
};

static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		dev = dev->physfn;
#endif
	return dev;
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus);

#define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)

static inline int pci_channel_offline(struct pci_dev *pdev)
{
	return (pdev->error_state != pci_channel_io_normal);
}
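
/*
 * Illustrative example: for_each_pci_dev() iterates over every PCI device
 * via pci_get_device(), which manages the reference count of the cursor.
 * The cursor must start out NULL.
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev) {
 *		if (pci_channel_offline(pdev))
 *			continue;
 *		pr_info("found %04x:%04x\n", pdev->vendor, pdev->device);
 *	}
 */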

/*
 * Currently in ACPI spec, for each PCI host bridge, PCI Segment
 * Group number is limited to a 16-bit value, therefore (int)-1 is
 * not a valid PCI domain number, and can be used as a sentinel
 * value indicating ->domain_nr is not set by the driver (and
 * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
 * pci_bus_find_domain_nr()).
 */
#define PCI_DOMAIN_NR_NOT_SET (-1)

struct pci_host_bridge {
	struct device	dev;
	struct pci_bus	*bus;		/* Root bus */
	struct pci_ops	*ops;
	struct pci_ops	*child_ops;
	void		*sysdata;
	int		busnr;
	int		domain_nr;
	struct list_head windows;	/* resource_entry */
	struct list_head dma_ranges;	/* dma ranges resource list */
	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
	int (*map_irq)(const struct pci_dev *, u8, u8);
	void (*release_fn)(struct pci_host_bridge *);
	void		*release_data;
	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
	unsigned int	no_ext_tags:1;		/* No Extended Tags */
	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
	unsigned int	native_aer:1;		/* OS may use PCIe AER */
	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
	unsigned int	native_pme:1;		/* OS may use PCIe PME */
	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
	unsigned int	size_windows:1;		/* Enable root bus sizing */
	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */

	/* Resource alignment requirements */
	resource_size_t (*align_resource)(struct pci_dev *dev,
			const struct resource *res,
			resource_size_t start,
			resource_size_t size,
			resource_size_t align);
	unsigned long	private[] ____cacheline_aligned;
};

#define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)

static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
{
	return (void *)bridge->private;
}

static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
	return container_of(priv, struct pci_host_bridge, private);
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv);
void pci_free_host_bridge(struct pci_host_bridge *bridge);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);

void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
		void (*release_fn)(struct pci_host_bridge *),
		void *release_data);

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
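
/*
 * Illustrative sketch of a host controller probe (the my_pcie names are
 * hypothetical): the bridge is allocated with room for driver-private data,
 * which is retrieved again with pci_host_bridge_priv().  pci_host_probe()
 * is declared further down in this header.
 *
 *	struct pci_host_bridge *bridge;
 *	struct my_pcie *priv;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv));
 *	if (!bridge)
 *		return -ENOMEM;
 *	priv = pci_host_bridge_priv(bridge);
 *	bridge->ops = &my_pcie_ops;
 *	bridge->sysdata = priv;
 *	return pci_host_probe(bridge);
 */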

/*
 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
 * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
 * buses below host bridges or subtractive decode bridges) go in the list.
 * Use pci_bus_for_each_resource() to iterate through all the resources.
 */

/*
 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
 * and there's no way to program the bridge with the details of the window.
 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
 * decode bit set, because they are explicit and can be programmed with _SRS.
 */
#define PCI_SUBTRACTIVE_DECODE	0x1

struct pci_bus_resource {
	struct list_head	list;
	struct resource		*res;
	unsigned int		flags;
};

#define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */

struct pci_bus {
	struct list_head node;		/* Node in list of buses */
	struct pci_bus	*parent;	/* Parent bus this bridge is on */
	struct list_head children;	/* List of child buses */
	struct list_head devices;	/* List of devices on this bus */
	struct pci_dev	*self;		/* Bridge device as seen by parent */
	struct list_head slots;		/* List of slots on this bus;
					   protected by pci_slot_mutex */
	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
	struct list_head resources;	/* Address space routed to this bus */
	struct resource busn_res;	/* Bus numbers routed to this bus */

	struct pci_ops	*ops;		/* Configuration access functions */
	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */

	unsigned char	number;		/* Bus number */
	unsigned char	primary;	/* Number of primary bridge */
	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	int		domain_nr;
#endif

	char		name[48];

	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
	struct device	*bridge;
	struct device	dev;
	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
	struct bin_attribute	*legacy_mem;	/* Legacy mem */
	unsigned int		is_added:1;
	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
};

#define to_pci_bus(n)	container_of(n, struct pci_bus, dev)

static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}

/*
 * Returns true if the PCI bus is root (behind host-PCI bridge),
 * false otherwise
 *
 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
 * This is incorrect because "virtual" buses added for SR-IOV (via
 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
 */
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
	return !(pbus->parent);
}

/**
 * pci_is_bridge - check if the PCI device is a bridge
 * @dev: PCI device
 *
 * Return true if the PCI device is a bridge, whether or not it has a
 * subordinate bus.
 */
static inline bool pci_is_bridge(struct pci_dev *dev)
{
	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}

/**
 * pci_is_vga - check if the PCI device is a VGA device
 *
 * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
 * VGA Base Class and Sub-Classes:
 *
 *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
 *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
 *
 * Return true if the PCI device is a VGA device and uses the legacy VGA
 * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
 * aliases).
 */
static inline bool pci_is_vga(struct pci_dev *pdev)
{
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		return true;

	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
		return true;

	return false;
}

#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else

static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
{
	dev = pci_physfn(dev);
	if (pci_is_root_bus(dev->bus))
		return NULL;

	return dev->bus->self;
}

#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif

/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* Assume already errno */

	switch (err) {
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return -ENOENT;
	case PCIBIOS_BAD_VENDOR_ID:
		return -ENOTTY;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return -ENODEV;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return -EFAULT;
	case PCIBIOS_SET_FAILED:
		return -EIO;
	case PCIBIOS_BUFFER_TOO_SMALL:
		return -ENOSPC;
	}

	return -ERANGE;
}

/* Low-level architecture-dependent routines */

struct pci_ops {
	int (*add_bus)(struct pci_bus *bus);
	void (*remove_bus)(struct pci_bus *bus);
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};

/*
 * ACPI needs to be able to access PCI config space before we've done a
 * PCI bus scan and created pci_bus structures.
 */
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *val);
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 val);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif

struct pci_bus_region {
	pci_bus_addr_t	start;
	pci_bus_addr_t	end;
};

struct pci_dynids {
	spinlock_t		lock;	/* Protects list, index */
	struct list_head	list;	/* For IDs added at runtime */
};
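
/*
 * Illustrative example: the config accessors declared later in this header
 * return PCIBIOS_* codes; pcibios_err_to_errno() converts them to a normal
 * errno before they are passed back through non-PCI code.
 *
 *	u16 vendor;
 *	int ret;
 *
 *	ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */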

/*
 * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
 * a set of callbacks in struct pci_error_handlers, that device driver
 * will be notified of PCI bus errors, and will be driven to recovery
 * when an error occurs.
 */

typedef unsigned int __bitwise pci_ers_result_t;

enum pci_ers_result {
	/* No result/none/not supported in device driver */
	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

	/* Device driver can recover without slot reset */
	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

	/* Device driver wants slot to be reset */
	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

	/* Device has completely failed, is unrecoverable */
	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

	/* Device driver is fully recovered and operational */
	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

	/* No AER capabilities registered for the driver */
	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};

/* PCI bus error event callbacks */
struct pci_error_handlers {
	/* PCI bus error detected on this device */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
					   pci_channel_state_t error);

	/* MMIO has been re-enabled, but not DMA */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

	/* PCI slot has been reset */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

	/* PCI function reset prepare or completed */
	void (*reset_prepare)(struct pci_dev *dev);
	void (*reset_done)(struct pci_dev *dev);

	/* Device driver may resume normal operations */
	void (*resume)(struct pci_dev *dev);

	/* Allow device driver to record more details of a correctable error */
	void (*cor_error_detected)(struct pci_dev *dev);
};
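
/*
 * Illustrative sketch (the my_* callbacks are hypothetical): a driver opts
 * in to error recovery by pointing struct pci_driver::err_handler at a
 * table like this; see Documentation/PCI/pci-error-recovery.rst.
 *
 *	static const struct pci_error_handlers my_err_handlers = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,
 *		.resume		= my_resume,
 *	};
 */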

struct module;

/**
 * struct pci_driver - PCI driver structure
 * @node:	List of driver structures.
 * @name:	Driver name.
 * @id_table:	Pointer to table of device IDs the driver is
 *		interested in.  Most drivers should export this
 *		table using MODULE_DEVICE_TABLE(pci,...).
 * @probe:	This probing function gets called (during execution
 *		of pci_register_driver() for already existing
 *		devices or later if a new device gets inserted) for
 *		all PCI devices which match the ID table and are not
 *		"owned" by the other drivers yet.  This function gets
 *		passed a "struct pci_dev \*" for each device whose
 *		entry in the ID table matches the device.  The probe
 *		function returns zero when the driver chooses to
 *		take "ownership" of the device or an error code
 *		(negative number) otherwise.
 *		The probe function always gets called from process
 *		context, so it can sleep.
 * @remove:	The remove() function gets called whenever a device
 *		being handled by this driver is removed (either during
 *		deregistration of the driver or when it's manually
 *		pulled out of a hot-pluggable slot).
 *		The remove function always gets called from process
 *		context, so it can sleep.
 * @suspend:	Put device into low power state.
 * @resume:	Wake device from low power state.
 *		(Please see Documentation/power/pci.rst for descriptions
 *		of PCI Power Management and the related functions.)
 * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
 *		Intended to stop any idling DMA operations.
 *		Useful for enabling wake-on-lan (NIC) or changing
 *		the power state of a device before reboot.
 *		e.g. drivers/net/e100.c.
 * @sriov_configure: Optional driver callback to allow configuration of
 *		number of VFs to enable via sysfs "sriov_numvfs" file.
 * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
 *		vectors on a VF.  Triggered via sysfs "sriov_vf_msix_count".
 *		This will change MSI-X Table Size in the VF Message Control
 *		registers.
 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
 *		MSI-X vectors available for distribution to the VFs.
 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
 * @groups:	Sysfs attribute groups.
 * @dev_groups: Attributes attached to the device that will be
 *		created once it is bound to the driver.
 * @driver:	Driver model structure.
 * @dynids:	List of dynamically added device IDs.
 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
 *		For most device drivers, no need to care about this flag
 *		as long as all DMAs are handled through the kernel DMA API.
 *		For some special ones, for example VFIO drivers, they know
 *		how to manage the DMA themselves and set this flag so that
 *		the IOMMU layer will allow them to setup and manage their
 *		own I/O address space.
 */
struct pci_driver {
	struct list_head	node;
	const char		*name;
	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
	void (*shutdown)(struct pci_dev *dev);
	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);
	const struct pci_error_handlers *err_handler;
	const struct attribute_group **groups;
	const struct attribute_group **dev_groups;
	struct device_driver	driver;
	struct pci_dynids	dynids;
	bool driver_managed_dma;
};

static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
{
	return drv ? container_of(drv, struct pci_driver, driver) : NULL;
}

/**
 * PCI_DEVICE - macro used to describe a specific PCI device
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE(vend,dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
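
/*
 * Illustrative sketch of a minimal driver (the my_* names and the
 * 0x1234/0x5678 IDs are placeholders): the ID table uses PCI_DEVICE(), and
 * registration goes through module_pci_driver(), defined at the end of
 * this header.
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_ids);
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int ret = pcim_enable_device(pdev);
 *
 *		if (ret)
 *			return ret;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *	};
 *	module_pci_driver(my_driver);
 */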

/**
 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
 *                              override_only flags.
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @driver_override: the 32 bit PCI Device override_only
 *
 * This macro is used to create a struct pci_device_id that matches only a
 * driver_override device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
	.subdevice = PCI_ANY_ID, .override_only = (driver_override)

/**
 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
 *                                   "driver_override" PCI device.
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID and the driver_override will be set to
 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
 */
#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)

/**
 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @subvend: the 16 bit PCI Subvendor ID
 * @subdev: the 16 bit PCI Subdevice ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device with subsystem information.
 */
#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev)

/**
 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
 * @dev_class: the class, subclass, prog-if triple for this device
 * @dev_class_mask: the class mask for this device
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI class.  The vendor, device, subvendor, and subdevice
 * fields will be set to PCI_ANY_ID.
 */
#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
	.class = (dev_class), .class_mask = (dev_class_mask), \
	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
 * @vend: the vendor name
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID.  The macro allows the next field to follow as the device
 * private data.
 */
#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0

/**
 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
 * @data: the driver data to be filled
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID.
 */
#define PCI_DEVICE_DATA(vend, dev, data) \
	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
	.driver_data = (kernel_ulong_t)(data)
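
/*
 * Illustrative sketch (MYVENDOR/MYDEV and struct my_chip_info are
 * hypothetical names): PCI_DEVICE_DATA() expands the vendor and device
 * names to PCI_VENDOR_ID_MYVENDOR / PCI_DEVICE_ID_MYVENDOR_MYDEV and fills
 * .driver_data, which probe() reads back from the matched entry.
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE_DATA(MYVENDOR, MYDEV, &my_chip_info) },
 *		{ }
 *	};
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		const struct my_chip_info *info = (void *)id->driver_data;
 *		...
 *	}
 */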

enum {
	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
};

#define PCI_IRQ_LEGACY		(1 << 0) /* Allow legacy interrupts */
#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */

/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI

extern unsigned int pci_flags;

static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }

void pcie_bus_configure_settings(struct pci_bus *bus);

enum pcie_bus_config_types {
	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
};

extern enum pcie_bus_config_types pcie_bus_config;

extern struct bus_type pci_bus_type;

/* Do NOT directly access these two variables, unless you are arch-specific PCI
 * code, or PCI core code. */
extern struct list_head pci_root_buses;	/* List of all known PCI buses */
/* Some device drivers need to know if PCI is initiated */
int no_pci_devices(void);

void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
/* Architecture-specific versions may override this (weak) */
char *pcibios_setup(char *str);

/* Used only when drivers/pci/setup.c is used */
resource_size_t pcibios_align_resource(void *, const struct resource *,
				resource_size_t,
				resource_size_t);

/* Weak but can be overridden by arch */
void pci_fixup_cardbus(struct pci_bus *);

/* Generic PCI functions used internally */

void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
			     struct resource *res);
void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
			     struct pci_bus_region *region);
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
				    struct pci_ops *ops, void *sysdata,
				    struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
				  struct pci_ops *ops, void *sysdata,
				  struct list_head *resources);
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
				 const char *name,
				 struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
#ifdef CONFIG_SYSFS
void pci_dev_assign_slot(struct pci_dev *dev);
#else
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
#endif
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
unsigned int pci_scan_child_bus(struct pci_bus *bus);
void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))

/* Generic PCI functions exported to card drivers */

u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
u8 pci_find_capability(struct pci_dev *dev, int cap);
u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);

u64 pci_get_dsn(struct pci_dev *dev);

struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
			       struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
			       unsigned int ss_vendor, unsigned int ss_device,
			       struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
					    unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);

int pci_dev_present(const struct pci_device_id *ids);
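
/*
 * Illustrative example: the pci_get_*() lookups return a device with its
 * reference count raised; drop it with pci_dev_put() when done.  The
 * 0x1234/0x5678 IDs are placeholders.
 *
 *	struct pci_dev *pdev = pci_get_device(0x1234, 0x5678, NULL);
 *
 *	if (pdev) {
 *		... use pdev ...
 *		pci_dev_put(pdev);
 *	}
 */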

int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 *val);
int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 *val);
int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 *val);
int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
			      int where, u8 val);
int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
			      int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
			       int where, u32 val);

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val);
int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val);
int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val);
int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val);

struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);

int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
						u16 clear, u16 set);
int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
					      u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set);

/**
 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
 * @dev:	PCI device structure of the PCI Express device
 * @pos:	PCI Express Capability Register
 * @clear:	Clear bitmask
 * @set:	Set bitmask
 *
 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
 * bitmasks on PCI Express Capability Register at @pos.  Certain PCI Express
 * Capability Registers are accessed concurrently in RMW fashion, hence
 * require locking which is handled transparently to the caller.
 */
static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
						      int pos,
						      u16 clear, u16 set)
{
	switch (pos) {
	case PCI_EXP_LNKCTL:
	case PCI_EXP_RTCTL:
		return pcie_capability_clear_and_set_word_locked(dev, pos,
								 clear, set);
	default:
		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
								   clear, set);
	}
}

static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
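
/*
 * Illustrative example: clearing and setting Link Control bits through the
 * RMW helpers above.  PCI_EXP_LNKCTL is one of the registers for which
 * pcie_capability_clear_and_set_word() transparently takes the lock.
 *
 *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
 *				   PCI_EXP_LNKCTL_ASPMC);
 *	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
 *				 PCI_EXP_LNKCTL_RL);
 */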

/* User-space driven config access */
int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);

int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_io(struct pci_dev *dev);
int __must_check pci_enable_device_mem(struct pci_dev *dev);
int __must_check pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
void pcim_pin_device(struct pci_dev *pdev);

static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
{
	/*
	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
	 * writable and no quirk has marked the feature broken.
	 */
	return !pdev->broken_intx_masking;
}

static inline int pci_is_enabled(struct pci_dev *pdev)
{
	return (atomic_read(&pdev->enable_cnt) > 0);
}

static inline int pci_is_managed(struct pci_dev *pdev)
{
	return pdev->is_managed;
}

void pci_disable_device(struct pci_dev *dev);

extern unsigned int pcibios_max_latency;
void pci_set_master(struct pci_dev *dev);
void pci_clear_master(struct pci_dev *dev);

int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
int pci_set_cacheline_size(struct pci_dev *dev);
int __must_check pci_set_mwi(struct pci_dev *dev);
int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_disable_parity(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width);
void pcie_print_link_status(struct pci_dev *dev);
int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_probe_reset_bus(struct pci_bus *bus);
int pci_reset_bus(struct pci_dev *dev);
void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
void pci_release_resource(struct pci_dev *dev, int resno);
static inline int pci_rebar_bytes_to_size(u64 bytes)
{
	bytes = roundup_pow_of_two(bytes);

	/* Return BAR size as defined in the resizable BAR specification */
	return max(ilog2(bytes), 20) - 20;
}

u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
int pci_status_get_and_clear_errors(struct pci_dev *pdev);
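
/*
 * Illustrative worked values for pci_rebar_bytes_to_size() above: the
 * Resizable BAR encoding is log2(bytes) - 20 with a 1 MB floor, so 1 MB
 * maps to 0, 8 MB to 3 and 256 MB to 8.
 */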

int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
		const char *fmt, ...);
void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);

/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);

/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
void pci_restore_state(struct pci_dev *dev);
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state);
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
int pci_wake_from_d3(struct pci_dev *dev, bool enable);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);
bool pci_dev_run_wake(struct pci_dev *dev);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
void pci_resume_bus(struct pci_bus *bus);
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);

/* For use by arch with custom probe code */
void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);

/* Functions for PCI Hotplug drivers to use */
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
unsigned int pci_rescan_bus(struct pci_bus *bus);
void pci_lock_rescan_remove(void);
void pci_unlock_rescan_remove(void);

/* Vital Product Data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);

/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
void pci_bus_assign_resources(const struct pci_bus *bus);
void pci_bus_claim_resources(struct pci_bus *bus);
void pci_bus_size_bridges(struct pci_bus *bus);
int pci_claim_resource(struct pci_dev *, int);
int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
#define HAVE_PCI_REQ_REGIONS	2
int __must_check pci_request_regions(struct pci_dev *, const char *);
pci_request_regions(struct pci_dev *, const char *); 1467 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); 1468 void pci_release_regions(struct pci_dev *); 1469 int __must_check pci_request_region(struct pci_dev *, int, const char *); 1470 void pci_release_region(struct pci_dev *, int); 1471 int pci_request_selected_regions(struct pci_dev *, int, const char *); 1472 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); 1473 void pci_release_selected_regions(struct pci_dev *, int); 1474 1475 static inline __must_check struct resource * 1476 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset, 1477 unsigned int len, const char *name) 1478 { 1479 return __request_region(&pdev->driver_exclusive_resource, offset, len, 1480 name, IORESOURCE_EXCLUSIVE); 1481 } 1482 1483 static inline void pci_release_config_region(struct pci_dev *pdev, 1484 unsigned int offset, 1485 unsigned int len) 1486 { 1487 __release_region(&pdev->driver_exclusive_resource, offset, len); 1488 } 1489 1490 /* drivers/pci/bus.c */ 1491 void pci_add_resource(struct list_head *resources, struct resource *res); 1492 void pci_add_resource_offset(struct list_head *resources, struct resource *res, 1493 resource_size_t offset); 1494 void pci_free_resource_list(struct list_head *resources); 1495 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, 1496 unsigned int flags); 1497 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); 1498 void pci_bus_remove_resources(struct pci_bus *bus); 1499 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res); 1500 int devm_request_pci_bus_resources(struct device *dev, 1501 struct list_head *resources); 1502 1503 /* Temporary until new and working PCI SBR API in place */ 1504 int pci_bridge_secondary_bus_reset(struct pci_dev *dev); 1505 1506 #define __pci_bus_for_each_res0(bus, res, ...) \ 1507 for (unsigned int __b = 0; \ 1508 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \ 1509 __b++) 1510 1511 #define __pci_bus_for_each_res1(bus, res, __b) \ 1512 for (__b = 0; \ 1513 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \ 1514 __b++) 1515 1516 /** 1517 * pci_bus_for_each_resource - iterate over PCI bus resources 1518 * @bus: the PCI bus 1519 * @res: pointer to the current resource 1520 * @...: optional index of the current resource 1521 * 1522 * Iterate over PCI bus resources. The first part is to go over PCI bus 1523 * resource array, which has at most the %PCI_BRIDGE_RESOURCE_NUM entries. 1524 * After that continue with the separate list of the additional resources, 1525 * if not empty. That's why the Logical OR is being used. 1526 * 1527 * Possible usage: 1528 * 1529 * struct pci_bus *bus = ...; 1530 * struct resource *res; 1531 * unsigned int i; 1532 * 1533 * // With optional index 1534 * pci_bus_for_each_resource(bus, res, i) 1535 * pr_info("PCI bus resource[%u]: %pR\n", i, res); 1536 * 1537 * // Without index 1538 * pci_bus_for_each_resource(bus, res) 1539 * _do_something_(res); 1540 */ 1541 #define pci_bus_for_each_resource(bus, res, ...) 
\ 1542 CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \ 1543 (bus, res, __VA_ARGS__) 1544 1545 int __must_check pci_bus_alloc_resource(struct pci_bus *bus, 1546 struct resource *res, resource_size_t size, 1547 resource_size_t align, resource_size_t min, 1548 unsigned long type_mask, 1549 resource_size_t (*alignf)(void *, 1550 const struct resource *, 1551 resource_size_t, 1552 resource_size_t), 1553 void *alignf_data); 1554 1555 1556 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, 1557 resource_size_t size); 1558 unsigned long pci_address_to_pio(phys_addr_t addr); 1559 phys_addr_t pci_pio_to_address(unsigned long pio); 1560 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); 1561 int devm_pci_remap_iospace(struct device *dev, const struct resource *res, 1562 phys_addr_t phys_addr); 1563 void pci_unmap_iospace(struct resource *res); 1564 void __iomem *devm_pci_remap_cfgspace(struct device *dev, 1565 resource_size_t offset, 1566 resource_size_t size); 1567 void __iomem *devm_pci_remap_cfg_resource(struct device *dev, 1568 struct resource *res); 1569 1570 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) 1571 { 1572 struct pci_bus_region region; 1573 1574 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]); 1575 return region.start; 1576 } 1577 1578 /* Proper probing supporting hot-pluggable devices */ 1579 int __must_check __pci_register_driver(struct pci_driver *, struct module *, 1580 const char *mod_name); 1581 1582 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */ 1583 #define pci_register_driver(driver) \ 1584 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) 1585 1586 void pci_unregister_driver(struct pci_driver *dev); 1587 1588 /** 1589 * module_pci_driver() - Helper macro for registering a PCI driver 1590 * @__pci_driver: pci_driver struct 1591 * 1592 * Helper macro for PCI drivers which do not do anything special in module 1593 * init/exit. This eliminates a lot of boilerplate. Each module may only 1594 * use this macro once, and calling it replaces module_init() and module_exit() 1595 */ 1596 #define module_pci_driver(__pci_driver) \ 1597 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver) 1598 1599 /** 1600 * builtin_pci_driver() - Helper macro for registering a PCI driver 1601 * @__pci_driver: pci_driver struct 1602 * 1603 * Helper macro for PCI drivers which do not do anything special in their 1604 * init code. This eliminates a lot of boilerplate. Each driver may only 1605 * use this macro once, and calling it replaces device_initcall(...)
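 *
 * A minimal usage sketch (my_pci_driver, my_pci_ids and my_pci_probe are
 * hypothetical driver symbols):
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_pci",
 *		.id_table	= my_pci_ids,
 *		.probe		= my_pci_probe,
 *	};
 *	builtin_pci_driver(my_pci_driver);
 *
 * Modular drivers use module_pci_driver() above in exactly the same way.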
1606 */ 1607 #define builtin_pci_driver(__pci_driver) \ 1608 builtin_driver(__pci_driver, pci_register_driver) 1609 1610 struct pci_driver *pci_dev_driver(const struct pci_dev *dev); 1611 int pci_add_dynid(struct pci_driver *drv, 1612 unsigned int vendor, unsigned int device, 1613 unsigned int subvendor, unsigned int subdevice, 1614 unsigned int class, unsigned int class_mask, 1615 unsigned long driver_data); 1616 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 1617 struct pci_dev *dev); 1618 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, 1619 int pass); 1620 1621 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), 1622 void *userdata); 1623 int pci_cfg_space_size(struct pci_dev *dev); 1624 unsigned char pci_bus_max_busnr(struct pci_bus *bus); 1625 void pci_setup_bridge(struct pci_bus *bus); 1626 resource_size_t pcibios_window_alignment(struct pci_bus *bus, 1627 unsigned long type); 1628 1629 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) 1630 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) 1631 1632 int pci_set_vga_state(struct pci_dev *pdev, bool decode, 1633 unsigned int command_bits, u32 flags); 1634 1635 /* 1636 * Virtual interrupts allow for more interrupts to be allocated 1637 * than the device has interrupts for. These are not programmed 1638 * into the device's MSI-X table and must be handled by some 1639 * other driver means. 1640 */ 1641 #define PCI_IRQ_VIRTUAL (1 << 4) 1642 1643 #define PCI_IRQ_ALL_TYPES \ 1644 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) 1645 1646 #include <linux/dmapool.h> 1647 1648 struct msix_entry { 1649 u32 vector; /* Kernel uses to write allocated vector */ 1650 u16 entry; /* Driver uses to specify entry, OS writes */ 1651 }; 1652 1653 struct msi_domain_template; 1654 1655 #ifdef CONFIG_PCI_MSI 1656 int pci_msi_vec_count(struct pci_dev *dev); 1657 void pci_disable_msi(struct pci_dev *dev); 1658 int pci_msix_vec_count(struct pci_dev *dev); 1659 void pci_disable_msix(struct pci_dev *dev); 1660 void pci_restore_msi_state(struct pci_dev *dev); 1661 int pci_msi_enabled(void); 1662 int pci_enable_msi(struct pci_dev *dev); 1663 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1664 int minvec, int maxvec); 1665 static inline int pci_enable_msix_exact(struct pci_dev *dev, 1666 struct msix_entry *entries, int nvec) 1667 { 1668 int rc = pci_enable_msix_range(dev, entries, nvec, nvec); 1669 if (rc < 0) 1670 return rc; 1671 return 0; 1672 } 1673 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 1674 unsigned int max_vecs, unsigned int flags); 1675 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1676 unsigned int max_vecs, unsigned int flags, 1677 struct irq_affinity *affd); 1678 1679 bool pci_msix_can_alloc_dyn(struct pci_dev *dev); 1680 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index, 1681 const struct irq_affinity_desc *affdesc); 1682 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map); 1683 1684 void pci_free_irq_vectors(struct pci_dev *dev); 1685 int pci_irq_vector(struct pci_dev *dev, unsigned int nr); 1686 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); 1687 bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template, 1688 unsigned int hwsize, void *data); 1689 struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie, 1690 const struct irq_affinity_desc *affdesc); 1691 void 
pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map); 1692 1693 #else 1694 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1695 static inline void pci_disable_msi(struct pci_dev *dev) { } 1696 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1697 static inline void pci_disable_msix(struct pci_dev *dev) { } 1698 static inline void pci_restore_msi_state(struct pci_dev *dev) { } 1699 static inline int pci_msi_enabled(void) { return 0; } 1700 static inline int pci_enable_msi(struct pci_dev *dev) 1701 { return -ENOSYS; } 1702 static inline int pci_enable_msix_range(struct pci_dev *dev, 1703 struct msix_entry *entries, int minvec, int maxvec) 1704 { return -ENOSYS; } 1705 static inline int pci_enable_msix_exact(struct pci_dev *dev, 1706 struct msix_entry *entries, int nvec) 1707 { return -ENOSYS; } 1708 1709 static inline int 1710 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1711 unsigned int max_vecs, unsigned int flags, 1712 struct irq_affinity *aff_desc) 1713 { 1714 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) 1715 return 1; 1716 return -ENOSPC; 1717 } 1718 static inline int 1719 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 1720 unsigned int max_vecs, unsigned int flags) 1721 { 1722 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, 1723 flags, NULL); 1724 } 1725 1726 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev) 1727 { return false; } 1728 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index, 1729 const struct irq_affinity_desc *affdesc) 1730 { 1731 struct msi_map map = { .index = -ENOSYS, }; 1732 1733 return map; 1734 } 1735 1736 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map) 1737 { 1738 } 1739 1740 static inline void pci_free_irq_vectors(struct pci_dev *dev) 1741 { 1742 } 1743 1744 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) 1745 { 1746 if (WARN_ON_ONCE(nr > 0)) 1747 return -EINVAL; 1748 return dev->irq; 1749 } 1750 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, 1751 int vec) 1752 { 1753 return cpu_possible_mask; 1754 } 1755 1756 static inline bool pci_create_ims_domain(struct pci_dev *pdev, 1757 const struct msi_domain_template *template, 1758 unsigned int hwsize, void *data) 1759 { return false; } 1760 1761 static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, 1762 union msi_instance_cookie *icookie, 1763 const struct irq_affinity_desc *affdesc) 1764 { 1765 struct msi_map map = { .index = -ENOSYS, }; 1766 1767 return map; 1768 } 1769 1770 static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map) 1771 { 1772 } 1773 1774 #endif 1775 1776 /** 1777 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq 1778 * @d: the INTx IRQ domain 1779 * @node: the DT node for the device whose interrupt we're translating 1780 * @intspec: the interrupt specifier data from the DT 1781 * @intsize: the number of entries in @intspec 1782 * @out_hwirq: pointer at which to write the hwirq number 1783 * @out_type: pointer at which to write the interrupt type 1784 * 1785 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as 1786 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range 1787 * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the 1788 * INTx value to obtain the hwirq number. 
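 *
 * A host bridge driver would typically plug this helper into the
 * irq_domain_ops of its INTx domain, e.g. (sketch only, my_intx_map is a
 * hypothetical .map callback):
 *
 *	static const struct irq_domain_ops my_intx_domain_ops = {
 *		.map	= my_intx_map,
 *		.xlate	= pci_irqd_intx_xlate,
 *	};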
1789 * 1790 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range. 1791 */ 1792 static inline int pci_irqd_intx_xlate(struct irq_domain *d, 1793 struct device_node *node, 1794 const u32 *intspec, 1795 unsigned int intsize, 1796 unsigned long *out_hwirq, 1797 unsigned int *out_type) 1798 { 1799 const u32 intx = intspec[0]; 1800 1801 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD) 1802 return -EINVAL; 1803 1804 *out_hwirq = intx - PCI_INTERRUPT_INTA; 1805 return 0; 1806 } 1807 1808 #ifdef CONFIG_PCIEPORTBUS 1809 extern bool pcie_ports_disabled; 1810 extern bool pcie_ports_native; 1811 #else 1812 #define pcie_ports_disabled true 1813 #define pcie_ports_native false 1814 #endif 1815 1816 #define PCIE_LINK_STATE_L0S BIT(0) 1817 #define PCIE_LINK_STATE_L1 BIT(1) 1818 #define PCIE_LINK_STATE_CLKPM BIT(2) 1819 #define PCIE_LINK_STATE_L1_1 BIT(3) 1820 #define PCIE_LINK_STATE_L1_2 BIT(4) 1821 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) 1822 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) 1823 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\ 1824 PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\ 1825 PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\ 1826 PCIE_LINK_STATE_L1_2_PCIPM) 1827 1828 #ifdef CONFIG_PCIEASPM 1829 int pci_disable_link_state(struct pci_dev *pdev, int state); 1830 int pci_disable_link_state_locked(struct pci_dev *pdev, int state); 1831 int pci_enable_link_state(struct pci_dev *pdev, int state); 1832 void pcie_no_aspm(void); 1833 bool pcie_aspm_support_enabled(void); 1834 bool pcie_aspm_enabled(struct pci_dev *pdev); 1835 #else 1836 static inline int pci_disable_link_state(struct pci_dev *pdev, int state) 1837 { return 0; } 1838 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state) 1839 { return 0; } 1840 static inline int pci_enable_link_state(struct pci_dev *pdev, int state) 1841 { return 0; } 1842 static inline void pcie_no_aspm(void) { } 1843 static inline bool pcie_aspm_support_enabled(void) { return false; } 1844 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } 1845 #endif 1846 1847 #ifdef CONFIG_PCIEAER 1848 bool pci_aer_available(void); 1849 #else 1850 static inline bool pci_aer_available(void) { return false; } 1851 #endif 1852 1853 bool pci_ats_disabled(void); 1854 1855 #ifdef CONFIG_PCIE_PTM 1856 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); 1857 void pci_disable_ptm(struct pci_dev *dev); 1858 bool pcie_ptm_enabled(struct pci_dev *dev); 1859 #else 1860 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) 1861 { return -EINVAL; } 1862 static inline void pci_disable_ptm(struct pci_dev *dev) { } 1863 static inline bool pcie_ptm_enabled(struct pci_dev *dev) 1864 { return false; } 1865 #endif 1866 1867 void pci_cfg_access_lock(struct pci_dev *dev); 1868 bool pci_cfg_access_trylock(struct pci_dev *dev); 1869 void pci_cfg_access_unlock(struct pci_dev *dev); 1870 1871 void pci_dev_lock(struct pci_dev *dev); 1872 int pci_dev_trylock(struct pci_dev *dev); 1873 void pci_dev_unlock(struct pci_dev *dev); 1874 1875 /* 1876 * PCI domain support. Sometimes called PCI segment (eg by ACPI), 1877 * a PCI domain is defined to be a set of PCI buses which share 1878 * configuration space. 
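 *
 * Together with the bus, slot and function numbers, the domain (segment)
 * fully identifies a device, e.g. (illustrative only):
 *
 *	pr_debug("%04x:%02x:%02x.%d\n", pci_domain_nr(pdev->bus),
 *		 pdev->bus->number, PCI_SLOT(pdev->devfn),
 *		 PCI_FUNC(pdev->devfn));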
1879 */ 1880 #ifdef CONFIG_PCI_DOMAINS 1881 extern int pci_domains_supported; 1882 #else 1883 enum { pci_domains_supported = 0 }; 1884 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1885 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } 1886 #endif /* CONFIG_PCI_DOMAINS */ 1887 1888 /* 1889 * Generic implementation for PCI domain support. If your 1890 * architecture does not need custom management of PCI 1891 * domains then this implementation will be used 1892 */ 1893 #ifdef CONFIG_PCI_DOMAINS_GENERIC 1894 static inline int pci_domain_nr(struct pci_bus *bus) 1895 { 1896 return bus->domain_nr; 1897 } 1898 #ifdef CONFIG_ACPI 1899 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus); 1900 #else 1901 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) 1902 { return 0; } 1903 #endif 1904 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); 1905 void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent); 1906 #endif 1907 1908 /* Some architectures require additional setup to direct VGA traffic */ 1909 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1910 unsigned int command_bits, u32 flags); 1911 void pci_register_set_vga_state(arch_set_vga_state_t func); 1912 1913 static inline int 1914 pci_request_io_regions(struct pci_dev *pdev, const char *name) 1915 { 1916 return pci_request_selected_regions(pdev, 1917 pci_select_bars(pdev, IORESOURCE_IO), name); 1918 } 1919 1920 static inline void 1921 pci_release_io_regions(struct pci_dev *pdev) 1922 { 1923 return pci_release_selected_regions(pdev, 1924 pci_select_bars(pdev, IORESOURCE_IO)); 1925 } 1926 1927 static inline int 1928 pci_request_mem_regions(struct pci_dev *pdev, const char *name) 1929 { 1930 return pci_request_selected_regions(pdev, 1931 pci_select_bars(pdev, IORESOURCE_MEM), name); 1932 } 1933 1934 static inline void 1935 pci_release_mem_regions(struct pci_dev *pdev) 1936 { 1937 return pci_release_selected_regions(pdev, 1938 pci_select_bars(pdev, IORESOURCE_MEM)); 1939 } 1940 1941 #else /* CONFIG_PCI is not enabled */ 1942 1943 static inline void pci_set_flags(int flags) { } 1944 static inline void pci_add_flags(int flags) { } 1945 static inline void pci_clear_flags(int flags) { } 1946 static inline int pci_has_flag(int flag) { return 0; } 1947 1948 /* 1949 * If the system does not have PCI, clearly these return errors. Define 1950 * these as simple inline functions to avoid hair in drivers. 
1951 */ 1952 #define _PCI_NOP(o, s, t) \ 1953 static inline int pci_##o##_config_##s(struct pci_dev *dev, \ 1954 int where, t val) \ 1955 { return PCIBIOS_FUNC_NOT_SUPPORTED; } 1956 1957 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \ 1958 _PCI_NOP(o, word, u16 x) \ 1959 _PCI_NOP(o, dword, u32 x) 1960 _PCI_NOP_ALL(read, *) 1961 _PCI_NOP_ALL(write,) 1962 1963 static inline struct pci_dev *pci_get_device(unsigned int vendor, 1964 unsigned int device, 1965 struct pci_dev *from) 1966 { return NULL; } 1967 1968 static inline struct pci_dev *pci_get_subsys(unsigned int vendor, 1969 unsigned int device, 1970 unsigned int ss_vendor, 1971 unsigned int ss_device, 1972 struct pci_dev *from) 1973 { return NULL; } 1974 1975 static inline struct pci_dev *pci_get_class(unsigned int class, 1976 struct pci_dev *from) 1977 { return NULL; } 1978 1979 static inline struct pci_dev *pci_get_base_class(unsigned int class, 1980 struct pci_dev *from) 1981 { return NULL; } 1982 1983 static inline int pci_dev_present(const struct pci_device_id *ids) 1984 { return 0; } 1985 1986 #define no_pci_devices() (1) 1987 #define pci_dev_put(dev) do { } while (0) 1988 1989 static inline void pci_set_master(struct pci_dev *dev) { } 1990 static inline void pci_clear_master(struct pci_dev *dev) { } 1991 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } 1992 static inline void pci_disable_device(struct pci_dev *dev) { } 1993 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; } 1994 static inline int pci_assign_resource(struct pci_dev *dev, int i) 1995 { return -EBUSY; } 1996 static inline int __must_check __pci_register_driver(struct pci_driver *drv, 1997 struct module *owner, 1998 const char *mod_name) 1999 { return 0; } 2000 static inline int pci_register_driver(struct pci_driver *drv) 2001 { return 0; } 2002 static inline void pci_unregister_driver(struct pci_driver *drv) { } 2003 static inline u8 pci_find_capability(struct pci_dev *dev, int cap) 2004 { return 0; } 2005 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, 2006 int cap) 2007 { return 0; } 2008 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) 2009 { return 0; } 2010 2011 static inline u64 pci_get_dsn(struct pci_dev *dev) 2012 { return 0; } 2013 2014 /* Power management related routines */ 2015 static inline int pci_save_state(struct pci_dev *dev) { return 0; } 2016 static inline void pci_restore_state(struct pci_dev *dev) { } 2017 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) 2018 { return 0; } 2019 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) 2020 { return 0; } 2021 static inline pci_power_t pci_choose_state(struct pci_dev *dev, 2022 pm_message_t state) 2023 { return PCI_D0; } 2024 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 2025 int enable) 2026 { return 0; } 2027 2028 static inline struct resource *pci_find_resource(struct pci_dev *dev, 2029 struct resource *res) 2030 { return NULL; } 2031 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) 2032 { return -EIO; } 2033 static inline void pci_release_regions(struct pci_dev *dev) { } 2034 2035 static inline int pci_register_io_range(struct fwnode_handle *fwnode, 2036 phys_addr_t addr, resource_size_t size) 2037 { return -EINVAL; } 2038 2039 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } 2040 2041 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) 2042 
{ return NULL; } 2043 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, 2044 unsigned int devfn) 2045 { return NULL; } 2046 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, 2047 unsigned int bus, unsigned int devfn) 2048 { return NULL; } 2049 2050 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 2051 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 2052 2053 #define dev_is_pci(d) (false) 2054 #define dev_is_pf(d) (false) 2055 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 2056 { return false; } 2057 static inline int pci_irqd_intx_xlate(struct irq_domain *d, 2058 struct device_node *node, 2059 const u32 *intspec, 2060 unsigned int intsize, 2061 unsigned long *out_hwirq, 2062 unsigned int *out_type) 2063 { return -EINVAL; } 2064 2065 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 2066 struct pci_dev *dev) 2067 { return NULL; } 2068 static inline bool pci_ats_disabled(void) { return true; } 2069 2070 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) 2071 { 2072 return -EINVAL; 2073 } 2074 2075 static inline int 2076 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 2077 unsigned int max_vecs, unsigned int flags, 2078 struct irq_affinity *aff_desc) 2079 { 2080 return -ENOSPC; 2081 } 2082 static inline int 2083 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 2084 unsigned int max_vecs, unsigned int flags) 2085 { 2086 return -ENOSPC; 2087 } 2088 #endif /* CONFIG_PCI */ 2089 2090 /* Include architecture-dependent settings and functions */ 2091 2092 #include <asm/pci.h> 2093 2094 /* 2095 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff 2096 * is expected to be an offset within that region. 2097 * 2098 */ 2099 int pci_mmap_resource_range(struct pci_dev *dev, int bar, 2100 struct vm_area_struct *vma, 2101 enum pci_mmap_state mmap_state, int write_combine); 2102 2103 #ifndef arch_can_pci_mmap_wc 2104 #define arch_can_pci_mmap_wc() 0 2105 #endif 2106 2107 #ifndef arch_can_pci_mmap_io 2108 #define arch_can_pci_mmap_io() 0 2109 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL) 2110 #else 2111 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); 2112 #endif 2113 2114 #ifndef pci_root_bus_fwnode 2115 #define pci_root_bus_fwnode(bus) NULL 2116 #endif 2117 2118 /* 2119 * These helpers provide future and backwards compatibility 2120 * for accessing popular PCI BAR info 2121 */ 2122 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)]) 2123 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start) 2124 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end) 2125 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags) 2126 #define pci_resource_len(dev,bar) \ 2127 (pci_resource_end((dev), (bar)) ? \ 2128 resource_size(pci_resource_n((dev), (bar))) : 0) 2129 2130 #define __pci_dev_for_each_res0(dev, res, ...) \ 2131 for (unsigned int __b = 0; \ 2132 res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \ 2133 __b++) 2134 2135 #define __pci_dev_for_each_res1(dev, res, __b) \ 2136 for (__b = 0; \ 2137 res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \ 2138 __b++) 2139 2140 #define pci_dev_for_each_resource(dev, res, ...) 
\ 2141 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \ 2142 (dev, res, __VA_ARGS__) 2143 2144 /* 2145 * Similar to the helpers above, these manipulate per-pci_dev 2146 * driver-specific data. They are really just a wrapper around 2147 * the generic device structure functions of these calls. 2148 */ 2149 static inline void *pci_get_drvdata(struct pci_dev *pdev) 2150 { 2151 return dev_get_drvdata(&pdev->dev); 2152 } 2153 2154 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) 2155 { 2156 dev_set_drvdata(&pdev->dev, data); 2157 } 2158 2159 static inline const char *pci_name(const struct pci_dev *pdev) 2160 { 2161 return dev_name(&pdev->dev); 2162 } 2163 2164 void pci_resource_to_user(const struct pci_dev *dev, int bar, 2165 const struct resource *rsrc, 2166 resource_size_t *start, resource_size_t *end); 2167 2168 /* 2169 * The world is not perfect and supplies us with broken PCI devices. 2170 * For at least a part of these bugs we need a work-around, so both 2171 * generic (drivers/pci/quirks.c) and per-architecture code can define 2172 * fixup hooks to be called for particular buggy devices. 2173 */ 2174 2175 struct pci_fixup { 2176 u16 vendor; /* Or PCI_ANY_ID */ 2177 u16 device; /* Or PCI_ANY_ID */ 2178 u32 class; /* Or PCI_ANY_ID */ 2179 unsigned int class_shift; /* should be 0, 8, 16 */ 2180 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 2181 int hook_offset; 2182 #else 2183 void (*hook)(struct pci_dev *dev); 2184 #endif 2185 }; 2186 2187 enum pci_fixup_pass { 2188 pci_fixup_early, /* Before probing BARs */ 2189 pci_fixup_header, /* After reading configuration header */ 2190 pci_fixup_final, /* Final phase of device fixups */ 2191 pci_fixup_enable, /* pci_enable_device() time */ 2192 pci_fixup_resume, /* pci_device_resume() */ 2193 pci_fixup_suspend, /* pci_device_suspend() */ 2194 pci_fixup_resume_early, /* pci_device_resume_early() */ 2195 pci_fixup_suspend_late, /* pci_device_suspend_late() */ 2196 }; 2197 2198 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 2199 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2200 class_shift, hook) \ 2201 __ADDRESSABLE(hook) \ 2202 asm(".section " #sec ", \"a\" \n" \ 2203 ".balign 16 \n" \ 2204 ".short " #vendor ", " #device " \n" \ 2205 ".long " #class ", " #class_shift " \n" \ 2206 ".long " #hook " - . \n" \ 2207 ".previous \n"); 2208 2209 /* 2210 * Clang's LTO may rename static functions in C, but has no way to 2211 * handle such renamings when referenced from inline asm. To work 2212 * around this, create global C stubs for these cases. 2213 */ 2214 #ifdef CONFIG_LTO_CLANG 2215 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2216 class_shift, hook, stub) \ 2217 void stub(struct pci_dev *dev); \ 2218 void stub(struct pci_dev *dev) \ 2219 { \ 2220 hook(dev); \ 2221 } \ 2222 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2223 class_shift, stub) 2224 #else 2225 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2226 class_shift, hook, stub) \ 2227 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2228 class_shift, hook) 2229 #endif 2230 2231 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2232 class_shift, hook) \ 2233 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2234 class_shift, hook, __UNIQUE_ID(hook)) 2235 #else 2236 /* Anonymous variables would be nice... 
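 *
 * Either way, a quirk is registered with one of the DECLARE_PCI_FIXUP_*
 * convenience macros below, for example (hypothetical IDs, sketch only):
 *
 *	static void quirk_example(struct pci_dev *dev)
 *	{
 *		pci_info(dev, "applying example quirk\n");
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0xabcd, 0x1234, quirk_example);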
*/ 2237 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ 2238 class_shift, hook) \ 2239 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ 2240 __attribute__((__section__(#section), aligned((sizeof(void *))))) \ 2241 = { vendor, device, class, class_shift, hook }; 2242 #endif 2243 2244 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ 2245 class_shift, hook) \ 2246 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 2247 hook, vendor, device, class, class_shift, hook) 2248 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ 2249 class_shift, hook) \ 2250 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 2251 hook, vendor, device, class, class_shift, hook) 2252 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ 2253 class_shift, hook) \ 2254 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 2255 hook, vendor, device, class, class_shift, hook) 2256 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ 2257 class_shift, hook) \ 2258 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 2259 hook, vendor, device, class, class_shift, hook) 2260 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 2261 class_shift, hook) \ 2262 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 2263 resume##hook, vendor, device, class, class_shift, hook) 2264 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 2265 class_shift, hook) \ 2266 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 2267 resume_early##hook, vendor, device, class, class_shift, hook) 2268 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 2269 class_shift, hook) \ 2270 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 2271 suspend##hook, vendor, device, class, class_shift, hook) 2272 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ 2273 class_shift, hook) \ 2274 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 2275 suspend_late##hook, vendor, device, class, class_shift, hook) 2276 2277 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 2278 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 2279 hook, vendor, device, PCI_ANY_ID, 0, hook) 2280 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 2281 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 2282 hook, vendor, device, PCI_ANY_ID, 0, hook) 2283 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 2284 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 2285 hook, vendor, device, PCI_ANY_ID, 0, hook) 2286 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 2287 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 2288 hook, vendor, device, PCI_ANY_ID, 0, hook) 2289 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 2290 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 2291 resume##hook, vendor, device, PCI_ANY_ID, 0, hook) 2292 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 2293 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 2294 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook) 2295 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 2296 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 2297 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook) 2298 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ 2299 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 2300 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook) 2301 2302 #ifdef CONFIG_PCI_QUIRKS 2303 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 2304 #else 2305 static inline void pci_fixup_device(enum pci_fixup_pass pass, 
2306 struct pci_dev *dev) { } 2307 #endif 2308 2309 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); 2310 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); 2311 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); 2312 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name); 2313 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, 2314 const char *name); 2315 void pcim_iounmap_regions(struct pci_dev *pdev, int mask); 2316 2317 extern int pci_pci_problems; 2318 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ 2319 #define PCIPCI_TRITON 2 2320 #define PCIPCI_NATOMA 4 2321 #define PCIPCI_VIAETBF 8 2322 #define PCIPCI_VSFX 16 2323 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ 2324 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ 2325 2326 extern unsigned long pci_cardbus_io_size; 2327 extern unsigned long pci_cardbus_mem_size; 2328 extern u8 pci_dfl_cache_line_size; 2329 extern u8 pci_cache_line_size; 2330 2331 /* Architecture-specific versions may override these (weak) */ 2332 void pcibios_disable_device(struct pci_dev *dev); 2333 void pcibios_set_master(struct pci_dev *dev); 2334 int pcibios_set_pcie_reset_state(struct pci_dev *dev, 2335 enum pcie_reset_state state); 2336 int pcibios_device_add(struct pci_dev *dev); 2337 void pcibios_release_device(struct pci_dev *dev); 2338 #ifdef CONFIG_PCI 2339 void pcibios_penalize_isa_irq(int irq, int active); 2340 #else 2341 static inline void pcibios_penalize_isa_irq(int irq, int active) {} 2342 #endif 2343 int pcibios_alloc_irq(struct pci_dev *dev); 2344 void pcibios_free_irq(struct pci_dev *dev); 2345 resource_size_t pcibios_default_alignment(void); 2346 2347 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE) 2348 extern int pci_create_resource_files(struct pci_dev *dev); 2349 extern void pci_remove_resource_files(struct pci_dev *dev); 2350 #endif 2351 2352 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG) 2353 void __init pci_mmcfg_early_init(void); 2354 void __init pci_mmcfg_late_init(void); 2355 #else 2356 static inline void pci_mmcfg_early_init(void) { } 2357 static inline void pci_mmcfg_late_init(void) { } 2358 #endif 2359 2360 int pci_ext_cfg_avail(void); 2361 2362 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 2363 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar); 2364 2365 #ifdef CONFIG_PCI_IOV 2366 int pci_iov_virtfn_bus(struct pci_dev *dev, int id); 2367 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); 2368 int pci_iov_vf_id(struct pci_dev *dev); 2369 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver); 2370 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); 2371 void pci_disable_sriov(struct pci_dev *dev); 2372 2373 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id); 2374 int pci_iov_add_virtfn(struct pci_dev *dev, int id); 2375 void pci_iov_remove_virtfn(struct pci_dev *dev, int id); 2376 int pci_num_vf(struct pci_dev *dev); 2377 int pci_vfs_assigned(struct pci_dev *dev); 2378 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); 2379 int pci_sriov_get_totalvfs(struct pci_dev *dev); 2380 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn); 2381 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); 2382 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); 2383 2384 /* Arch may override these (weak) */ 2385 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); 2386 int 
pcibios_sriov_disable(struct pci_dev *pdev); 2387 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); 2388 #else 2389 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) 2390 { 2391 return -ENOSYS; 2392 } 2393 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) 2394 { 2395 return -ENOSYS; 2396 } 2397 2398 static inline int pci_iov_vf_id(struct pci_dev *dev) 2399 { 2400 return -ENOSYS; 2401 } 2402 2403 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev, 2404 struct pci_driver *pf_driver) 2405 { 2406 return ERR_PTR(-EINVAL); 2407 } 2408 2409 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) 2410 { return -ENODEV; } 2411 2412 static inline int pci_iov_sysfs_link(struct pci_dev *dev, 2413 struct pci_dev *virtfn, int id) 2414 { 2415 return -ENODEV; 2416 } 2417 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id) 2418 { 2419 return -ENOSYS; 2420 } 2421 static inline void pci_iov_remove_virtfn(struct pci_dev *dev, 2422 int id) { } 2423 static inline void pci_disable_sriov(struct pci_dev *dev) { } 2424 static inline int pci_num_vf(struct pci_dev *dev) { return 0; } 2425 static inline int pci_vfs_assigned(struct pci_dev *dev) 2426 { return 0; } 2427 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) 2428 { return 0; } 2429 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) 2430 { return 0; } 2431 #define pci_sriov_configure_simple NULL 2432 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) 2433 { return 0; } 2434 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } 2435 #endif 2436 2437 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) 2438 void pci_hp_create_module_link(struct pci_slot *pci_slot); 2439 void pci_hp_remove_module_link(struct pci_slot *pci_slot); 2440 #endif 2441 2442 /** 2443 * pci_pcie_cap - get the saved PCIe capability offset 2444 * @dev: PCI device 2445 * 2446 * PCIe capability offset is calculated at PCI device initialization 2447 * time and saved in the data structure. This function returns saved 2448 * PCIe capability offset. Using this instead of pci_find_capability() 2449 * reduces unnecessary search in the PCI configuration space. If you 2450 * need to calculate PCIe capability offset from raw device for some 2451 * reasons, please use pci_find_capability() instead. 2452 */ 2453 static inline int pci_pcie_cap(struct pci_dev *dev) 2454 { 2455 return dev->pcie_cap; 2456 } 2457 2458 /** 2459 * pci_is_pcie - check if the PCI device is PCI Express capable 2460 * @dev: PCI device 2461 * 2462 * Returns: true if the PCI device is PCI Express capable, false otherwise. 2463 */ 2464 static inline bool pci_is_pcie(struct pci_dev *dev) 2465 { 2466 return pci_pcie_cap(dev); 2467 } 2468 2469 /** 2470 * pcie_caps_reg - get the PCIe Capabilities Register 2471 * @dev: PCI device 2472 */ 2473 static inline u16 pcie_caps_reg(const struct pci_dev *dev) 2474 { 2475 return dev->pcie_flags_reg; 2476 } 2477 2478 /** 2479 * pci_pcie_type - get the PCIe device/port type 2480 * @dev: PCI device 2481 */ 2482 static inline int pci_pcie_type(const struct pci_dev *dev) 2483 { 2484 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; 2485 } 2486 2487 /** 2488 * pcie_find_root_port - Get the PCIe root port device 2489 * @dev: PCI device 2490 * 2491 * Traverse up the parent chain and return the PCIe Root Port PCI Device 2492 * for a given PCI/PCIe Device. 
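 *
 * For example, an error handler might want to report against the Root Port
 * rather than the endpoint itself (illustrative only):
 *
 *	struct pci_dev *rp = pcie_find_root_port(pdev);
 *
 *	if (rp)
 *		pci_warn(rp, "error reported by %s\n", pci_name(pdev));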
2493 */ 2494 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) 2495 { 2496 while (dev) { 2497 if (pci_is_pcie(dev) && 2498 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 2499 return dev; 2500 dev = pci_upstream_bridge(dev); 2501 } 2502 2503 return NULL; 2504 } 2505 2506 void pci_request_acs(void); 2507 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); 2508 bool pci_acs_path_enabled(struct pci_dev *start, 2509 struct pci_dev *end, u16 acs_flags); 2510 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); 2511 2512 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 2513 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) 2514 2515 /* Large Resource Data Type Tag Item Names */ 2516 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ 2517 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */ 2518 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */ 2519 2520 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING) 2521 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) 2522 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) 2523 2524 #define PCI_VPD_RO_KEYWORD_PARTNO "PN" 2525 #define PCI_VPD_RO_KEYWORD_SERIALNO "SN" 2526 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN" 2527 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0" 2528 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV" 2529 2530 /** 2531 * pci_vpd_alloc - Allocate buffer and read VPD into it 2532 * @dev: PCI device 2533 * @size: pointer to field where VPD length is returned 2534 * 2535 * Returns pointer to allocated buffer or an ERR_PTR in case of failure 2536 */ 2537 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size); 2538 2539 /** 2540 * pci_vpd_find_id_string - Locate id string in VPD 2541 * @buf: Pointer to buffered VPD data 2542 * @len: The length of the buffer area in which to search 2543 * @size: Pointer to field where length of id string is returned 2544 * 2545 * Returns the index of the id string or -ENOENT if not found. 2546 */ 2547 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size); 2548 2549 /** 2550 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section 2551 * @buf: Pointer to buffered VPD data 2552 * @len: The length of the buffer area in which to search 2553 * @kw: The keyword to search for 2554 * @size: Pointer to field where length of found keyword data is returned 2555 * 2556 * Returns the index of the information field keyword data or -ENOENT if 2557 * not found. 
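 *
 * A minimal usage sketch, combined with pci_vpd_alloc() above (error
 * handling elided):
 *
 *	unsigned int vpd_len, pn_len;
 *	void *vpd = pci_vpd_alloc(pdev, &vpd_len);
 *	int off;
 *
 *	if (!IS_ERR(vpd)) {
 *		off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *						   PCI_VPD_RO_KEYWORD_PARTNO,
 *						   &pn_len);
 *		// if off >= 0, the part number is pn_len bytes at vpd + off
 *		kfree(vpd);
 *	}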
2558 */ 2559 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, 2560 const char *kw, unsigned int *size); 2561 2562 /** 2563 * pci_vpd_check_csum - Check VPD checksum 2564 * @buf: Pointer to buffered VPD data 2565 * @len: VPD size 2566 * 2567 * Returns 1 if VPD has no checksum, otherwise 0 or an errno 2568 */ 2569 int pci_vpd_check_csum(const void *buf, unsigned int len); 2570 2571 /* PCI <-> OF binding helpers */ 2572 #ifdef CONFIG_OF 2573 struct device_node; 2574 struct irq_domain; 2575 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); 2576 bool pci_host_of_has_msi_map(struct device *dev); 2577 2578 /* Arch may override this (weak) */ 2579 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); 2580 2581 #else /* CONFIG_OF */ 2582 static inline struct irq_domain * 2583 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } 2584 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; } 2585 #endif /* CONFIG_OF */ 2586 2587 static inline struct device_node * 2588 pci_device_to_OF_node(const struct pci_dev *pdev) 2589 { 2590 return pdev ? pdev->dev.of_node : NULL; 2591 } 2592 2593 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) 2594 { 2595 return bus ? bus->dev.of_node : NULL; 2596 } 2597 2598 #ifdef CONFIG_ACPI 2599 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); 2600 2601 void 2602 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); 2603 bool pci_pr3_present(struct pci_dev *pdev); 2604 #else 2605 static inline struct irq_domain * 2606 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } 2607 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } 2608 #endif 2609 2610 #ifdef CONFIG_EEH 2611 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) 2612 { 2613 return pdev->dev.archdata.edev; 2614 } 2615 #endif 2616 2617 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns); 2618 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); 2619 int pci_for_each_dma_alias(struct pci_dev *pdev, 2620 int (*fn)(struct pci_dev *pdev, 2621 u16 alias, void *data), void *data); 2622 2623 /* Helper functions for operation of device flag */ 2624 static inline void pci_set_dev_assigned(struct pci_dev *pdev) 2625 { 2626 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; 2627 } 2628 static inline void pci_clear_dev_assigned(struct pci_dev *pdev) 2629 { 2630 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; 2631 } 2632 static inline bool pci_is_dev_assigned(struct pci_dev *pdev) 2633 { 2634 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; 2635 } 2636 2637 /** 2638 * pci_ari_enabled - query ARI forwarding status 2639 * @bus: the PCI bus 2640 * 2641 * Returns true if ARI forwarding is enabled. 2642 */ 2643 static inline bool pci_ari_enabled(struct pci_bus *bus) 2644 { 2645 return bus->self && bus->self->ari_enabled; 2646 } 2647 2648 /** 2649 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain 2650 * @pdev: PCI device to check 2651 * 2652 * Walk upwards from @pdev and check for each encountered bridge if it's part 2653 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not 2654 * Thunderbolt-attached. (But rather soldered to the mainboard usually.) 
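 *
 * A possible use, e.g. when a driver wants to note that a device may
 * disappear at runtime (illustrative only):
 *
 *	if (pci_is_thunderbolt_attached(pdev))
 *		pci_info(pdev, "device is attached via Thunderbolt\n");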
2655 */ 2656 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) 2657 { 2658 struct pci_dev *parent = pdev; 2659 2660 if (pdev->is_thunderbolt) 2661 return true; 2662 2663 while ((parent = pci_upstream_bridge(parent))) 2664 if (parent->is_thunderbolt) 2665 return true; 2666 2667 return false; 2668 } 2669 2670 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) 2671 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); 2672 #endif 2673 2674 #include <linux/dma-mapping.h> 2675 2676 #define pci_printk(level, pdev, fmt, arg...) \ 2677 dev_printk(level, &(pdev)->dev, fmt, ##arg) 2678 2679 #define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) 2680 #define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) 2681 #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) 2682 #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) 2683 #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) 2684 #define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg) 2685 #define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) 2686 #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) 2687 #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) 2688 2689 #define pci_notice_ratelimited(pdev, fmt, arg...) \ 2690 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg) 2691 2692 #define pci_info_ratelimited(pdev, fmt, arg...) \ 2693 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg) 2694 2695 #define pci_WARN(pdev, condition, fmt, arg...) \ 2696 WARN(condition, "%s %s: " fmt, \ 2697 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg) 2698 2699 #define pci_WARN_ONCE(pdev, condition, fmt, arg...) \ 2700 WARN_ONCE(condition, "%s %s: " fmt, \ 2701 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg) 2702 2703 #endif /* LINUX_PCI_H */ 2704