/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	pci.h
 *
 *	PCI defines and function prototypes
 *	Copyright 1994, Drew Eckhardt
 *	Copyright 1997--1999 Martin Mares <[email protected]>
 *
 *	PCI Express ASPM defines and function prototypes
 *	Copyright (c) 2007 Intel Corp.
 *		Zhang Yanmin ([email protected])
 *		Shaohua Li ([email protected])
 *
 *	For more information, please consult the following manuals (look at
 *	http://www.pcisig.com/ for how to get them):
 *
 *	PCI BIOS Specification
 *	PCI Local Bus Specification
 *	PCI to PCI Bridge Specification
 *	PCI Express Specification
 *	PCI System Design Guide
 */
#ifndef LINUX_PCI_H
#define LINUX_PCI_H

#include <linux/args.h>
#include <linux/mod_devicetable.h>

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <linux/msi_api.h>
#include <uapi/linux/pci.h>

#include <linux/pci_ids.h>

#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
			       PCI_STATUS_SIG_SYSTEM_ERROR | \
			       PCI_STATUS_REC_MASTER_ABORT | \
			       PCI_STATUS_REC_TARGET_ABORT | \
			       PCI_STATUS_SIG_TARGET_ABORT | \
			       PCI_STATUS_PARITY)

/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
#define PCI_NUM_RESET_METHODS 8

#define PCI_RESET_PROBE		true
#define PCI_RESET_DO_RESET	false

/*
 * The PCI interface treats multi-function devices as independent
 * devices.  The slot/function address of each device is encoded
 * in a single byte as follows:
 *
 *	7:3 = slot
 *	2:0 = function
 *
 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
 * In the interest of not exposing interfaces to user-space unnecessarily,
 * the following kernel-only defines are being added here.
 */
#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))

/* Return bus from PCI devid = ((u16)bus_number << 8) | devfn */
#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)

/* pci_slot represents a physical slot */
struct pci_slot {
	struct pci_bus		*bus;		/* Bus this slot is on */
	struct list_head	list;		/* Node in list of slots */
	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
	struct kobject		kobj;
};

static inline const char *pci_slot_name(const struct pci_slot *slot)
{
	return kobject_name(&slot->kobj);
}

/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
	pci_mmap_io,
	pci_mmap_mem
};

/* For PCI devices, the region numbers are assigned this way: */
enum {
	/* #0-5: standard PCI resources */
	PCI_STD_RESOURCES,
	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,

	/* #6: expansion ROM resource */
	PCI_ROM_RESOURCE,

	/* Device-specific resources */
#ifdef CONFIG_PCI_IOV
	PCI_IOV_RESOURCES,
	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

/* PCI-to-PCI (P2P) bridge windows */
#define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
#define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
#define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)

/* CardBus bridge windows */
#define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
#define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
#define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
#define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)

/* Total number of bridge resources for P2P and CardBus */
#define PCI_BRIDGE_RESOURCE_NUM 4

	/* Resources assigned to buses behind the bridge */
	PCI_BRIDGE_RESOURCES,
	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
				  PCI_BRIDGE_RESOURCE_NUM - 1,

	/* Total resources associated with a PCI device */
	PCI_NUM_RESOURCES,

	/* Preserve this for compatibility */
	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};

/**
 * enum pci_interrupt_pin - PCI INTx interrupt values
 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
 * @PCI_INTERRUPT_INTA: PCI INTA pin
 * @PCI_INTERRUPT_INTB: PCI INTB pin
 * @PCI_INTERRUPT_INTC: PCI INTC pin
 * @PCI_INTERRUPT_INTD: PCI INTD pin
 *
 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
 * PCI_INTERRUPT_PIN register.
 */
enum pci_interrupt_pin {
	PCI_INTERRUPT_UNKNOWN,
	PCI_INTERRUPT_INTA,
	PCI_INTERRUPT_INTB,
	PCI_INTERRUPT_INTC,
	PCI_INTERRUPT_INTD,
};

/* The number of legacy PCI INTx interrupts */
#define PCI_NUM_INTX	4

/*
 * Reading from a device that doesn't respond typically returns ~0.  A
 * successful read from a device may also return ~0, so you need additional
 * information to reliably identify errors.
 */
#define PCI_ERROR_RESPONSE		(~0ULL)
#define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
#define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
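
/*
 * Example (illustrative only, not part of the API): checking a config read
 * against PCI_POSSIBLE_ERROR().  A minimal sketch assuming "pdev" is a
 * struct pci_dev the caller already holds; as noted above, ~0 only *may*
 * indicate an error, so treat it as a hint:
 *
 *	u32 id;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
 *	if (PCI_POSSIBLE_ERROR(id))
 *		return -ENODEV;
 */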

/*
 * pci_power_t values must match the bits in the Capabilities PME_Support
 * and Control/Status PowerState fields in the Power Management capability.
 */
typedef int __bitwise pci_power_t;

#define PCI_D0		((pci_power_t __force) 0)
#define PCI_D1		((pci_power_t __force) 1)
#define PCI_D2		((pci_power_t __force) 2)
#define PCI_D3hot	((pci_power_t __force) 3)
#define PCI_D3cold	((pci_power_t __force) 4)
#define PCI_UNKNOWN	((pci_power_t __force) 5)
#define PCI_POWER_ERROR	((pci_power_t __force) -1)

/* Remember to update this when the list above changes! */
extern const char *pci_power_names[];

static inline const char *pci_power_name(pci_power_t state)
{
	return pci_power_names[1 + (__force int) state];
}

/**
 * typedef pci_channel_state_t
 *
 * The pci_channel state describes connectivity between the CPU and
 * the PCI device.  If some PCI bus between here and the PCI device
 * has crashed or locked up, this info is reflected here.
 */
typedef unsigned int __bitwise pci_channel_state_t;

enum {
	/* I/O channel is in normal state */
	pci_channel_io_normal = (__force pci_channel_state_t) 1,

	/* I/O to channel is blocked */
	pci_channel_io_frozen = (__force pci_channel_state_t) 2,

	/* PCI card is dead */
	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};

typedef unsigned int __bitwise pcie_reset_state_t;

enum pcie_reset_state {
	/* Reset is NOT asserted (Use to deassert reset) */
	pcie_deassert_reset = (__force pcie_reset_state_t) 1,

	/* Use #PERST to reset PCIe device */
	pcie_warm_reset = (__force pcie_reset_state_t) 2,

	/* Use PCIe Hot Reset to reset device */
	pcie_hot_reset = (__force pcie_reset_state_t) 3
};

typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
	/* Device configuration is irrevocably lost if disabled into D3 */
	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
	/* Provide indication device is assigned by a Virtual Machine Manager */
	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
	/* Flag for quirk use to store if quirk-specific ACS is enabled */
	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
	/* Do not use bus resets for device */
	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
	/* Do not use PM reset even if device advertises NoSoftRst- */
	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
	/* Get VPD from function 0 VPD */
	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
	/* A non-root bridge where translation occurs, stop alias search here */
	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
	/* Do not use FLR even if device advertises PCI_AF_CAP */
	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
	/* Don't use Relaxed Ordering for TLPs directed at this device */
	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
	/* Device does honor MSI masking despite saying otherwise */
	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};

enum pci_irq_reroute_variant {
	INTEL_IRQ_REROUTE_VARIANT = 1,
	MAX_IRQ_REROUTE_VARIANTS = 3
};

typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
};

/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV	= 0x00,
	PCIE_LNK_X1		= 0x01,
	PCIE_LNK_X2		= 0x02,
	PCIE_LNK_X4		= 0x04,
	PCIE_LNK_X8		= 0x08,
	PCIE_LNK_X12		= 0x0c,
	PCIE_LNK_X16		= 0x10,
	PCIE_LNK_X32		= 0x20,
	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
};

/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
	PCI_SPEED_33MHz			= 0x00,
	PCI_SPEED_66MHz			= 0x01,
	PCI_SPEED_66MHz_PCIX		= 0x02,
	PCI_SPEED_100MHz_PCIX		= 0x03,
	PCI_SPEED_133MHz_PCIX		= 0x04,
	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
	PCI_SPEED_66MHz_PCIX_266	= 0x09,
	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
	AGP_UNKNOWN			= 0x0c,
	AGP_1X				= 0x0d,
	AGP_2X				= 0x0e,
	AGP_4X				= 0x0f,
	AGP_8X				= 0x10,
	PCI_SPEED_66MHz_PCIX_533	= 0x11,
	PCI_SPEED_100MHz_PCIX_533	= 0x12,
	PCI_SPEED_133MHz_PCIX_533	= 0x13,
	PCIE_SPEED_2_5GT		= 0x14,
	PCIE_SPEED_5_0GT		= 0x15,
	PCIE_SPEED_8_0GT		= 0x16,
	PCIE_SPEED_16_0GT		= 0x17,
	PCIE_SPEED_32_0GT		= 0x18,
	PCIE_SPEED_64_0GT		= 0x19,
	PCI_SPEED_UNKNOWN		= 0xff,
};

enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);

struct pci_vpd {
	struct mutex	lock;
	unsigned int	len;
	u8		cap;
};

struct irq_affinity;
struct pcie_bwctrl_data;
struct pcie_link_state;
struct pci_sriov;
struct pci_p2pdma;
struct rcec_ea;
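
/*
 * Example (illustrative): querying link capabilities with the helpers
 * declared above; "pdev" is a hypothetical struct pci_dev pointer:
 *
 *	enum pci_bus_speed speed = pcie_get_speed_cap(pdev);
 *	enum pcie_link_width width = pcie_get_width_cap(pdev);
 *
 *	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
 *		dev_info(&pdev->dev, "link capabilities unknown\n");
 */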

/* struct pci_dev - describes a PCI device
 *
 * @supported_speeds:	PCIe Supported Link Speeds Vector (+ reserved 0 at
 *			LSB). 0 when the supported speeds cannot be
 *			determined (e.g., for Root Complex Integrated
 *			Endpoints without the relevant Capability
 *			Registers).
 */
struct pci_dev {
	struct list_head bus_list;	/* Node in per-bus list */
	struct pci_bus	*bus;		/* Bus this device is on */
	struct pci_bus	*subordinate;	/* Bus this device bridges to */

	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
	struct pci_slot	*slot;		/* Physical slot this device is in */

	unsigned int	devfn;		/* Encoded device & function index */
	unsigned short	vendor;
	unsigned short	device;
	unsigned short	subsystem_vendor;
	unsigned short	subsystem_device;
	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
	u8		revision;	/* PCI revision, low byte of class word */
	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
	u16		aer_cap;	/* AER capability offset */
	struct aer_stats *aer_stats;	/* AER stats for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
	struct pci_dev	*rcec;		/* Associated RCEC device */
#endif
	u32		devcap;		/* PCIe Device Capabilities */
	u16		rebar_cap;	/* Resizable BAR capability offset */
	u8		pcie_cap;	/* PCIe capability offset */
	u8		msi_cap;	/* MSI capability offset */
	u8		msix_cap;	/* MSI-X capability offset */
	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
	u8		rom_base_reg;	/* Config register controlling ROM */
	u8		pin;		/* Interrupt pin this device uses */
	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */

	struct pci_driver *driver;	/* Driver bound to this device */
	u64		dma_mask;	/* Mask of the bits of bus address this
					   device implements.  Normally this is
					   0xffffffff.  You only need to change
					   this if your device has broken DMA
					   or supports 64-bit transfers. */

	struct device_dma_parameters dma_parms;

	pci_power_t	current_state;	/* Current operating state. In ACPI,
					   this is D0-D3, D0 being fully
					   functional, and D3 being off.
					 */
	u8		pm_cap;		/* PM capability offset */
	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
					   can be generated */
	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
	unsigned int	pinned:1;	/* Whether this dev is pinned */
	unsigned int	config_rrs_sv:1; /* Config RRS software visibility */
	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
	unsigned int	d1_support:1;	/* Low power state D1 is supported */
	unsigned int	d2_support:1;	/* Low power state D2 is supported */
	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
						   decoding during BAR sizing */
	unsigned int	wakeup_prepared:1;
	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
						      controlled exclusively by
						      user sysfs */
	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
						   bit manually */
	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */

	u16		l1ss;		/* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
	struct pcie_link_state	*link_state;	/* ASPM link state */
	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
					   supported from root to here */
#endif
	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
	unsigned int	eetlp_prefix_max:3;	/* Max # of End-End TLP Prefixes, 0=not supported */

	pci_channel_state_t error_state;	/* Current connectivity state */
	struct device	dev;			/* Generic device interface */

	int		cfg_size;		/* Size of config space */

	/*
	 * Instead of touching interrupt line and base address registers
	 * directly, use the values stored here.  They might be different!
	 */
	unsigned int	irq;
	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */

	bool		match_driver;		/* Skip attaching driver */

	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	io_window:1;		/* Bridge has I/O window */
	unsigned int	pref_window:1;		/* Bridge has pref mem window */
	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
	unsigned int	block_cfg_access:1;	/* Config space access blocked */
	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
	unsigned int	msi_enabled:1;
	unsigned int	msix_enabled:1;
	unsigned int	ari_enabled:1;		/* ARI forwarding */
	unsigned int	ats_enabled:1;		/* Address Translation Svc */
	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
	unsigned int	pri_enabled:1;		/* Page Request Interface */
	unsigned int	tph_enabled:1;		/* TLP Processing Hints */
	unsigned int	is_managed:1;		/* Managed via devres */
	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
	unsigned int	needs_freset:1;		/* Requires fundamental reset */
	unsigned int	state_saved:1;
	unsigned int	is_physfn:1;
	unsigned int	is_virtfn:1;
	unsigned int	is_hotplug_bridge:1;
	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
	/*
	 * Devices marked being untrusted are the ones that can potentially
	 * execute DMA attacks and similar. They are typically connected
	 * through external ports such as Thunderbolt but not limited to
	 * that. When an IOMMU is enabled they should be getting full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	untrusted:1;
	/*
	 * Info from the platform, e.g., ACPI or device tree, may mark a
	 * device as "external-facing".  An external-facing device is
	 * itself internal but devices downstream from it are external.
	 */
	unsigned int	external_facing:1;
	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
	unsigned int	irq_managed:1;
	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
	unsigned int	is_probed:1;		/* Device probing in progress */
	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled?
						 */
	unsigned int	non_mappable_bars:1;	/* BARs can't be mapped to user-space */
	pci_dev_flags_t dev_flags;
	atomic_t	enable_cnt;	/* pci_enable_device has been called */

	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
	u32		saved_config_space[16]; /* Config space saved at suspend time */
	struct hlist_head saved_cap_space;
	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
	u16		ptm_cap;		/* PTM Capability */
	unsigned int	ptm_root:1;
	unsigned int	ptm_enabled:1;
	u8		ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
	void __iomem	*msix_base;
	raw_spinlock_t	msi_lock;
#endif
	struct pci_vpd	vpd;
#ifdef CONFIG_PCIE_DPC
	u16		dpc_cap;
	unsigned int	dpc_rp_extensions:1;
	u8		dpc_rp_log_size;
#endif
	struct pcie_bwctrl_data	*link_bwctrl;
#ifdef CONFIG_PCI_ATS
	union {
		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
		struct pci_dev		*physfn;	/* VF: related PF */
	};
	u16		ats_cap;	/* ATS Capability offset */
	u8		ats_stu;	/* ATS Smallest Translation Unit */
#endif
#ifdef CONFIG_PCI_PRI
	u16		pri_cap;	/* PRI Capability offset */
	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
	unsigned int	pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
	u16		pasid_cap;	/* PASID Capability offset */
	u16		pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
	struct pci_p2pdma __rcu *p2pdma;
#endif
#ifdef CONFIG_PCI_DOE
	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
#endif
#ifdef CONFIG_PCI_NPEM
	struct npem	*npem;		/* Native PCIe Enclosure Management */
#endif
	u16		acs_cap;	/* ACS Capability offset */
	u8		supported_speeds;	/* Supported Link Speeds Vector */
	phys_addr_t	rom;		/* Physical address if not from BAR */
	size_t		romlen;		/* Length if not from BAR */
	/*
	 * Driver name to force a match.  Do not set directly, because core
	 * frees it.  Use driver_set_override() to set or clear it.
	 */
	const char	*driver_override;

	unsigned long	priv_flags;	/* Private flags for the PCI driver */

	/* These methods index pci_reset_fn_methods[] */
	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */

#ifdef CONFIG_PCIE_TPH
	u16		tph_cap;	/* TPH capability offset */
	u8		tph_mode;	/* TPH mode */
	u8		tph_req_type;	/* TPH requester type */
#endif
};

static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		dev = dev->physfn;
#endif
	return dev;
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus);

#define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)

static inline int pci_channel_offline(struct pci_dev *pdev)
{
	return (pdev->error_state != pci_channel_io_normal);
}

/*
 * Currently in ACPI spec, for each PCI host bridge, PCI Segment
 * Group number is limited to a 16-bit value, therefore (int)-1 is
 * not a valid PCI domain number, and can be used as a sentinel
 * value indicating ->domain_nr is not set by the driver (and
 * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
 * pci_bus_find_domain_nr()).
 */
#define PCI_DOMAIN_NR_NOT_SET (-1)

struct pci_host_bridge {
	struct device	dev;
	struct pci_bus	*bus;		/* Root bus */
	struct pci_ops	*ops;
	struct pci_ops	*child_ops;
	void		*sysdata;
	int		busnr;
	int		domain_nr;
	struct list_head windows;	/* resource_entry */
	struct list_head dma_ranges;	/* dma ranges resource list */
	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
	int (*map_irq)(const struct pci_dev *, u8, u8);
	void (*release_fn)(struct pci_host_bridge *);
	int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void		*release_data;
	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
	unsigned int	no_ext_tags:1;		/* No Extended Tags */
	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
	unsigned int	native_aer:1;		/* OS may use PCIe AER */
	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
	unsigned int	native_pme:1;		/* OS may use PCIe PME */
	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
	unsigned int	size_windows:1;		/* Enable root bus sizing */
	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */

	/* Resource alignment requirements */
	resource_size_t (*align_resource)(struct pci_dev *dev,
			const struct resource *res,
			resource_size_t start,
			resource_size_t size,
			resource_size_t align);
	unsigned long	private[] ____cacheline_aligned;
};

#define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)

static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
{
	return (void *)bridge->private;
}

static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
	return container_of(priv, struct pci_host_bridge, private);
}
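
/*
 * Example (illustrative): host bridge drivers typically allocate the bridge
 * with room for driver-private data and fetch it back with
 * pci_host_bridge_priv().  A minimal sketch assuming a hypothetical
 * "struct my_pcie" and a probe-time "dev" pointer; the allocation helpers
 * are declared just below:
 *
 *	struct pci_host_bridge *bridge;
 *	struct my_pcie *pcie;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
 *	if (!bridge)
 *		return -ENOMEM;
 *	pcie = pci_host_bridge_priv(bridge);
 */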

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv);
void pci_free_host_bridge(struct pci_host_bridge *bridge);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);

void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
				 void (*release_fn)(struct pci_host_bridge *),
				 void *release_data);

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);

#define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */

struct pci_bus {
	struct list_head node;		/* Node in list of buses */
	struct pci_bus	*parent;	/* Parent bus this bridge is on */
	struct list_head children;	/* List of child buses */
	struct list_head devices;	/* List of devices on this bus */
	struct pci_dev	*self;		/* Bridge device as seen by parent */
	struct list_head slots;		/* List of slots on this bus;
					   protected by pci_slot_mutex */
	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
	struct list_head resources;	/* Address space routed to this bus */
	struct resource busn_res;	/* Bus numbers routed to this bus */

	struct pci_ops	*ops;		/* Configuration access functions */
	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */

	unsigned char	number;		/* Bus number */
	unsigned char	primary;	/* Number of primary bridge */
	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	int		domain_nr;
#endif

	char		name[48];

	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
	struct device	*bridge;
	struct device	dev;
	struct bin_attribute *legacy_io;	/* Legacy I/O for this bus */
	struct bin_attribute *legacy_mem;	/* Legacy mem */
	unsigned int	is_added:1;
	unsigned int	unsafe_warn:1;	/* warned about RW1C config write */
	unsigned int	flit_mode:1;	/* Link in Flit mode */
};

#define to_pci_bus(n)	container_of(n, struct pci_bus, dev)

static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}

/*
 * Returns true if the PCI bus is root (behind host-PCI bridge),
 * false otherwise
 *
 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
 * This is incorrect because "virtual" buses added for SR-IOV (via
 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
 */
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
	return !(pbus->parent);
}
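
/*
 * Example (illustrative): walking from a device up to its root bus with
 * pci_is_root_bus(); a minimal sketch, not a helper provided here:
 *
 *	struct pci_bus *bus = pdev->bus;
 *
 *	while (!pci_is_root_bus(bus))
 *		bus = bus->parent;
 *	// "bus" is now the root bus behind the host bridge
 */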

/**
 * pci_is_bridge - check if the PCI device is a bridge
 * @dev: PCI device
 *
 * Return true if the PCI device is a bridge, whether or not it currently
 * has a subordinate bus.
 */
static inline bool pci_is_bridge(struct pci_dev *dev)
{
	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}

/**
 * pci_is_vga - check if the PCI device is a VGA device
 * @pdev: PCI device
 *
 * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
 * VGA Base Class and Sub-Classes:
 *
 *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
 *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
 *
 * Return true if the PCI device is a VGA device and uses the legacy VGA
 * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
 * aliases).
 */
static inline bool pci_is_vga(struct pci_dev *pdev)
{
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		return true;

	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
		return true;

	return false;
}

#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else

static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
{
	dev = pci_physfn(dev);
	if (pci_is_root_bus(dev->bus))
		return NULL;

	return dev->bus->self;
}

#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif

/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* Assume already errno */

	switch (err) {
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return -ENOENT;
	case PCIBIOS_BAD_VENDOR_ID:
		return -ENOTTY;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return -ENODEV;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return -EFAULT;
	case PCIBIOS_SET_FAILED:
		return -EIO;
	case PCIBIOS_BUFFER_TOO_SMALL:
		return -ENOSPC;
	}

	return -ERANGE;
}

/* Low-level architecture-dependent routines */

struct pci_ops {
	int (*add_bus)(struct pci_bus *bus);
	void (*remove_bus)(struct pci_bus *bus);
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};
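
/*
 * Example (illustrative): a host controller with memory-mapped (ECAM-like)
 * config space can often implement only ->map_bus() and reuse the generic
 * accessors declared further below; my_map_bus() is hypothetical:
 *
 *	static struct pci_ops my_pci_ops = {
 *		.map_bus = my_map_bus,
 *		.read    = pci_generic_config_read,
 *		.write   = pci_generic_config_write,
 *	};
 */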

/*
 * ACPI needs to be able to access PCI config space before we've done a
 * PCI bus scan and created pci_bus structures.
 */
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *val);
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 val);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif

struct pci_bus_region {
	pci_bus_addr_t	start;
	pci_bus_addr_t	end;
};

struct pci_dynids {
	spinlock_t		lock;	/* Protects list, index */
	struct list_head	list;	/* For IDs added at runtime */
};


/*
 * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
 * a set of callbacks in struct pci_error_handlers, that device driver
 * will be notified of PCI bus errors, and will be driven to recovery
 * when an error occurs.
 */

typedef unsigned int __bitwise pci_ers_result_t;

enum pci_ers_result {
	/* No result/none/not supported in device driver */
	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

	/* Device driver can recover without slot reset */
	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

	/* Device driver wants slot to be reset */
	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

	/* Device has completely failed, is unrecoverable */
	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

	/* Device driver is fully recovered and operational */
	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

	/* No AER capabilities registered for the driver */
	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};

/* PCI bus error event callbacks */
struct pci_error_handlers {
	/* PCI bus error detected on this device */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
					   pci_channel_state_t error);

	/* MMIO has been re-enabled, but not DMA */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

	/* PCI slot has been reset */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

	/* PCI function reset prepare or completed */
	void (*reset_prepare)(struct pci_dev *dev);
	void (*reset_done)(struct pci_dev *dev);

	/* Device driver may resume normal operations */
	void (*resume)(struct pci_dev *dev);

	/* Allow device driver to record more details of a correctable error */
	void (*cor_error_detected)(struct pci_dev *dev);
};


struct module;
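
/*
 * Example (illustrative): a driver opting into error recovery fills in a
 * subset of these callbacks and points its struct pci_driver::err_handler
 * at the table.  A minimal sketch with hypothetical my_*() callbacks:
 *
 *	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
 *						  pci_channel_state_t state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,
 *		.resume		= my_resume,
 *	};
 */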

/**
 * struct pci_driver - PCI driver structure
 * @name:		Driver name.
 * @id_table:		Pointer to table of device IDs the driver is
 *			interested in.  Most drivers should export this
 *			table using MODULE_DEVICE_TABLE(pci,...).
 * @probe:		This probing function gets called (during execution
 *			of pci_register_driver() for already existing
 *			devices or later if a new device gets inserted) for
 *			all PCI devices which match the ID table and are not
 *			"owned" by the other drivers yet. This function gets
 *			passed a "struct pci_dev \*" for each device whose
 *			entry in the ID table matches the device. The probe
 *			function returns zero when the driver chooses to
 *			take "ownership" of the device or an error code
 *			(negative number) otherwise.
 *			The probe function always gets called from process
 *			context, so it can sleep.
 * @remove:		The remove() function gets called whenever a device
 *			being handled by this driver is removed (either during
 *			deregistration of the driver or when it's manually
 *			pulled out of a hot-pluggable slot).
 *			The remove function always gets called from process
 *			context, so it can sleep.
 * @suspend:		Put device into low power state.
 * @resume:		Wake device from low power state.
 *			(Please see Documentation/power/pci.rst for descriptions
 *			of PCI Power Management and the related functions.)
 * @shutdown:		Hook into reboot_notifier_list (kernel/sys.c).
 *			Intended to stop any idling DMA operations.
 *			Useful for enabling wake-on-lan (NIC) or changing
 *			the power state of a device before reboot.
 *			e.g. drivers/net/e100.c.
 * @sriov_configure:	Optional driver callback to allow configuration of
 *			number of VFs to enable via sysfs "sriov_numvfs" file.
 * @sriov_set_msix_vec_count: PF driver callback to change the number of MSI-X
 *			vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
 *			This will change MSI-X Table Size in the VF Message Control
 *			registers.
 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
 *			MSI-X vectors available for distribution to the VFs.
 * @err_handler:	See Documentation/PCI/pci-error-recovery.rst
 * @groups:		Sysfs attribute groups.
 * @dev_groups:		Attributes attached to the device that will be
 *			created once it is bound to the driver.
 * @driver:		Driver model structure.
 * @dynids:		List of dynamically added device IDs.
 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
 *			For most device drivers, no need to care about this flag
 *			as long as all DMAs are handled through the kernel DMA API.
 *			For some special ones, for example VFIO drivers, they know
 *			how to manage the DMA themselves and set this flag so that
 *			the IOMMU layer will allow them to setup and manage their
 *			own I/O address space.
 */
struct pci_driver {
	const char		*name;
	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
	void (*shutdown)(struct pci_dev *dev);
	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);
	const struct pci_error_handlers *err_handler;
	const struct attribute_group **groups;
	const struct attribute_group **dev_groups;
	struct device_driver	driver;
	struct pci_dynids	dynids;
	bool driver_managed_dma;
};

#define to_pci_driver(__drv)	\
	( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
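
/*
 * Example (illustrative): a minimal driver skeleton built on struct
 * pci_driver.  The registration helpers (pci_register_driver(),
 * module_pci_driver()) are declared elsewhere in this header; PCI_DEVICE()
 * is defined just below, and my_probe(), my_remove() and the IDs are
 * hypothetical:
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE(MY_VENDOR_ID, MY_DEVICE_ID) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_ids);
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *	module_pci_driver(my_driver);
 */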

/**
 * PCI_DEVICE - macro used to describe a specific PCI device
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE(vend,dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
 *                              override_only flags.
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @driver_override: the 32 bit PCI Device override_only
 *
 * This macro is used to create a struct pci_device_id that matches only a
 * driver_override device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
	.subdevice = PCI_ANY_ID, .override_only = (driver_override)

/**
 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
 *                                   "driver_override" PCI device.
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID and the driver_override will be set to
 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
 */
#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)

/**
 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @subvend: the 16 bit PCI Subvendor ID
 * @subdev: the 16 bit PCI Subdevice ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device with subsystem information.
 */
#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev)

/**
 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
 * @dev_class: the class, subclass, prog-if triple for this device
 * @dev_class_mask: the class mask for this device
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI class.  The vendor, device, subvendor, and subdevice
 * fields will be set to PCI_ANY_ID.
 */
#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
	.class = (dev_class), .class_mask = (dev_class_mask), \
	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
 * @vend: the vendor name
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor and subdevice fields will be set
 * to PCI_ANY_ID.  The macro allows the next field to follow as the device
 * private data.
 */
#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
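
/*
 * Example (illustrative): an ID table mixing the matching macros above;
 * the numeric IDs are hypothetical.  The class entry matches any NVMe-class
 * device regardless of vendor, the subsystem entry matches one specific
 * board:
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 *		{ PCI_DEVICE_SUB(0x1234, 0x5678, 0x1234, 0x0001) },
 *		{ }
 *	};
 */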

/**
 * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
 * @vend: the vendor name
 * @dev: the 16 bit PCI Device ID
 * @subvend: the 16 bit PCI Subvendor ID
 * @subdev: the 16 bit PCI Subdevice ID
 *
 * Generate the pci_device_id struct layout for the specific PCI
 * device/subdevice. Private data may follow the output.
 */
#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev), 0, 0

/**
 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
 * @data: the driver data to be filled
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor and subdevice fields will be set
 * to PCI_ANY_ID.
 */
#define PCI_DEVICE_DATA(vend, dev, data) \
	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
	.driver_data = (kernel_ulong_t)(data)

enum {
	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
};

#define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */

/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI

extern unsigned int pci_flags;

static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }

void pcie_bus_configure_settings(struct pci_bus *bus);

enum pcie_bus_config_types {
	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
};

extern enum pcie_bus_config_types pcie_bus_config;

extern const struct bus_type pci_bus_type;
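
/*
 * Example (illustrative): the PCI_IRQ_* flags above are typically passed to
 * pci_alloc_irq_vectors() (declared elsewhere in this header) to state which
 * interrupt types the driver can use, in order of preference:
 *
 *	nvecs = pci_alloc_irq_vectors(pdev, 1, 8,
 *				      PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvecs < 0)
 *		return nvecs;
 */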

/* Do NOT directly access these two variables, unless you are arch-specific PCI
 * code, or PCI core code. */
extern struct list_head pci_root_buses;	/* List of all known PCI buses */
/* Some device drivers need to know if PCI is initiated */
int no_pci_devices(void);

void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
/* Architecture-specific versions may override this (weak) */
char *pcibios_setup(char *str);

/* Used only when drivers/pci/setup.c is used */
resource_size_t pcibios_align_resource(void *, const struct resource *,
				resource_size_t,
				resource_size_t);

/* Weak but can be overridden by arch */
void pci_fixup_cardbus(struct pci_bus *);

/* Generic PCI functions used internally */

void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
			     struct resource *res);
void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
			     struct pci_bus_region *region);
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
				    struct pci_ops *ops, void *sysdata,
				    struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
				  struct pci_ops *ops, void *sysdata,
				  struct list_head *resources);
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
				 const char *name,
				 struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
#ifdef CONFIG_SYSFS
void pci_dev_assign_slot(struct pci_dev *dev);
#else
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
#endif
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
unsigned int pci_scan_child_bus(struct pci_bus *bus);
void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))

/* Generic PCI functions exported to card drivers */

u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
u8 pci_find_capability(struct pci_dev *dev, int cap);
u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);

u64 pci_get_dsn(struct pci_dev *dev);

struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
			       struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
			       unsigned int ss_vendor, unsigned int ss_device,
			       struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
					    unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);

int pci_dev_present(const struct pci_device_id *ids);

int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 *val);
int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 *val);
int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 *val);
int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
			      int where, u8 val);
int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
			      int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
			       int where, u32 val);

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val);
int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val);
int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val);
int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val);

struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
				    u32 clear, u32 set);
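
/*
 * Example (illustrative): a read-modify-write of the Command register using
 * the accessors above; "pdev" is hypothetical.  Real drivers normally prefer
 * pci_enable_device() and pci_set_master() to poking PCI_COMMAND directly:
 *
 *	u16 cmd;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	cmd |= PCI_COMMAND_MEMORY;
 *	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 */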

int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
						u16 clear, u16 set);
int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
					      u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set);

/**
 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
 * @dev:	PCI device structure of the PCI Express device
 * @pos:	PCI Express Capability Register
 * @clear:	Clear bitmask
 * @set:	Set bitmask
 *
 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
 * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
 * Capability Registers are accessed concurrently in RMW fashion, hence
 * require locking which is handled transparently to the caller.
 */
static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
						     int pos,
						     u16 clear, u16 set)
{
	switch (pos) {
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_RTCTL:
		return pcie_capability_clear_and_set_word_locked(dev, pos,
								 clear, set);
	default:
		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
								   clear, set);
	}
}

static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}

/* User-space driven config access */
int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);

int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_mem(struct pci_dev *dev);
int __must_check pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
void pcim_pin_device(struct pci_dev *pdev);
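
/*
 * Example (illustrative): setting a single bit in the PCIe Device Control
 * register with the RMW helpers above; registers such as Link Control are
 * transparently routed through the locked variant:
 *
 *	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_EXT_TAG);
 */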

static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
{
	/*
	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
	 * writable and no quirk has marked the feature broken.
	 */
	return !pdev->broken_intx_masking;
}

static inline int pci_is_enabled(struct pci_dev *pdev)
{
	return (atomic_read(&pdev->enable_cnt) > 0);
}

static inline int pci_is_managed(struct pci_dev *pdev)
{
	return pdev->is_managed;
}

void pci_disable_device(struct pci_dev *dev);

extern unsigned int pcibios_max_latency;
void pci_set_master(struct pci_dev *dev);
void pci_clear_master(struct pci_dev *dev);

int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
int pci_set_cacheline_size(struct pci_dev *dev);
int __must_check pci_set_mwi(struct pci_dev *dev);
int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_disable_parity(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width);
int pcie_link_speed_mbps(struct pci_dev *pdev);
void pcie_print_link_status(struct pci_dev *dev);
int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_probe_reset_bus(struct pci_bus *bus);
int pci_reset_bus(struct pci_dev *dev);
void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
void pci_release_resource(struct pci_dev *dev, int resno);
static inline int pci_rebar_bytes_to_size(u64 bytes)
{
	bytes = roundup_pow_of_two(bytes);

	/* Return BAR size as defined in the resizable BAR specification */
	return max(ilog2(bytes), 20) - 20;
}

u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
int pci_status_get_and_clear_errors(struct pci_dev *pdev);

int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
		const char *fmt, ...);
void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
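
/*
 * Example (illustrative): requesting vector 0 of an allocated IRQ set with
 * pci_request_irq(); my_handler() and "priv" are hypothetical, and the
 * interrupt is later released with pci_free_irq():
 *
 *	err = pci_request_irq(pdev, 0, my_handler, NULL, priv,
 *			      "%s", dev_name(&pdev->dev));
 *	if (err)
 *		return err;
 */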
control related routines */ 1425 int pci_enable_rom(struct pci_dev *pdev); 1426 void pci_disable_rom(struct pci_dev *pdev); 1427 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); 1428 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 1429 1430 /* Power management related routines */ 1431 int pci_save_state(struct pci_dev *dev); 1432 void pci_restore_state(struct pci_dev *dev); 1433 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); 1434 int pci_load_saved_state(struct pci_dev *dev, 1435 struct pci_saved_state *state); 1436 int pci_load_and_free_saved_state(struct pci_dev *dev, 1437 struct pci_saved_state **state); 1438 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state); 1439 int pci_set_power_state(struct pci_dev *dev, pci_power_t state); 1440 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state); 1441 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 1442 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); 1443 void pci_pme_active(struct pci_dev *dev, bool enable); 1444 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable); 1445 int pci_wake_from_d3(struct pci_dev *dev, bool enable); 1446 int pci_prepare_to_sleep(struct pci_dev *dev); 1447 int pci_back_from_sleep(struct pci_dev *dev); 1448 bool pci_dev_run_wake(struct pci_dev *dev); 1449 void pci_d3cold_enable(struct pci_dev *dev); 1450 void pci_d3cold_disable(struct pci_dev *dev); 1451 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); 1452 void pci_resume_bus(struct pci_bus *bus); 1453 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); 1454 1455 /* For use by arch with custom probe code */ 1456 void set_pcie_port_type(struct pci_dev *pdev); 1457 void set_pcie_hotplug_bridge(struct pci_dev *pdev); 1458 1459 /* Functions for PCI Hotplug drivers to use */ 1460 unsigned int pci_rescan_bus(struct pci_bus *bus); 1461 void pci_lock_rescan_remove(void); 1462 void pci_unlock_rescan_remove(void); 1463 1464 /* Vital Product Data routines */ 1465 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1466 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1467 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1468 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1469 1470 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 1471 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); 1472 void pci_bus_assign_resources(const struct pci_bus *bus); 1473 void pci_bus_claim_resources(struct pci_bus *bus); 1474 void pci_bus_size_bridges(struct pci_bus *bus); 1475 int pci_claim_resource(struct pci_dev *, int); 1476 int pci_claim_bridge_resource(struct pci_dev *bridge, int i); 1477 void pci_assign_unassigned_resources(void); 1478 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); 1479 void pci_assign_unassigned_bus_resources(struct pci_bus *bus); 1480 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus); 1481 int pci_enable_resources(struct pci_dev *, int mask); 1482 void pci_assign_irq(struct pci_dev *dev); 1483 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res); 1484 #define HAVE_PCI_REQ_REGIONS 2 1485 int __must_check pci_request_regions(struct pci_dev *, const char *); 1486 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); 
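/*
 * Minimal probe() sketch tying together helpers declared in this header
 * (pci_enable_device(), pci_request_regions(), pci_ioremap_bar(),
 * pci_set_master()).  The "foo" name, BAR 0 and the function itself are
 * hypothetical; error unwinding and the matching remove() are elided:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int ret;
 *
 *		ret = pci_enable_device(pdev);
 *		if (ret)
 *			return ret;
 *
 *		ret = pci_request_regions(pdev, "foo");
 *		if (ret)
 *			return ret;
 *
 *		regs = pci_ioremap_bar(pdev, 0);
 *		if (!regs)
 *			return -ENOMEM;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */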
1487 void pci_release_regions(struct pci_dev *); 1488 int __must_check pci_request_region(struct pci_dev *, int, const char *); 1489 void pci_release_region(struct pci_dev *, int); 1490 int pci_request_selected_regions(struct pci_dev *, int, const char *); 1491 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); 1492 void pci_release_selected_regions(struct pci_dev *, int); 1493 1494 static inline __must_check struct resource * 1495 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset, 1496 unsigned int len, const char *name) 1497 { 1498 return __request_region(&pdev->driver_exclusive_resource, offset, len, 1499 name, IORESOURCE_EXCLUSIVE); 1500 } 1501 1502 static inline void pci_release_config_region(struct pci_dev *pdev, 1503 unsigned int offset, 1504 unsigned int len) 1505 { 1506 __release_region(&pdev->driver_exclusive_resource, offset, len); 1507 } 1508 1509 /* drivers/pci/bus.c */ 1510 void pci_add_resource(struct list_head *resources, struct resource *res); 1511 void pci_add_resource_offset(struct list_head *resources, struct resource *res, 1512 resource_size_t offset); 1513 void pci_free_resource_list(struct list_head *resources); 1514 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res); 1515 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); 1516 void pci_bus_remove_resources(struct pci_bus *bus); 1517 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res); 1518 int devm_request_pci_bus_resources(struct device *dev, 1519 struct list_head *resources); 1520 1521 /* Temporary until new and working PCI SBR API in place */ 1522 int pci_bridge_secondary_bus_reset(struct pci_dev *dev); 1523 1524 #define __pci_bus_for_each_res0(bus, res, ...) \ 1525 for (unsigned int __b = 0; \ 1526 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \ 1527 __b++) 1528 1529 #define __pci_bus_for_each_res1(bus, res, __b) \ 1530 for (__b = 0; \ 1531 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \ 1532 __b++) 1533 1534 /** 1535 * pci_bus_for_each_resource - iterate over PCI bus resources 1536 * @bus: the PCI bus 1537 * @res: pointer to the current resource 1538 * @...: optional index of the current resource 1539 * 1540 * Iterate over PCI bus resources. The first part is to go over PCI bus 1541 * resource array, which has at most the %PCI_BRIDGE_RESOURCE_NUM entries. 1542 * After that continue with the separate list of the additional resources, 1543 * if not empty. That's why the Logical OR is being used. 1544 * 1545 * Possible usage: 1546 * 1547 * struct pci_bus *bus = ...; 1548 * struct resource *res; 1549 * unsigned int i; 1550 * 1551 * // With optional index 1552 * pci_bus_for_each_resource(bus, res, i) 1553 * pr_info("PCI bus resource[%u]: %pR\n", i, res); 1554 * 1555 * // Without index 1556 * pci_bus_for_each_resource(bus, res) 1557 * _do_something_(res); 1558 */ 1559 #define pci_bus_for_each_resource(bus, res, ...) 
\ 1560 CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \ 1561 (bus, res, __VA_ARGS__) 1562 1563 int __must_check pci_bus_alloc_resource(struct pci_bus *bus, 1564 struct resource *res, resource_size_t size, 1565 resource_size_t align, resource_size_t min, 1566 unsigned long type_mask, 1567 resource_alignf alignf, 1568 void *alignf_data); 1569 1570 1571 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr, 1572 resource_size_t size); 1573 unsigned long pci_address_to_pio(phys_addr_t addr); 1574 phys_addr_t pci_pio_to_address(unsigned long pio); 1575 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); 1576 int devm_pci_remap_iospace(struct device *dev, const struct resource *res, 1577 phys_addr_t phys_addr); 1578 void pci_unmap_iospace(struct resource *res); 1579 void __iomem *devm_pci_remap_cfgspace(struct device *dev, 1580 resource_size_t offset, 1581 resource_size_t size); 1582 void __iomem *devm_pci_remap_cfg_resource(struct device *dev, 1583 struct resource *res); 1584 1585 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) 1586 { 1587 struct pci_bus_region region; 1588 1589 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]); 1590 return region.start; 1591 } 1592 1593 /* Proper probing supporting hot-pluggable devices */ 1594 int __must_check __pci_register_driver(struct pci_driver *, struct module *, 1595 const char *mod_name); 1596 1597 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */ 1598 #define pci_register_driver(driver) \ 1599 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) 1600 1601 void pci_unregister_driver(struct pci_driver *dev); 1602 1603 /** 1604 * module_pci_driver() - Helper macro for registering a PCI driver 1605 * @__pci_driver: pci_driver struct 1606 * 1607 * Helper macro for PCI drivers which do not do anything special in module 1608 * init/exit. This eliminates a lot of boilerplate. Each module may only 1609 * use this macro once, and calling it replaces module_init() and module_exit() 1610 */ 1611 #define module_pci_driver(__pci_driver) \ 1612 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver) 1613 1614 /** 1615 * builtin_pci_driver() - Helper macro for registering a PCI driver 1616 * @__pci_driver: pci_driver struct 1617 * 1618 * Helper macro for PCI drivers which do not do anything special in their 1619 * init code. This eliminates a lot of boilerplate. Each driver may only 1620 * use this macro once, and calling it replaces device_initcall(...)
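 *
 * Illustrative use (the driver struct, ID table and callbacks are
 * hypothetical):
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_probe,
 *	};
 *	builtin_pci_driver(foo_pci_driver);
 *
 * module_pci_driver() above is the analogous helper for modular drivers.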
1621 */ 1622 #define builtin_pci_driver(__pci_driver) \ 1623 builtin_driver(__pci_driver, pci_register_driver) 1624 1625 struct pci_driver *pci_dev_driver(const struct pci_dev *dev); 1626 int pci_add_dynid(struct pci_driver *drv, 1627 unsigned int vendor, unsigned int device, 1628 unsigned int subvendor, unsigned int subdevice, 1629 unsigned int class, unsigned int class_mask, 1630 unsigned long driver_data); 1631 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 1632 struct pci_dev *dev); 1633 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, 1634 int pass); 1635 1636 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), 1637 void *userdata); 1638 int pci_cfg_space_size(struct pci_dev *dev); 1639 unsigned char pci_bus_max_busnr(struct pci_bus *bus); 1640 resource_size_t pcibios_window_alignment(struct pci_bus *bus, 1641 unsigned long type); 1642 1643 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) 1644 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) 1645 1646 int pci_set_vga_state(struct pci_dev *pdev, bool decode, 1647 unsigned int command_bits, u32 flags); 1648 1649 /* 1650 * Virtual interrupts allow for more interrupts to be allocated 1651 * than the device has interrupts for. These are not programmed 1652 * into the device's MSI-X table and must be handled by some 1653 * other driver means. 1654 */ 1655 #define PCI_IRQ_VIRTUAL (1 << 4) 1656 1657 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX) 1658 1659 #include <linux/dmapool.h> 1660 1661 struct msix_entry { 1662 u32 vector; /* Kernel uses to write allocated vector */ 1663 u16 entry; /* Driver uses to specify entry, OS writes */ 1664 }; 1665 1666 #ifdef CONFIG_PCI_MSI 1667 int pci_msi_vec_count(struct pci_dev *dev); 1668 void pci_disable_msi(struct pci_dev *dev); 1669 int pci_msix_vec_count(struct pci_dev *dev); 1670 void pci_disable_msix(struct pci_dev *dev); 1671 void pci_restore_msi_state(struct pci_dev *dev); 1672 int pci_msi_enabled(void); 1673 int pci_enable_msi(struct pci_dev *dev); 1674 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1675 int minvec, int maxvec); 1676 static inline int pci_enable_msix_exact(struct pci_dev *dev, 1677 struct msix_entry *entries, int nvec) 1678 { 1679 int rc = pci_enable_msix_range(dev, entries, nvec, nvec); 1680 if (rc < 0) 1681 return rc; 1682 return 0; 1683 } 1684 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 1685 unsigned int max_vecs, unsigned int flags); 1686 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1687 unsigned int max_vecs, unsigned int flags, 1688 struct irq_affinity *affd); 1689 1690 bool pci_msix_can_alloc_dyn(struct pci_dev *dev); 1691 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index, 1692 const struct irq_affinity_desc *affdesc); 1693 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map); 1694 1695 void pci_free_irq_vectors(struct pci_dev *dev); 1696 int pci_irq_vector(struct pci_dev *dev, unsigned int nr); 1697 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); 1698 1699 #else 1700 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1701 static inline void pci_disable_msi(struct pci_dev *dev) { } 1702 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1703 static inline void pci_disable_msix(struct pci_dev *dev) { } 1704 static inline void pci_restore_msi_state(struct pci_dev *dev) { } 1705 static 
inline int pci_msi_enabled(void) { return 0; } 1706 static inline int pci_enable_msi(struct pci_dev *dev) 1707 { return -ENOSYS; } 1708 static inline int pci_enable_msix_range(struct pci_dev *dev, 1709 struct msix_entry *entries, int minvec, int maxvec) 1710 { return -ENOSYS; } 1711 static inline int pci_enable_msix_exact(struct pci_dev *dev, 1712 struct msix_entry *entries, int nvec) 1713 { return -ENOSYS; } 1714 1715 static inline int 1716 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1717 unsigned int max_vecs, unsigned int flags, 1718 struct irq_affinity *aff_desc) 1719 { 1720 if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq) 1721 return 1; 1722 return -ENOSPC; 1723 } 1724 static inline int 1725 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 1726 unsigned int max_vecs, unsigned int flags) 1727 { 1728 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, 1729 flags, NULL); 1730 } 1731 1732 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev) 1733 { return false; } 1734 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index, 1735 const struct irq_affinity_desc *affdesc) 1736 { 1737 struct msi_map map = { .index = -ENOSYS, }; 1738 1739 return map; 1740 } 1741 1742 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map) 1743 { 1744 } 1745 1746 static inline void pci_free_irq_vectors(struct pci_dev *dev) 1747 { 1748 } 1749 1750 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) 1751 { 1752 if (WARN_ON_ONCE(nr > 0)) 1753 return -EINVAL; 1754 return dev->irq; 1755 } 1756 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, 1757 int vec) 1758 { 1759 return cpu_possible_mask; 1760 } 1761 #endif 1762 1763 /** 1764 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq 1765 * @d: the INTx IRQ domain 1766 * @node: the DT node for the device whose interrupt we're translating 1767 * @intspec: the interrupt specifier data from the DT 1768 * @intsize: the number of entries in @intspec 1769 * @out_hwirq: pointer at which to write the hwirq number 1770 * @out_type: pointer at which to write the interrupt type 1771 * 1772 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as 1773 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range 1774 * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the 1775 * INTx value to obtain the hwirq number. 1776 * 1777 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range. 
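 *
 * This helper is meant to be plugged in as the .xlate callback of a host
 * bridge driver's INTx IRQ domain, e.g. (the ops structure and .map
 * callback are illustrative):
 *
 *	static const struct irq_domain_ops foo_intx_domain_ops = {
 *		.map	= foo_intx_map,
 *		.xlate	= pci_irqd_intx_xlate,
 *	};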
1778 */ 1779 static inline int pci_irqd_intx_xlate(struct irq_domain *d, 1780 struct device_node *node, 1781 const u32 *intspec, 1782 unsigned int intsize, 1783 unsigned long *out_hwirq, 1784 unsigned int *out_type) 1785 { 1786 const u32 intx = intspec[0]; 1787 1788 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD) 1789 return -EINVAL; 1790 1791 *out_hwirq = intx - PCI_INTERRUPT_INTA; 1792 return 0; 1793 } 1794 1795 #ifdef CONFIG_PCIEPORTBUS 1796 extern bool pcie_ports_disabled; 1797 extern bool pcie_ports_native; 1798 1799 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req, 1800 bool use_lt); 1801 #else 1802 #define pcie_ports_disabled true 1803 #define pcie_ports_native false 1804 1805 static inline int pcie_set_target_speed(struct pci_dev *port, 1806 enum pci_bus_speed speed_req, 1807 bool use_lt) 1808 { 1809 return -EOPNOTSUPP; 1810 } 1811 #endif 1812 1813 #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */ 1814 #define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */ 1815 #define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */ 1816 #define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */ 1817 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */ 1818 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */ 1819 #define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\ 1820 PCIE_LINK_STATE_L1 |\ 1821 PCIE_LINK_STATE_L1_1 |\ 1822 PCIE_LINK_STATE_L1_2 |\ 1823 PCIE_LINK_STATE_L1_1_PCIPM |\ 1824 PCIE_LINK_STATE_L1_2_PCIPM) 1825 #define PCIE_LINK_STATE_CLKPM BIT(7) 1826 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\ 1827 PCIE_LINK_STATE_CLKPM) 1828 1829 #ifdef CONFIG_PCIEASPM 1830 int pci_disable_link_state(struct pci_dev *pdev, int state); 1831 int pci_disable_link_state_locked(struct pci_dev *pdev, int state); 1832 int pci_enable_link_state(struct pci_dev *pdev, int state); 1833 int pci_enable_link_state_locked(struct pci_dev *pdev, int state); 1834 void pcie_no_aspm(void); 1835 bool pcie_aspm_support_enabled(void); 1836 bool pcie_aspm_enabled(struct pci_dev *pdev); 1837 #else 1838 static inline int pci_disable_link_state(struct pci_dev *pdev, int state) 1839 { return 0; } 1840 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state) 1841 { return 0; } 1842 static inline int pci_enable_link_state(struct pci_dev *pdev, int state) 1843 { return 0; } 1844 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state) 1845 { return 0; } 1846 static inline void pcie_no_aspm(void) { } 1847 static inline bool pcie_aspm_support_enabled(void) { return false; } 1848 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } 1849 #endif 1850 1851 #ifdef CONFIG_PCIEAER 1852 bool pci_aer_available(void); 1853 #else 1854 static inline bool pci_aer_available(void) { return false; } 1855 #endif 1856 1857 bool pci_ats_disabled(void); 1858 1859 #ifdef CONFIG_PCIE_PTM 1860 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); 1861 void pci_disable_ptm(struct pci_dev *dev); 1862 bool pcie_ptm_enabled(struct pci_dev *dev); 1863 #else 1864 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) 1865 { return -EINVAL; } 1866 static inline void pci_disable_ptm(struct pci_dev *dev) { } 1867 static inline bool pcie_ptm_enabled(struct pci_dev *dev) 1868 { return false; } 1869 #endif 1870 1871 void pci_cfg_access_lock(struct pci_dev *dev); 1872 bool pci_cfg_access_trylock(struct pci_dev *dev); 1873 void pci_cfg_access_unlock(struct pci_dev *dev); 1874 1875 void 
pci_dev_lock(struct pci_dev *dev); 1876 int pci_dev_trylock(struct pci_dev *dev); 1877 void pci_dev_unlock(struct pci_dev *dev); 1878 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T)) 1879 1880 /* 1881 * PCI domain support. Sometimes called PCI segment (eg by ACPI), 1882 * a PCI domain is defined to be a set of PCI buses which share 1883 * configuration space. 1884 */ 1885 #ifdef CONFIG_PCI_DOMAINS 1886 extern int pci_domains_supported; 1887 #else 1888 enum { pci_domains_supported = 0 }; 1889 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1890 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } 1891 #endif /* CONFIG_PCI_DOMAINS */ 1892 1893 /* 1894 * Generic implementation for PCI domain support. If your 1895 * architecture does not need custom management of PCI 1896 * domains then this implementation will be used 1897 */ 1898 #ifdef CONFIG_PCI_DOMAINS_GENERIC 1899 static inline int pci_domain_nr(struct pci_bus *bus) 1900 { 1901 return bus->domain_nr; 1902 } 1903 #ifdef CONFIG_ACPI 1904 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus); 1905 #else 1906 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) 1907 { return 0; } 1908 #endif 1909 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); 1910 void pci_bus_release_domain_nr(struct device *parent, int domain_nr); 1911 #endif 1912 1913 /* Some architectures require additional setup to direct VGA traffic */ 1914 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1915 unsigned int command_bits, u32 flags); 1916 void pci_register_set_vga_state(arch_set_vga_state_t func); 1917 1918 static inline int 1919 pci_request_io_regions(struct pci_dev *pdev, const char *name) 1920 { 1921 return pci_request_selected_regions(pdev, 1922 pci_select_bars(pdev, IORESOURCE_IO), name); 1923 } 1924 1925 static inline void 1926 pci_release_io_regions(struct pci_dev *pdev) 1927 { 1928 return pci_release_selected_regions(pdev, 1929 pci_select_bars(pdev, IORESOURCE_IO)); 1930 } 1931 1932 static inline int 1933 pci_request_mem_regions(struct pci_dev *pdev, const char *name) 1934 { 1935 return pci_request_selected_regions(pdev, 1936 pci_select_bars(pdev, IORESOURCE_MEM), name); 1937 } 1938 1939 static inline void 1940 pci_release_mem_regions(struct pci_dev *pdev) 1941 { 1942 return pci_release_selected_regions(pdev, 1943 pci_select_bars(pdev, IORESOURCE_MEM)); 1944 } 1945 1946 #else /* CONFIG_PCI is not enabled */ 1947 1948 static inline void pci_set_flags(int flags) { } 1949 static inline void pci_add_flags(int flags) { } 1950 static inline void pci_clear_flags(int flags) { } 1951 static inline int pci_has_flag(int flag) { return 0; } 1952 1953 /* 1954 * If the system does not have PCI, clearly these return errors. Define 1955 * these as simple inline functions to avoid hair in drivers. 
1956 */ 1957 #define _PCI_NOP(o, s, t) \ 1958 static inline int pci_##o##_config_##s(struct pci_dev *dev, \ 1959 int where, t val) \ 1960 { return PCIBIOS_FUNC_NOT_SUPPORTED; } 1961 1962 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \ 1963 _PCI_NOP(o, word, u16 x) \ 1964 _PCI_NOP(o, dword, u32 x) 1965 _PCI_NOP_ALL(read, *) 1966 _PCI_NOP_ALL(write,) 1967 1968 static inline struct pci_dev *pci_get_device(unsigned int vendor, 1969 unsigned int device, 1970 struct pci_dev *from) 1971 { return NULL; } 1972 1973 static inline struct pci_dev *pci_get_subsys(unsigned int vendor, 1974 unsigned int device, 1975 unsigned int ss_vendor, 1976 unsigned int ss_device, 1977 struct pci_dev *from) 1978 { return NULL; } 1979 1980 static inline struct pci_dev *pci_get_class(unsigned int class, 1981 struct pci_dev *from) 1982 { return NULL; } 1983 1984 static inline struct pci_dev *pci_get_base_class(unsigned int class, 1985 struct pci_dev *from) 1986 { return NULL; } 1987 1988 static inline int pci_dev_present(const struct pci_device_id *ids) 1989 { return 0; } 1990 1991 #define no_pci_devices() (1) 1992 #define pci_dev_put(dev) do { } while (0) 1993 1994 static inline void pci_set_master(struct pci_dev *dev) { } 1995 static inline void pci_clear_master(struct pci_dev *dev) { } 1996 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } 1997 static inline void pci_disable_device(struct pci_dev *dev) { } 1998 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; } 1999 static inline int pci_assign_resource(struct pci_dev *dev, int i) 2000 { return -EBUSY; } 2001 static inline int __must_check __pci_register_driver(struct pci_driver *drv, 2002 struct module *owner, 2003 const char *mod_name) 2004 { return 0; } 2005 static inline int pci_register_driver(struct pci_driver *drv) 2006 { return 0; } 2007 static inline void pci_unregister_driver(struct pci_driver *drv) { } 2008 static inline u8 pci_find_capability(struct pci_dev *dev, int cap) 2009 { return 0; } 2010 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap) 2011 { return 0; } 2012 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap) 2013 { return 0; } 2014 2015 static inline u64 pci_get_dsn(struct pci_dev *dev) 2016 { return 0; } 2017 2018 /* Power management related routines */ 2019 static inline int pci_save_state(struct pci_dev *dev) { return 0; } 2020 static inline void pci_restore_state(struct pci_dev *dev) { } 2021 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) 2022 { return 0; } 2023 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state) 2024 { return 0; } 2025 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) 2026 { return 0; } 2027 static inline pci_power_t pci_choose_state(struct pci_dev *dev, 2028 pm_message_t state) 2029 { return PCI_D0; } 2030 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 2031 int enable) 2032 { return 0; } 2033 2034 static inline struct resource *pci_find_resource(struct pci_dev *dev, 2035 struct resource *res) 2036 { return NULL; } 2037 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) 2038 { return -EIO; } 2039 static inline void pci_release_regions(struct pci_dev *dev) { } 2040 2041 static inline int pci_register_io_range(const struct fwnode_handle *fwnode, 2042 phys_addr_t addr, resource_size_t size) 2043 { return -EINVAL; } 2044 2045 static inline unsigned long pci_address_to_pio(phys_addr_t 
addr) { return -1; } 2046 2047 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) 2048 { return NULL; } 2049 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, 2050 unsigned int devfn) 2051 { return NULL; } 2052 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, 2053 unsigned int bus, unsigned int devfn) 2054 { return NULL; } 2055 2056 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 2057 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 2058 2059 #define dev_is_pci(d) (false) 2060 #define dev_is_pf(d) (false) 2061 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 2062 { return false; } 2063 static inline int pci_irqd_intx_xlate(struct irq_domain *d, 2064 struct device_node *node, 2065 const u32 *intspec, 2066 unsigned int intsize, 2067 unsigned long *out_hwirq, 2068 unsigned int *out_type) 2069 { return -EINVAL; } 2070 2071 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 2072 struct pci_dev *dev) 2073 { return NULL; } 2074 static inline bool pci_ats_disabled(void) { return true; } 2075 2076 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) 2077 { 2078 return -EINVAL; 2079 } 2080 2081 static inline int 2082 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 2083 unsigned int max_vecs, unsigned int flags, 2084 struct irq_affinity *aff_desc) 2085 { 2086 return -ENOSPC; 2087 } 2088 static inline int 2089 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 2090 unsigned int max_vecs, unsigned int flags) 2091 { 2092 return -ENOSPC; 2093 } 2094 #endif /* CONFIG_PCI */ 2095 2096 /* Include architecture-dependent settings and functions */ 2097 2098 #include <asm/pci.h> 2099 2100 /* 2101 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff 2102 * is expected to be an offset within that region. 2103 * 2104 */ 2105 int pci_mmap_resource_range(struct pci_dev *dev, int bar, 2106 struct vm_area_struct *vma, 2107 enum pci_mmap_state mmap_state, int write_combine); 2108 2109 #ifndef arch_can_pci_mmap_wc 2110 #define arch_can_pci_mmap_wc() 0 2111 #endif 2112 2113 #ifndef arch_can_pci_mmap_io 2114 #define arch_can_pci_mmap_io() 0 2115 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL) 2116 #else 2117 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); 2118 #endif 2119 2120 #ifndef pci_root_bus_fwnode 2121 #define pci_root_bus_fwnode(bus) NULL 2122 #endif 2123 2124 /* 2125 * These helpers provide future and backwards compatibility 2126 * for accessing popular PCI BAR info 2127 */ 2128 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)]) 2129 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start) 2130 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end) 2131 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags) 2132 #define pci_resource_len(dev,bar) \ 2133 (pci_resource_end((dev), (bar)) ? \ 2134 resource_size(pci_resource_n((dev), (bar))) : 0) 2135 2136 #define __pci_dev_for_each_res0(dev, res, ...) \ 2137 for (unsigned int __b = 0; \ 2138 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \ 2139 __b++) 2140 2141 #define __pci_dev_for_each_res1(dev, res, __b) \ 2142 for (__b = 0; \ 2143 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \ 2144 __b++) 2145 2146 #define pci_dev_for_each_resource(dev, res, ...) 
\ 2147 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \ 2148 (dev, res, __VA_ARGS__) 2149 2150 /* 2151 * Similar to the helpers above, these manipulate per-pci_dev 2152 * driver-specific data. They are really just a wrapper around 2153 * the generic device structure functions of these calls. 2154 */ 2155 static inline void *pci_get_drvdata(struct pci_dev *pdev) 2156 { 2157 return dev_get_drvdata(&pdev->dev); 2158 } 2159 2160 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) 2161 { 2162 dev_set_drvdata(&pdev->dev, data); 2163 } 2164 2165 static inline const char *pci_name(const struct pci_dev *pdev) 2166 { 2167 return dev_name(&pdev->dev); 2168 } 2169 2170 void pci_resource_to_user(const struct pci_dev *dev, int bar, 2171 const struct resource *rsrc, 2172 resource_size_t *start, resource_size_t *end); 2173 2174 /* 2175 * The world is not perfect and supplies us with broken PCI devices. 2176 * For at least a part of these bugs we need a work-around, so both 2177 * generic (drivers/pci/quirks.c) and per-architecture code can define 2178 * fixup hooks to be called for particular buggy devices. 2179 */ 2180 2181 struct pci_fixup { 2182 u16 vendor; /* Or PCI_ANY_ID */ 2183 u16 device; /* Or PCI_ANY_ID */ 2184 u32 class; /* Or PCI_ANY_ID */ 2185 unsigned int class_shift; /* should be 0, 8, 16 */ 2186 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 2187 int hook_offset; 2188 #else 2189 void (*hook)(struct pci_dev *dev); 2190 #endif 2191 }; 2192 2193 enum pci_fixup_pass { 2194 pci_fixup_early, /* Before probing BARs */ 2195 pci_fixup_header, /* After reading configuration header */ 2196 pci_fixup_final, /* Final phase of device fixups */ 2197 pci_fixup_enable, /* pci_enable_device() time */ 2198 pci_fixup_resume, /* pci_device_resume() */ 2199 pci_fixup_suspend, /* pci_device_suspend() */ 2200 pci_fixup_resume_early, /* pci_device_resume_early() */ 2201 pci_fixup_suspend_late, /* pci_device_suspend_late() */ 2202 }; 2203 2204 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 2205 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2206 class_shift, hook) \ 2207 __ADDRESSABLE(hook) \ 2208 asm(".section " #sec ", \"a\" \n" \ 2209 ".balign 16 \n" \ 2210 ".short " #vendor ", " #device " \n" \ 2211 ".long " #class ", " #class_shift " \n" \ 2212 ".long " #hook " - . \n" \ 2213 ".previous \n"); 2214 2215 /* 2216 * Clang's LTO may rename static functions in C, but has no way to 2217 * handle such renamings when referenced from inline asm. To work 2218 * around this, create global C stubs for these cases. 2219 */ 2220 #ifdef CONFIG_LTO_CLANG 2221 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2222 class_shift, hook, stub) \ 2223 void stub(struct pci_dev *dev); \ 2224 void stub(struct pci_dev *dev) \ 2225 { \ 2226 hook(dev); \ 2227 } \ 2228 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2229 class_shift, stub) 2230 #else 2231 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2232 class_shift, hook, stub) \ 2233 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2234 class_shift, hook) 2235 #endif 2236 2237 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2238 class_shift, hook) \ 2239 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2240 class_shift, hook, __UNIQUE_ID(hook)) 2241 #else 2242 /* Anonymous variables would be nice... 
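 *
 * __LINE__ is pasted into the fixup variable's name below as a stand-in.
 * For reference, quirks are declared with the DECLARE_PCI_FIXUP_*()
 * wrappers further down, along the lines of (vendor/device ID and hook
 * are hypothetical):
 *
 *	static void foo_fixup(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FOO, 0x0001, foo_fixup);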
*/ 2243 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ 2244 class_shift, hook) \ 2245 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ 2246 __attribute__((__section__(#section), aligned((sizeof(void *))))) \ 2247 = { vendor, device, class, class_shift, hook }; 2248 #endif 2249 2250 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ 2251 class_shift, hook) \ 2252 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 2253 hook, vendor, device, class, class_shift, hook) 2254 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ 2255 class_shift, hook) \ 2256 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 2257 hook, vendor, device, class, class_shift, hook) 2258 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ 2259 class_shift, hook) \ 2260 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 2261 hook, vendor, device, class, class_shift, hook) 2262 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ 2263 class_shift, hook) \ 2264 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 2265 hook, vendor, device, class, class_shift, hook) 2266 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 2267 class_shift, hook) \ 2268 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 2269 resume##hook, vendor, device, class, class_shift, hook) 2270 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 2271 class_shift, hook) \ 2272 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 2273 resume_early##hook, vendor, device, class, class_shift, hook) 2274 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 2275 class_shift, hook) \ 2276 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 2277 suspend##hook, vendor, device, class, class_shift, hook) 2278 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ 2279 class_shift, hook) \ 2280 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 2281 suspend_late##hook, vendor, device, class, class_shift, hook) 2282 2283 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 2284 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 2285 hook, vendor, device, PCI_ANY_ID, 0, hook) 2286 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 2287 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 2288 hook, vendor, device, PCI_ANY_ID, 0, hook) 2289 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 2290 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 2291 hook, vendor, device, PCI_ANY_ID, 0, hook) 2292 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 2293 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 2294 hook, vendor, device, PCI_ANY_ID, 0, hook) 2295 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 2296 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 2297 resume##hook, vendor, device, PCI_ANY_ID, 0, hook) 2298 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 2299 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 2300 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook) 2301 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 2302 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 2303 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook) 2304 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ 2305 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 2306 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook) 2307 2308 #ifdef CONFIG_PCI_QUIRKS 2309 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 2310 #else 2311 static inline void pci_fixup_device(enum pci_fixup_pass pass, 
2312 struct pci_dev *dev) { } 2313 #endif 2314 2315 int pcim_intx(struct pci_dev *pdev, int enabled); 2316 int pcim_request_all_regions(struct pci_dev *pdev, const char *name); 2317 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); 2318 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, 2319 const char *name); 2320 void pcim_iounmap_region(struct pci_dev *pdev, int bar); 2321 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); 2322 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); 2323 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name); 2324 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name); 2325 void pcim_iounmap_regions(struct pci_dev *pdev, int mask); 2326 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar, 2327 unsigned long offset, unsigned long len); 2328 2329 extern int pci_pci_problems; 2330 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ 2331 #define PCIPCI_TRITON 2 2332 #define PCIPCI_NATOMA 4 2333 #define PCIPCI_VIAETBF 8 2334 #define PCIPCI_VSFX 16 2335 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ 2336 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ 2337 2338 extern u8 pci_dfl_cache_line_size; 2339 extern u8 pci_cache_line_size; 2340 2341 /* Architecture-specific versions may override these (weak) */ 2342 void pcibios_disable_device(struct pci_dev *dev); 2343 void pcibios_set_master(struct pci_dev *dev); 2344 int pcibios_set_pcie_reset_state(struct pci_dev *dev, 2345 enum pcie_reset_state state); 2346 int pcibios_device_add(struct pci_dev *dev); 2347 void pcibios_release_device(struct pci_dev *dev); 2348 #ifdef CONFIG_PCI 2349 void pcibios_penalize_isa_irq(int irq, int active); 2350 #else 2351 static inline void pcibios_penalize_isa_irq(int irq, int active) {} 2352 #endif 2353 int pcibios_alloc_irq(struct pci_dev *dev); 2354 void pcibios_free_irq(struct pci_dev *dev); 2355 resource_size_t pcibios_default_alignment(void); 2356 2357 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE) 2358 extern int pci_create_resource_files(struct pci_dev *dev); 2359 extern void pci_remove_resource_files(struct pci_dev *dev); 2360 #endif 2361 2362 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG) 2363 void __init pci_mmcfg_early_init(void); 2364 void __init pci_mmcfg_late_init(void); 2365 #else 2366 static inline void pci_mmcfg_early_init(void) { } 2367 static inline void pci_mmcfg_late_init(void) { } 2368 #endif 2369 2370 int pci_ext_cfg_avail(void); 2371 2372 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 2373 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar); 2374 2375 #ifdef CONFIG_PCI_IOV 2376 int pci_iov_virtfn_bus(struct pci_dev *dev, int id); 2377 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); 2378 int pci_iov_vf_id(struct pci_dev *dev); 2379 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver); 2380 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); 2381 void pci_disable_sriov(struct pci_dev *dev); 2382 2383 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id); 2384 int pci_iov_add_virtfn(struct pci_dev *dev, int id); 2385 void pci_iov_remove_virtfn(struct pci_dev *dev, int id); 2386 int pci_num_vf(struct pci_dev *dev); 2387 int pci_vfs_assigned(struct pci_dev *dev); 2388 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); 2389 int pci_sriov_get_totalvfs(struct pci_dev *dev); 2390 int pci_sriov_configure_simple(struct pci_dev *dev, 
int nr_virtfn); 2391 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); 2392 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); 2393 2394 /* Arch may override these (weak) */ 2395 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); 2396 int pcibios_sriov_disable(struct pci_dev *pdev); 2397 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); 2398 #else 2399 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) 2400 { 2401 return -ENOSYS; 2402 } 2403 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) 2404 { 2405 return -ENOSYS; 2406 } 2407 2408 static inline int pci_iov_vf_id(struct pci_dev *dev) 2409 { 2410 return -ENOSYS; 2411 } 2412 2413 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev, 2414 struct pci_driver *pf_driver) 2415 { 2416 return ERR_PTR(-EINVAL); 2417 } 2418 2419 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) 2420 { return -ENODEV; } 2421 2422 static inline int pci_iov_sysfs_link(struct pci_dev *dev, 2423 struct pci_dev *virtfn, int id) 2424 { 2425 return -ENODEV; 2426 } 2427 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id) 2428 { 2429 return -ENOSYS; 2430 } 2431 static inline void pci_iov_remove_virtfn(struct pci_dev *dev, 2432 int id) { } 2433 static inline void pci_disable_sriov(struct pci_dev *dev) { } 2434 static inline int pci_num_vf(struct pci_dev *dev) { return 0; } 2435 static inline int pci_vfs_assigned(struct pci_dev *dev) 2436 { return 0; } 2437 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) 2438 { return 0; } 2439 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) 2440 { return 0; } 2441 #define pci_sriov_configure_simple NULL 2442 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) 2443 { return 0; } 2444 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } 2445 #endif 2446 2447 /** 2448 * pci_pcie_cap - get the saved PCIe capability offset 2449 * @dev: PCI device 2450 * 2451 * PCIe capability offset is calculated at PCI device initialization 2452 * time and saved in the data structure. This function returns saved 2453 * PCIe capability offset. Using this instead of pci_find_capability() 2454 * reduces unnecessary search in the PCI configuration space. If you 2455 * need to calculate PCIe capability offset from raw device for some 2456 * reasons, please use pci_find_capability() instead. 2457 */ 2458 static inline int pci_pcie_cap(struct pci_dev *dev) 2459 { 2460 return dev->pcie_cap; 2461 } 2462 2463 /** 2464 * pci_is_pcie - check if the PCI device is PCI Express capable 2465 * @dev: PCI device 2466 * 2467 * Returns: true if the PCI device is PCI Express capable, false otherwise. 
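 *
 * A common pattern (sketch; handle_root_port() is illustrative) is to
 * combine this check with pci_pcie_type() below, since the port type is
 * only meaningful for PCIe devices:
 *
 *	if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
 *		handle_root_port(dev);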
2468 */ 2469 static inline bool pci_is_pcie(struct pci_dev *dev) 2470 { 2471 return pci_pcie_cap(dev); 2472 } 2473 2474 /** 2475 * pcie_caps_reg - get the PCIe Capabilities Register 2476 * @dev: PCI device 2477 */ 2478 static inline u16 pcie_caps_reg(const struct pci_dev *dev) 2479 { 2480 return dev->pcie_flags_reg; 2481 } 2482 2483 /** 2484 * pci_pcie_type - get the PCIe device/port type 2485 * @dev: PCI device 2486 */ 2487 static inline int pci_pcie_type(const struct pci_dev *dev) 2488 { 2489 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; 2490 } 2491 2492 /** 2493 * pcie_find_root_port - Get the PCIe root port device 2494 * @dev: PCI device 2495 * 2496 * Traverse up the parent chain and return the PCIe Root Port PCI Device 2497 * for a given PCI/PCIe Device. 2498 */ 2499 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) 2500 { 2501 while (dev) { 2502 if (pci_is_pcie(dev) && 2503 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 2504 return dev; 2505 dev = pci_upstream_bridge(dev); 2506 } 2507 2508 return NULL; 2509 } 2510 2511 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) 2512 { 2513 /* 2514 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg() 2515 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache 2516 * the value (e.g. inside the loop in pci_dev_wait()). 2517 */ 2518 return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure; 2519 } 2520 2521 void pci_request_acs(void); 2522 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); 2523 bool pci_acs_path_enabled(struct pci_dev *start, 2524 struct pci_dev *end, u16 acs_flags); 2525 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); 2526 2527 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 2528 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) 2529 2530 /* Large Resource Data Type Tag Item Names */ 2531 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ 2532 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */ 2533 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */ 2534 2535 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING) 2536 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) 2537 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) 2538 2539 #define PCI_VPD_RO_KEYWORD_PARTNO "PN" 2540 #define PCI_VPD_RO_KEYWORD_SERIALNO "SN" 2541 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN" 2542 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0" 2543 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV" 2544 2545 /** 2546 * pci_vpd_alloc - Allocate buffer and read VPD into it 2547 * @dev: PCI device 2548 * @size: pointer to field where VPD length is returned 2549 * 2550 * Returns pointer to allocated buffer or an ERR_PTR in case of failure 2551 */ 2552 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size); 2553 2554 /** 2555 * pci_vpd_find_id_string - Locate id string in VPD 2556 * @buf: Pointer to buffered VPD data 2557 * @len: The length of the buffer area in which to search 2558 * @size: Pointer to field where length of id string is returned 2559 * 2560 * Returns the index of the id string or -ENOENT if not found. 
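 *
 * Typical use together with pci_vpd_alloc() (sketch, error handling
 * elided; the id string starts at the returned index within @buf and is
 * *@size bytes long):
 *
 *	unsigned int vpd_size, id_len;
 *	void *vpd = pci_vpd_alloc(dev, &vpd_size);
 *	int idx = pci_vpd_find_id_string(vpd, vpd_size, &id_len);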
2561 */ 2562 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size); 2563 2564 /** 2565 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section 2566 * @buf: Pointer to buffered VPD data 2567 * @len: The length of the buffer area in which to search 2568 * @kw: The keyword to search for 2569 * @size: Pointer to field where length of found keyword data is returned 2570 * 2571 * Returns the index of the information field keyword data or -ENOENT if 2572 * not found. 2573 */ 2574 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, 2575 const char *kw, unsigned int *size); 2576 2577 /** 2578 * pci_vpd_check_csum - Check VPD checksum 2579 * @buf: Pointer to buffered VPD data 2580 * @len: VPD size 2581 * 2582 * Returns 1 if VPD has no checksum, otherwise 0 or an errno 2583 */ 2584 int pci_vpd_check_csum(const void *buf, unsigned int len); 2585 2586 /* PCI <-> OF binding helpers */ 2587 #ifdef CONFIG_OF 2588 struct device_node; 2589 struct irq_domain; 2590 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); 2591 bool pci_host_of_has_msi_map(struct device *dev); 2592 2593 /* Arch may override this (weak) */ 2594 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); 2595 2596 #else /* CONFIG_OF */ 2597 static inline struct irq_domain * 2598 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } 2599 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; } 2600 #endif /* CONFIG_OF */ 2601 2602 static inline struct device_node * 2603 pci_device_to_OF_node(const struct pci_dev *pdev) 2604 { 2605 return pdev ? pdev->dev.of_node : NULL; 2606 } 2607 2608 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) 2609 { 2610 return bus ? 
bus->dev.of_node : NULL; 2611 } 2612 2613 #ifdef CONFIG_ACPI 2614 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); 2615 2616 void 2617 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); 2618 bool pci_pr3_present(struct pci_dev *pdev); 2619 #else 2620 static inline struct irq_domain * 2621 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } 2622 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } 2623 #endif 2624 2625 #if defined(CONFIG_X86) && defined(CONFIG_ACPI) 2626 bool arch_pci_dev_is_removable(struct pci_dev *pdev); 2627 #else 2628 static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; } 2629 #endif 2630 2631 #ifdef CONFIG_EEH 2632 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) 2633 { 2634 return pdev->dev.archdata.edev; 2635 } 2636 #endif 2637 2638 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns); 2639 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); 2640 int pci_for_each_dma_alias(struct pci_dev *pdev, 2641 int (*fn)(struct pci_dev *pdev, 2642 u16 alias, void *data), void *data); 2643 2644 /* Helper functions for operation of device flag */ 2645 static inline void pci_set_dev_assigned(struct pci_dev *pdev) 2646 { 2647 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; 2648 } 2649 static inline void pci_clear_dev_assigned(struct pci_dev *pdev) 2650 { 2651 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; 2652 } 2653 static inline bool pci_is_dev_assigned(struct pci_dev *pdev) 2654 { 2655 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; 2656 } 2657 2658 /** 2659 * pci_ari_enabled - query ARI forwarding status 2660 * @bus: the PCI bus 2661 * 2662 * Returns true if ARI forwarding is enabled. 2663 */ 2664 static inline bool pci_ari_enabled(struct pci_bus *bus) 2665 { 2666 return bus->self && bus->self->ari_enabled; 2667 } 2668 2669 /** 2670 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain 2671 * @pdev: PCI device to check 2672 * 2673 * Walk upwards from @pdev and check for each encountered bridge if it's part 2674 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not 2675 * Thunderbolt-attached. (But rather soldered to the mainboard usually.) 2676 */ 2677 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) 2678 { 2679 struct pci_dev *parent = pdev; 2680 2681 if (pdev->is_thunderbolt) 2682 return true; 2683 2684 while ((parent = pci_upstream_bridge(parent))) 2685 if (parent->is_thunderbolt) 2686 return true; 2687 2688 return false; 2689 } 2690 2691 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) 2692 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); 2693 #endif 2694 2695 #include <linux/dma-mapping.h> 2696 2697 #define pci_printk(level, pdev, fmt, arg...) \ 2698 dev_printk(level, &(pdev)->dev, fmt, ##arg) 2699 2700 #define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) 2701 #define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) 2702 #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) 2703 #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) 2704 #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) 2705 #define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg) 2706 #define pci_notice(pdev, fmt, arg...) 
dev_notice(&(pdev)->dev, fmt, ##arg) 2707 #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) 2708 #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) 2709 2710 #define pci_notice_ratelimited(pdev, fmt, arg...) \ 2711 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg) 2712 2713 #define pci_info_ratelimited(pdev, fmt, arg...) \ 2714 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg) 2715 2716 #define pci_WARN(pdev, condition, fmt, arg...) \ 2717 WARN(condition, "%s %s: " fmt, \ 2718 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg) 2719 2720 #define pci_WARN_ONCE(pdev, condition, fmt, arg...) \ 2721 WARN_ONCE(condition, "%s %s: " fmt, \ 2722 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg) 2723 2724 #endif /* LINUX_PCI_H */ 2725