/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _LINUX_PCI_H_
#define _LINUX_PCI_H_

#define CONFIG_PCI_MSI

#include <linux/types.h>

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>

#include <machine/resource.h>

#include <linux/list.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <asm/atomic.h>
#include <linux/device.h>
struct pci_device_id {
	uint32_t	vendor;
	uint32_t	device;
	uint32_t	subvendor;
	uint32_t	subdevice;
	uint32_t	class;
	uint32_t	class_mask;
	uintptr_t	driver_data;
};

#define MODULE_DEVICE_TABLE(bus, table)

#define PCI_BASE_CLASS_DISPLAY 0x03
#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_DISPLAY_OTHER 0x0380
#define PCI_BASE_CLASS_BRIDGE 0x06
#define PCI_CLASS_BRIDGE_ISA 0x0601

#define PCI_ANY_ID -1U
#define PCI_VENDOR_ID_APPLE 0x106b
#define PCI_VENDOR_ID_ASUSTEK 0x1043
#define PCI_VENDOR_ID_ATI 0x1002
#define PCI_VENDOR_ID_DELL 0x1028
#define PCI_VENDOR_ID_HP 0x103c
#define PCI_VENDOR_ID_IBM 0x1014
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_VENDOR_ID_MELLANOX 0x15b3
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_VENDOR_ID_SERVERWORKS 0x1166
#define PCI_VENDOR_ID_SONY 0x104d
#define PCI_VENDOR_ID_TOPSPIN 0x1867
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46
#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
#define PCI_SUBDEVICE_ID_QEMU 0x1100

#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
#define PCI_BUS_NUM(devfn) (((devfn) >> 8) & 0xff)

#define PCI_VDEVICE(_vendor, _device) \
	.vendor = PCI_VENDOR_ID_##_vendor, .device = (_device), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
#define PCI_DEVICE(_vendor, _device) \
	.vendor = (_vendor), .device = (_device), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
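
/*
 * A device-ID table for a LinuxKPI driver is built from the macros above in
 * the usual Linux style.  A minimal sketch (the "mydrv" name and the bare
 * Intel device ID 0x1234 are hypothetical, chosen only for illustration):
 *
 *	static const struct pci_device_id mydrv_id_table[] = {
 *		{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL) },
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ 0, }
 *	};
 *	MODULE_DEVICE_TABLE(pci, mydrv_id_table);
 *
 * MODULE_DEVICE_TABLE() is a no-op here; the table is only consumed by the
 * driver's probe routine through its "id" argument.
 */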

#define to_pci_dev(n) container_of(n, struct pci_dev, dev)

#define PCI_VENDOR_ID PCIR_DEVVENDOR
#define PCI_COMMAND PCIR_COMMAND
#define PCI_EXP_DEVCTL PCIER_DEVICE_CTL /* Device Control */
#define PCI_EXP_LNKCTL PCIER_LINK_CTL /* Link Control */
#define PCI_EXP_FLAGS_TYPE PCIEM_FLAGS_TYPE /* Device/Port type */
#define PCI_EXP_DEVCAP PCIER_DEVICE_CAP /* Device capabilities */
#define PCI_EXP_DEVSTA PCIER_DEVICE_STA /* Device Status */
#define PCI_EXP_LNKCAP PCIER_LINK_CAP /* Link Capabilities */
#define PCI_EXP_LNKSTA PCIER_LINK_STA /* Link Status */
#define PCI_EXP_SLTCAP PCIER_SLOT_CAP /* Slot Capabilities */
#define PCI_EXP_SLTCTL PCIER_SLOT_CTL /* Slot Control */
#define PCI_EXP_SLTSTA PCIER_SLOT_STA /* Slot Status */
#define PCI_EXP_RTCTL PCIER_ROOT_CTL /* Root Control */
#define PCI_EXP_RTCAP PCIER_ROOT_CAP /* Root Capabilities */
#define PCI_EXP_RTSTA PCIER_ROOT_STA /* Root Status */
#define PCI_EXP_DEVCAP2 PCIER_DEVICE_CAP2 /* Device Capabilities 2 */
#define PCI_EXP_DEVCTL2 PCIER_DEVICE_CTL2 /* Device Control 2 */
#define PCI_EXP_LNKCAP2 PCIER_LINK_CAP2 /* Link Capabilities 2 */
#define PCI_EXP_LNKCTL2 PCIER_LINK_CTL2 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 PCIER_LINK_STA2 /* Link Status 2 */
#define PCI_EXP_FLAGS PCIER_FLAGS /* Capabilities register */
#define PCI_EXP_FLAGS_VERS PCIEM_FLAGS_VERSION /* Capability version */
#define PCI_EXP_TYPE_ROOT_PORT PCIEM_TYPE_ROOT_PORT /* Root Port */
#define PCI_EXP_TYPE_ENDPOINT PCIEM_TYPE_ENDPOINT /* Express Endpoint */
#define PCI_EXP_TYPE_LEG_END PCIEM_TYPE_LEGACY_ENDPOINT /* Legacy Endpoint */
#define PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT /* Downstream Port */
#define PCI_EXP_FLAGS_SLOT PCIEM_FLAGS_SLOT /* Slot implemented */
#define PCI_EXP_TYPE_RC_EC PCIEM_TYPE_ROOT_EC /* Root Complex Event Collector */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x01 /* Supported Link Speed 2.5GT/s */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x02 /* Supported Link Speed 5.0GT/s */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x04 /* Supported Link Speed 8.0GT/s */
#define PCI_EXP_LNKCAP_SLS_16_0GB 0x08 /* Supported Link Speed 16.0GT/s */
#define PCI_EXP_LNKCAP_MLW 0x03f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x10 /* Supported Link Speed 16.0GT/s */

#define PCI_EXP_LNKCTL_HAWD PCIEM_LINK_CTL_HAWD
#define PCI_EXP_LNKCAP_CLKPM 0x00040000
#define PCI_EXP_DEVSTA_TRPND 0x0020

#define IORESOURCE_MEM (1 << SYS_RES_MEMORY)
#define IORESOURCE_IO (1 << SYS_RES_IOPORT)
#define IORESOURCE_IRQ (1 << SYS_RES_IRQ)

enum pci_bus_speed {
	PCI_SPEED_UNKNOWN = -1,
	PCIE_SPEED_2_5GT,
	PCIE_SPEED_5_0GT,
	PCIE_SPEED_8_0GT,
	PCIE_SPEED_16_0GT,
};

enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV = 0x00,
	PCIE_LNK_X1 = 0x01,
	PCIE_LNK_X2 = 0x02,
	PCIE_LNK_X4 = 0x04,
	PCIE_LNK_X8 = 0x08,
	PCIE_LNK_X12 = 0x0c,
	PCIE_LNK_X16 = 0x10,
	PCIE_LNK_X32 = 0x20,
	PCIE_LNK_WIDTH_UNKNOWN = 0xff,
};

typedef int pci_power_t;

#define PCI_D0 PCI_POWERSTATE_D0
#define PCI_D1 PCI_POWERSTATE_D1
#define PCI_D2 PCI_POWERSTATE_D2
#define PCI_D3hot PCI_POWERSTATE_D3
#define PCI_D3cold 4

#define PCI_POWER_ERROR PCI_POWERSTATE_UNKNOWN

struct pci_dev;

struct pci_driver {
	struct list_head	links;
	char			*name;
	const struct pci_device_id	*id_table;
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
	void (*remove)(struct pci_dev *dev);
	int  (*suspend) (struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume) (struct pci_dev *dev);		/* Device woken up */
	void (*shutdown) (struct pci_dev *dev);		/* Device shutdown */
	driver_t		bsddriver;
	devclass_t		bsdclass;
	struct device_driver	driver;
	const struct pci_error_handlers *err_handler;
	bool			isdrm;
};

struct pci_bus {
	struct pci_dev	*self;
	int		number;
};

extern struct list_head pci_drivers;
extern struct list_head pci_devices;
extern spinlock_t pci_lock;

#define __devexit_p(x) x

struct pci_dev {
	struct device		dev;
	struct list_head	links;
	struct pci_driver	*pdrv;
	struct pci_bus		*bus;
	uint64_t		dma_mask;
	uint16_t		device;
	uint16_t		vendor;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	unsigned int		irq;
	unsigned int		devfn;
	uint32_t		class;
	uint8_t			revision;
	bool			msi_enabled;
};

static inline struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;

	dinfo = device_get_ivars(pdev->dev.bsddev);
	rl = &dinfo->resources;
	return resource_list_find(rl, type, rid);
}

static inline struct resource_list_entry *
linux_pci_get_bar(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	bar = PCIR_BAR(bar);
	if ((rle = linux_pci_get_rle(pdev, SYS_RES_MEMORY, bar)) == NULL)
		rle = linux_pci_get_rle(pdev, SYS_RES_IOPORT, bar);
	return (rle);
}

static inline struct device *
linux_pci_find_irq_dev(unsigned int irq)
{
	struct pci_dev *pdev;
	struct device *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (irq == pdev->dev.irq ||
		    (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
			found = &pdev->dev;
			break;
		}
	}
	spin_unlock(&pci_lock);
	return (found);
}

static inline unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return rle->start;
}

static inline unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return rle->count;
}

static inline int
pci_resource_type(struct pci_dev *pdev, int bar)
{
	struct pci_map *pm;

	pm = pci_find_bar(pdev->dev.bsddev, PCIR_BAR(bar));
	if (!pm)
		return (-1);

	if (PCI_BAR_IO(pm->pm_value))
		return (SYS_RES_IOPORT);
	else
		return (SYS_RES_MEMORY);
}

/*
 * Callers only appear to inspect the resource type, not the full Linux
 * flag bits, so only the type is reported here.
 */
static inline int
pci_resource_flags(struct pci_dev *pdev, int bar)
{
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (0);
	return (1 << type);
}
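
/*
 * Taken together, pci_resource_start(), pci_resource_len() and
 * pci_resource_flags() let a driver discover a BAR before mapping it.
 * A minimal sketch (the BAR number and usage are illustrative only):
 *
 *	unsigned long start, len;
 *
 *	if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
 *		start = pci_resource_start(pdev, 0);
 *		len = pci_resource_len(pdev, 0);
 *		(map "len" bytes of MMIO starting at bus address "start")
 *	}
 */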

static inline const char *
pci_name(struct pci_dev *d)
{

	return device_get_desc(d->dev.bsddev);
}

static inline void *
pci_get_drvdata(struct pci_dev *pdev)
{

	return dev_get_drvdata(&pdev->dev);
}

static inline void
pci_set_drvdata(struct pci_dev *pdev, void *data)
{

	dev_set_drvdata(&pdev->dev, data);
}

static inline int
pci_enable_device(struct pci_dev *pdev)
{

	pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT);
	pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY);
	return (0);
}

static inline void
pci_disable_device(struct pci_dev *pdev)
{

	pci_disable_busmaster(pdev->dev.bsddev);
}

static inline int
pci_set_master(struct pci_dev *pdev)
{

	pci_enable_busmaster(pdev->dev.bsddev);
	return (0);
}

static inline int
pci_set_power_state(struct pci_dev *pdev, int state)
{

	pci_set_powerstate(pdev->dev.bsddev, state);
	return (0);
}

static inline int
pci_clear_master(struct pci_dev *pdev)
{

	pci_disable_busmaster(pdev->dev.bsddev);
	return (0);
}

static inline int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	if (bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE) == NULL)
		return (-EINVAL);
	return (0);
}

static inline void
pci_release_region(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return;
	bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}

static inline void
pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++)
		pci_release_region(pdev, i);
}

static inline int
pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int error;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		error = pci_request_region(pdev, i, res_name);
		if (error && error != -ENODEV) {
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}
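
/*
 * The usual probe-time sequence with these wrappers mirrors Linux: enable
 * the device, claim its BARs, then turn on bus mastering.  A minimal sketch
 * (the "mydrv" probe function and its name string are hypothetical):
 *
 *	static int
 *	mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int error;
 *
 *		error = pci_enable_device(pdev);
 *		if (error != 0)
 *			return (error);
 *		error = pci_request_regions(pdev, "mydrv");
 *		if (error != 0)
 *			return (error);
 *		pci_set_master(pdev);
 *		return (0);
 *	}
 *
 * Note that res_name is accepted for API compatibility only; this
 * implementation tracks the resources per rid and does not record the name.
 */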

static inline void
pci_disable_msix(struct pci_dev *pdev)
{

	pci_release_msi(pdev->dev.bsddev);

	/*
	 * The MSI-X interrupt numbers associated with this PCI device
	 * are no longer valid and might get reassigned.  Make sure
	 * linux_pci_find_irq_dev() no longer sees them by resetting
	 * their references to zero:
	 */
	pdev->dev.irq_start = 0;
	pdev->dev.irq_end = 0;
}

#define pci_disable_msi(pdev) \
	linux_pci_disable_msi(pdev)

static inline void
linux_pci_disable_msi(struct pci_dev *pdev)
{

	pci_release_msi(pdev->dev.bsddev);

	pdev->dev.irq_start = 0;
	pdev->dev.irq_end = 0;
	pdev->irq = pdev->dev.irq;
	pdev->msi_enabled = false;
}

static inline bus_addr_t
pci_bus_address(struct pci_dev *pdev, int bar)
{

	return (pci_resource_start(pdev, bar));
}

#define PCI_CAP_ID_EXP PCIY_EXPRESS
#define PCI_CAP_ID_PCIX PCIY_PCIX
#define PCI_CAP_ID_AGP PCIY_AGP
#define PCI_CAP_ID_PM PCIY_PMG

#define PCI_EXP_DEVCTL PCIER_DEVICE_CTL
#define PCI_EXP_DEVCTL_PAYLOAD PCIEM_CTL_MAX_PAYLOAD
#define PCI_EXP_DEVCTL_READRQ PCIEM_CTL_MAX_READ_REQUEST
#define PCI_EXP_LNKCTL PCIER_LINK_CTL
#define PCI_EXP_LNKSTA PCIER_LINK_STA

static inline int
pci_find_capability(struct pci_dev *pdev, int capid)
{
	int reg;

	if (pci_find_cap(pdev->dev.bsddev, capid, &reg))
		return (0);
	return (reg);
}

static inline int pci_pcie_cap(struct pci_dev *dev)
{
	return pci_find_capability(dev, PCI_CAP_ID_EXP);
}

static inline int
pci_read_config_byte(struct pci_dev *pdev, int where, u8 *val)
{

	*val = (u8)pci_read_config(pdev->dev.bsddev, where, 1);
	return (0);
}

static inline int
pci_read_config_word(struct pci_dev *pdev, int where, u16 *val)
{

	*val = (u16)pci_read_config(pdev->dev.bsddev, where, 2);
	return (0);
}

static inline int
pci_read_config_dword(struct pci_dev *pdev, int where, u32 *val)
{

	*val = (u32)pci_read_config(pdev->dev.bsddev, where, 4);
	return (0);
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int where, u8 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 1);
	return (0);
}

static inline int
pci_write_config_word(struct pci_dev *pdev, int where, u16 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 2);
	return (0);
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int where, u32 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 4);
	return (0);
}
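
/*
 * The config-space accessors above always return 0 and pass through to the
 * native pci_read_config()/pci_write_config().  For example, reading the
 * vendor ID and command register (a minimal sketch):
 *
 *	u16 vendor, cmd;
 *
 *	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 */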

int linux_pci_register_driver(struct pci_driver *pdrv);
int linux_pci_register_drm_driver(struct pci_driver *pdrv);
void linux_pci_unregister_driver(struct pci_driver *pdrv);
void linux_pci_unregister_drm_driver(struct pci_driver *pdrv);

#define pci_register_driver(pdrv) linux_pci_register_driver(pdrv)
#define pci_unregister_driver(pdrv) linux_pci_unregister_driver(pdrv)

struct msix_entry {
	int entry;
	int vector;
};

/*
 * Enable MSI-X.  A positive return value reports the number of vectors
 * actually available when fewer than requested could be allocated; a
 * negative return value is an error.
 *
 * NB: the define is needed to keep this definition of pci_enable_msix()
 * from clashing with the native FreeBSD version.
 */
#define pci_enable_msix(...) \
	linux_pci_enable_msix(__VA_ARGS__)

static inline int
pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq)
{
	struct resource_list_entry *rle;
	int error;
	int avail;
	int i;

	avail = pci_msix_count(pdev->dev.bsddev);
	if (avail < nreq) {
		if (avail == 0)
			return -EINVAL;
		return avail;
	}
	avail = nreq;
	if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
		return error;
	/*
	 * Handle the case where pci_alloc_msix() allocates fewer
	 * interrupts than requested and returns with no error:
	 */
	if (avail < nreq) {
		pci_release_msi(pdev->dev.bsddev);
		return avail;
	}
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	for (i = 0; i < nreq; i++)
		entries[i].vector = pdev->dev.irq_start + i;
	return (0);
}

#define pci_enable_msix_range(...) \
	linux_pci_enable_msix_range(__VA_ARGS__)

static inline int
pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
    int minvec, int maxvec)
{
	int nvec = maxvec;
	int rc;

	if (maxvec < minvec)
		return (-ERANGE);

	do {
		rc = pci_enable_msix(dev, entries, nvec);
		if (rc < 0) {
			return (rc);
		} else if (rc > 0) {
			if (rc < minvec)
				return (-ENOSPC);
			nvec = rc;
		}
	} while (rc);
	return (nvec);
}
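
/*
 * pci_enable_msix_range() retries with the number of vectors actually
 * available until the allocation succeeds or falls below minvec.  A minimal
 * usage sketch (the vector count of 4 is illustrative only):
 *
 *	struct msix_entry entries[4];
 *	int i, nvec;
 *
 *	for (i = 0; i < 4; i++)
 *		entries[i].entry = i;
 *	nvec = pci_enable_msix_range(pdev, entries, 1, 4);
 *	if (nvec < 0)
 *		return (nvec);
 *	(nvec vectors were allocated; entries[0..nvec-1].vector hold the IRQs)
 */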

#define pci_enable_msi(pdev) \
	linux_pci_enable_msi(pdev)

static inline int
pci_enable_msi(struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	int error;
	int avail;

	avail = pci_msi_count(pdev->dev.bsddev);
	if (avail < 1)
		return -EINVAL;

	avail = 1;	/* this function only enables one MSI IRQ */
	if ((error = -pci_alloc_msi(pdev->dev.bsddev, &avail)) != 0)
		return error;

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

static inline int
pci_channel_offline(struct pci_dev *pdev)
{

	return (pci_get_vendor(pdev->dev.bsddev) == PCIV_INVALID);
}

static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	return -ENODEV;
}
static inline void pci_disable_sriov(struct pci_dev *dev)
{
}

#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitdata

/* XXX This should not be necessary. */
#define pcix_set_mmrbc(d, v) 0
#define pcix_get_max_mmrbc(d) 0
#define pcie_set_readrq(d, v) 0

#define PCI_DMA_BIDIRECTIONAL 0
#define PCI_DMA_TODEVICE 1
#define PCI_DMA_FROMDEVICE 2
#define PCI_DMA_NONE 3

#define pci_pool dma_pool
#define pci_pool_destroy(...) dma_pool_destroy(__VA_ARGS__)
#define pci_pool_alloc(...) dma_pool_alloc(__VA_ARGS__)
#define pci_pool_free(...) dma_pool_free(__VA_ARGS__)
#define pci_pool_create(_name, _pdev, _size, _align, _alloc) \
	dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc)
#define pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle) \
	dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
	    _size, _vaddr, _dma_handle)
#define pci_map_sg(_hwdev, _sg, _nents, _dir) \
	dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev), \
	    _sg, _nents, (enum dma_data_direction)_dir)
#define pci_map_single(_hwdev, _ptr, _size, _dir) \
	dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev), \
	    (_ptr), (_size), (enum dma_data_direction)_dir)
#define pci_unmap_single(_hwdev, _addr, _size, _dir) \
	dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
	    _addr, _size, (enum dma_data_direction)_dir)
#define pci_unmap_sg(_hwdev, _sg, _nents, _dir) \
	dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
	    _sg, _nents, (enum dma_data_direction)_dir)
#define pci_map_page(_hwdev, _page, _offset, _size, _dir) \
	dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page, \
	    _offset, _size, (enum dma_data_direction)_dir)
#define pci_unmap_page(_hwdev, _dma_address, _size, _dir) \
	dma_unmap_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
	    _dma_address, _size, (enum dma_data_direction)_dir)
#define pci_set_dma_mask(_pdev, mask) dma_set_mask(&(_pdev)->dev, (mask))
#define pci_dma_mapping_error(_pdev, _dma_addr) \
	dma_mapping_error(&(_pdev)->dev, _dma_addr)
#define pci_set_consistent_dma_mask(_pdev, _mask) \
	dma_set_coherent_mask(&(_pdev)->dev, (_mask))
#define DECLARE_PCI_UNMAP_ADDR(x) DEFINE_DMA_UNMAP_ADDR(x);
#define DECLARE_PCI_UNMAP_LEN(x) DEFINE_DMA_UNMAP_LEN(x);
#define pci_unmap_addr dma_unmap_addr
#define pci_unmap_addr_set dma_unmap_addr_set
#define pci_unmap_len dma_unmap_len
#define pci_unmap_len_set dma_unmap_len_set
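
/*
 * The pci_* DMA helpers above are thin aliases for the dma_* API using the
 * pci_dev's embedded struct device.  A hedged sketch of a single streaming
 * mapping (the buffer "buf" and length "len" are illustrative):
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (pci_dma_mapping_error(pdev, busaddr))
 *		return (-ENOMEM);
 *	(hand "busaddr" to the device, wait for completion)
 *	pci_unmap_single(pdev, busaddr, len, PCI_DMA_TODEVICE);
 */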

typedef unsigned int __bitwise pci_channel_state_t;
typedef unsigned int __bitwise pci_ers_result_t;

enum pci_channel_state {
	pci_channel_io_normal = 1,
	pci_channel_io_frozen = 2,
	pci_channel_io_perm_failure = 3,
};

enum pci_ers_result {
	PCI_ERS_RESULT_NONE = 1,
	PCI_ERS_RESULT_CAN_RECOVER = 2,
	PCI_ERS_RESULT_NEED_RESET = 3,
	PCI_ERS_RESULT_DISCONNECT = 4,
	PCI_ERS_RESULT_RECOVERED = 5,
};

/* PCI bus error event callbacks */
struct pci_error_handlers {
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
	    enum pci_channel_state error);
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
	pci_ers_result_t (*link_reset)(struct pci_dev *dev);
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
	void (*resume)(struct pci_dev *dev);
};

/* FreeBSD does not support SRIOV - yet */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
	return dev;
}

static inline bool pci_is_pcie(struct pci_dev *dev)
{
	return !!pci_pcie_cap(dev);
}

static inline u16 pcie_flags_reg(struct pci_dev *dev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos)
		return 0;

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);

	return reg16;
}

static inline int pci_pcie_type(struct pci_dev *dev)
{
	return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}

static inline int pcie_cap_version(struct pci_dev *dev)
{
	return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return pcie_cap_version(dev) > 1 ||
	    type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_ENDPOINT ||
	    type == PCI_EXP_TYPE_LEG_END;
}

static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
{
	return true;
}

static inline bool pcie_cap_has_sltctl(struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
	    (type == PCI_EXP_TYPE_DOWNSTREAM &&
	    pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT);
}

static inline bool pcie_cap_has_rtctl(struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS_TYPE:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return pcie_cap_has_devctl(dev);
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

static inline int
pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *dst)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return -EINVAL;

	return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst);
}

static inline int
pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *dst)
{
	/*
	 * Word-sized registers such as LNKSTA and DEVSTA only need to be
	 * 16-bit aligned, so reject odd offsets only.
	 */
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return -EINVAL;

	return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst);
}

static inline int
pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
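
/*
 * The pcie_capability_*() helpers validate that the requested register is
 * implemented for the device's PCIe capability version and port type before
 * touching config space.  A minimal sketch reading the link status:
 *
 *	u16 lnksta;
 *
 *	if (pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta) == 0) {
 *		(inspect the negotiated speed/width bits in lnksta)
 *	}
 */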

static inline int pcie_get_minimum_link(struct pci_dev *dev,
    enum pci_bus_speed *speed, enum pcie_link_width *width)
{
	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;
	return (0);
}

static inline int
pci_num_vf(struct pci_dev *dev)
{
	return (0);
}

static inline enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *dev)
{
	device_t root;
	uint32_t lnkcap, lnkcap2;
	int error, pos;

	root = device_get_parent(dev->dev.bsddev);
	if (root == NULL)
		return (PCI_SPEED_UNKNOWN);
	root = device_get_parent(root);
	if (root == NULL)
		return (PCI_SPEED_UNKNOWN);
	root = device_get_parent(root);
	if (root == NULL)
		return (PCI_SPEED_UNKNOWN);

	if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
	    pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
		return (PCI_SPEED_UNKNOWN);

	if ((error = pci_find_cap(root, PCIY_EXPRESS, &pos)) != 0)
		return (PCI_SPEED_UNKNOWN);

	lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);

	if (lnkcap2) {	/* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			return (PCIE_SPEED_2_5GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			return (PCIE_SPEED_5_0GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			return (PCIE_SPEED_8_0GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
			return (PCIE_SPEED_16_0GT);
	} else {	/* pre-r3.0 */
		lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			return (PCIE_SPEED_2_5GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			return (PCIE_SPEED_5_0GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
			return (PCIE_SPEED_8_0GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
			return (PCIE_SPEED_16_0GT);
	}
	return (PCI_SPEED_UNKNOWN);
}

static inline enum pcie_link_width
pcie_get_width_cap(struct pci_dev *dev)
{
	uint32_t lnkcap;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return ((lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);

	return (PCIE_LNK_WIDTH_UNKNOWN);
}
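
/*
 * pcie_get_speed_cap() reports the supported link speed found in the link
 * capability registers of the device's upstream (root-side) bridge, while
 * pcie_get_width_cap() reports the device's own maximum link width.  A
 * minimal sketch:
 *
 *	enum pci_bus_speed speed = pcie_get_speed_cap(pdev);
 *	enum pcie_link_width width = pcie_get_width_cap(pdev);
 *
 *	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
 *		(the capability could not be determined)
 */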

#endif	/* _LINUX_PCI_H_ */