/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Alexander Motin <[email protected]>
 * Copyright 2019 Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/pciio.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pcib_private.h>

#include <dev/vmd/vmd.h>

#include "pcib_if.h"

struct vmd_type {
	u_int16_t	vmd_vid;
	u_int16_t	vmd_did;
	char		*vmd_name;
	int		flags;
#define BUS_RESTRICT	1
#define VECTOR_OFFSET	2
#define CAN_BYPASS_MSI	4
};

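/*
 * VMD-specific registers in the device's PCI configuration space: VMD_CAP
 * advertises whether the bus number range is restricted, and VMD_CONFIG
 * holds the MSI bypass enable bit and the starting bus number selection.
 */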
#define VMD_CAP		0x40
#define VMD_BUS_RESTRICT	0x1

#define VMD_CONFIG	0x44
#define VMD_BYPASS_MSI	0x2
#define VMD_BUS_START(x)	(((x) >> 8) & 0x3)

#define VMD_LOCK	0x70

SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Intel Volume Management Device tuning parameters");

/*
 * By default all VMD devices remap child MSI/MSI-X interrupts into their
 * own.  This creates additional isolation, but also complicates things
 * due to sharing, etc.  Fortunately some VMD devices can bypass the
 * remapping.
 */
static int vmd_bypass_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, bypass_msi, CTLFLAG_RWTUN, &vmd_bypass_msi, 0,
    "Bypass MSI remapping on capable hardware");

/*
 * All MSIs within a group share the same address, so VMD can't distinguish
 * them.  It makes no sense to use more than one per device unless required
 * by a specific device driver.
 */
static int vmd_max_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0,
    "Maximum number of MSI vectors per device");

/*
 * MSI-X can use different addresses, but we have only a limited number of
 * MSI-X vectors to route them to, so use a conservative default to try to
 * avoid sharing.
 */
static int vmd_max_msix = 3;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0,
    "Maximum number of MSI-X vectors per device");

static struct vmd_type vmd_devs[] = {
	{ 0x8086, 0x201d, "Intel Volume Management Device", 0 },
	{ 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT | CAN_BYPASS_MSI },
	{ 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0x7d0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0xa77f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0x8086, 0xad0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
	{ 0, 0, NULL, 0 }
};

static int
vmd_probe(device_t dev)
{
	struct vmd_type *t;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did) {
			device_set_desc(dev, t->vmd_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static void
vmd_free(struct vmd_softc *sc)
{
	struct vmd_irq *vi;
	struct vmd_irq_user *u;
	int i;

	if (sc->psc.bus.rman.rm_end != 0)
		rman_fini(&sc->psc.bus.rman);
	if (sc->psc.mem.rman.rm_end != 0)
		rman_fini(&sc->psc.mem.rman);
	while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) {
		LIST_REMOVE(u, viu_link);
		free(u, M_DEVBUF);
	}
	if (sc->vmd_irq != NULL) {
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			if (vi->vi_res == NULL)
				continue;
			bus_teardown_intr(sc->psc.dev, vi->vi_res,
			    vi->vi_handle);
			bus_release_resource(sc->psc.dev, SYS_RES_IRQ,
			    vi->vi_rid, vi->vi_res);
		}
	}
	free(sc->vmd_irq, M_DEVBUF);
	sc->vmd_irq = NULL;
	pci_release_msi(sc->psc.dev);
	for (i = 0; i < VMD_MAX_BAR; i++) {
		if (sc->vmd_regs_res[i] != NULL)
			bus_release_resource(sc->psc.dev, SYS_RES_MEMORY,
			    sc->vmd_regs_rid[i], sc->vmd_regs_res[i]);
	}
}

/* Config space of the hidden PCI root buses is mapped through BAR(0). */

static uint32_t
vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return (0xffffffff);

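	/*
	 * Standard ECAM-style layout: 4 KB of config space per function
	 * (<< 12), 8 functions per slot (<< 15), 32 slots per bus, so each
	 * bus occupies 1 MB (<< 20) of BAR(0).
	 */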
	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_read_4(sc->vmd_regs_res[0], offset));
	case 2:
		return (bus_read_2(sc->vmd_regs_res[0], offset));
	case 1:
		return (bus_read_1(sc->vmd_regs_res[0], offset));
	default:
		__assert_unreachable();
		return (0xffffffff);
	}
}

static void
vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
    uint32_t val, int width)
{
	struct vmd_softc *sc;
	bus_addr_t offset;

	sc = device_get_softc(dev);
	if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
		return;

	offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;

	switch (width) {
	case 4:
		return (bus_write_4(sc->vmd_regs_res[0], offset, val));
	case 2:
		return (bus_write_2(sc->vmd_regs_res[0], offset, val));
	case 1:
		return (bus_write_1(sc->vmd_regs_res[0], offset, val));
	default:
		__assert_unreachable();
	}
}

static void
vmd_set_msi_bypass(device_t dev, bool enable)
{
	uint16_t val;

	val = pci_read_config(dev, VMD_CONFIG, 2);
	if (enable)
		val |= VMD_BYPASS_MSI;
	else
		val &= ~VMD_BYPASS_MSI;
	pci_write_config(dev, VMD_CONFIG, val, 2);
}

static int
vmd_intr(void *arg)
{
	/*
	 * We have nothing to do here, but we have to register some interrupt
	 * handler to make the PCI code set up and enable the MSI-X vector.
	 */
	return (FILTER_STRAY);
}

static int
vmd_attach(device_t dev)
{
	struct vmd_softc *sc;
	struct pcib_secbus *bus;
	struct pcib_window *w;
	struct vmd_type *t;
	struct vmd_irq *vi;
	uint16_t vid, did;
	uint32_t bar;
	int i, j, error;
	char buf[64];

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->psc.dev = dev;
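	/*
	 * Claim a separate PCI domain for the hidden hierarchy, counting
	 * down from PCI_DOMAINMAX to stay clear of host-assigned domains.
	 */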
	sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev);

	pci_enable_busmaster(dev);

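	/*
	 * Map the BARs: BAR(0) holds the children's config space, while the
	 * following two provide memory windows.  64-bit BARs consume two
	 * registers each, hence the extra increment of j.
	 */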
	for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) {
		sc->vmd_regs_rid[i] = PCIR_BAR(j);
		bar = pci_read_config(dev, sc->vmd_regs_rid[i], 4);
		if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
		    PCIM_BAR_MEM_64)
			j++;
		if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) {
			device_printf(dev, "Cannot allocate resources\n");
			goto fail;
		}
	}

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (t = vmd_devs; t->vmd_name != NULL; t++) {
		if (vid == t->vmd_vid && did == t->vmd_did)
			break;
	}

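	/*
	 * On BUS_RESTRICT-capable parts the firmware may restrict the bus
	 * number range; the two-bit VMD_BUS_START field of VMD_CONFIG
	 * selects the starting bus of the hidden hierarchy.
	 */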
	sc->vmd_bus_start = 0;
	if ((t->flags & BUS_RESTRICT) &&
	    (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) {
		switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) {
		case 0:
			sc->vmd_bus_start = 0;
			break;
		case 1:
			sc->vmd_bus_start = 128;
			break;
		case 2:
			sc->vmd_bus_start = 224;
			break;
		default:
			device_printf(dev, "Unknown bus offset\n");
			goto fail;
		}
	}
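	/*
	 * Each bus takes 1 MB of BAR(0) config space, so the BAR size
	 * bounds the highest usable bus number.
	 */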
	sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start +
	    (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1);

	bus = &sc->psc.bus;
	bus->sec = sc->vmd_bus_start;
	bus->sub = sc->vmd_bus_end;
	bus->dev = dev;
	bus->rman.rm_start = 0;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
	if (error) {
		device_printf(dev, "Failed to initialize bus rman\n");
		bus->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&bus->rman, sc->vmd_bus_start,
	    sc->vmd_bus_end);
	if (error) {
		device_printf(dev, "Failed to add resource to bus rman\n");
		goto fail;
	}

	w = &sc->psc.mem;
	w->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev));
	w->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&w->rman);
	if (error) {
		device_printf(dev, "Failed to initialize memory rman\n");
		w->rman.rm_end = 0;
		goto fail;
	}
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[1]),
	    rman_get_end(sc->vmd_regs_res[1]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}
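	/*
	 * Skip the first 8 KB of the second memory window; assumption: it
	 * appears reserved for the VMD controller's own use (the Linux
	 * driver skips the same range), so children must not map it.
	 */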
	error = rman_manage_region(&w->rman,
	    rman_get_start(sc->vmd_regs_res[2]) + 0x2000,
	    rman_get_end(sc->vmd_regs_res[2]));
	if (error) {
		device_printf(dev, "Failed to add resource to memory rman\n");
		goto fail;
	}

	LIST_INIT(&sc->vmd_users);
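	/*
	 * Some devices reserve MSI-X vector 0 for their own use
	 * (VECTOR_OFFSET), so child interrupts are spread over the
	 * remaining vectors starting from 1.
	 */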
	sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 1 : 0;
	sc->vmd_msix_count = pci_msix_count(dev);
	if (vmd_bypass_msi && (t->flags & CAN_BYPASS_MSI)) {
		sc->vmd_msix_count = 0;
		vmd_set_msi_bypass(dev, true);
	} else if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
		sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
		    sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < sc->vmd_msix_count; i++) {
			vi = &sc->vmd_irq[i];
			vi->vi_rid = i + 1;
			vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
			    &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE);
			if (vi->vi_res == NULL) {
				device_printf(dev, "Failed to allocate irq\n");
				goto fail;
			}
			vi->vi_irq = rman_get_start(vi->vi_res);
			if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC |
			    INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) {
				device_printf(dev, "Can't set up interrupt\n");
				bus_release_resource(dev, SYS_RES_IRQ,
				    vi->vi_rid, vi->vi_res);
				vi->vi_res = NULL;
				goto fail;
			}
		}
		vmd_set_msi_bypass(dev, false);
	}

	sc->vmd_dma_tag = bus_get_dma_tag(dev);

	sc->psc.child = device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));

fail:
	vmd_free(sc);
	return (ENXIO);
}

static int
vmd_detach(device_t dev)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int error;

	error = bus_generic_detach(dev);
	if (error)
		return (error);
	error = device_delete_children(dev);
	if (error)
		return (error);
	if (sc->vmd_msix_count == 0)
		vmd_set_msi_bypass(dev, false);
	vmd_free(sc);
	return (0);
}

static bus_dma_tag_t
vmd_get_dma_tag(device_t dev, device_t child)
{
	struct vmd_softc *sc = device_get_softc(dev);

	return (sc->vmd_dma_tag);
}

static struct resource *
vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct resource *res;

	switch (type) {
	case SYS_RES_IRQ:
		/* VMD hardware does not support legacy interrupts. */
		if (*rid == 0)
			return (NULL);
		return (bus_generic_alloc_resource(dev, child, type, rid,
		    start, end, count, flags | RF_SHAREABLE));
	case SYS_RES_MEMORY:
		res = rman_reserve_resource(&sc->psc.mem.rman, start, end,
		    count, flags, child);
		if (res == NULL)
			return (NULL);
		if (bootverbose)
			device_printf(dev,
			    "allocated memory range (%#jx-%#jx) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
		break;
	case PCI_RES_BUS:
		res = rman_reserve_resource(&sc->psc.bus.rman, start, end,
		    count, flags, child);
		if (res == NULL)
			return (NULL);
		if (bootverbose)
			device_printf(dev,
			    "allocated bus range (%ju-%ju) for rid %d of %s\n",
			    rman_get_start(res), rman_get_end(res), *rid,
			    pcib_child_name(child));
		break;
	default:
		/* VMD hardware does not support I/O ports. */
		return (NULL);
	}
	rman_set_rid(res, *rid);
	return (res);
}

static int
vmd_adjust_resource(device_t dev, device_t child, int type,
    struct resource *r, rman_res_t start, rman_res_t end)
{

	if (type == SYS_RES_IRQ) {
		return (bus_generic_adjust_resource(dev, child, type, r,
		    start, end));
	}
	return (rman_adjust_resource(r, start, end));
}

static int
vmd_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{

	if (type == SYS_RES_IRQ) {
		return (bus_generic_release_resource(dev, child, type, rid,
		    r));
	}
	return (rman_release_resource(r));
}

static int
vmd_route_interrupt(device_t dev, device_t child, int pin)
{

	/* VMD hardware does not support legacy interrupts. */
	return (PCI_INVALID_IRQ);
}

static int
vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_ALLOC_MSI(device_get_parent(device_get_parent(dev)),
		    child, count, maxcount, irqs));
	}

	if (count > vmd_max_msi)
		return (ENOSPC);
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			return (EBUSY);
	}

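	/* Find the VMD MSI-X vector with the fewest users and share it. */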
	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers += count;

	for (i = 0; i < count; i++)
		irqs[i] = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msi(device_t dev, device_t child, int count, int *irqs)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)),
		    child, count, irqs));
	}

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child) {
			sc->vmd_irq[u->viu_vector].vi_nusers -= count;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

static int
vmd_alloc_msix(device_t dev, device_t child, int *irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;
	int i, ibest = 0, best = INT_MAX;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_ALLOC_MSIX(device_get_parent(device_get_parent(dev)),
		    child, irq));
	}

	i = 0;
	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child)
			i++;
	}
	if (i >= vmd_max_msix)
		return (ENOSPC);

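	/* As for MSI: pick the least-used VMD vector for this allocation. */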
	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (best > sc->vmd_irq[i].vi_nusers) {
			best = sc->vmd_irq[i].vi_nusers;
			ibest = i;
		}
	}

	u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
	u->viu_child = child;
	u->viu_vector = ibest;
	LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
	sc->vmd_irq[ibest].vi_nusers++;

	*irq = sc->vmd_irq[ibest].vi_irq;
	return (0);
}

static int
vmd_release_msix(device_t dev, device_t child, int irq)
{
	struct vmd_softc *sc = device_get_softc(dev);
	struct vmd_irq_user *u;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
		    child, irq));
	}

	LIST_FOREACH(u, &sc->vmd_users, viu_link) {
		if (u->viu_child == child &&
		    sc->vmd_irq[u->viu_vector].vi_irq == irq) {
			sc->vmd_irq[u->viu_vector].vi_nusers--;
			LIST_REMOVE(u, viu_link);
			free(u, M_DEVBUF);
			return (0);
		}
	}
	return (EINVAL);
}

static int
vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data)
{
	struct vmd_softc *sc = device_get_softc(dev);
	int i;

	if (sc->vmd_msix_count == 0) {
		return (PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)),
		    child, irq, addr, data));
	}

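	/*
	 * In remapping mode VMD routes child interrupts by address alone:
	 * encode the vector index in bits 12+ of the Intel MSI address and
	 * leave the data word zero.
	 */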
	for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
		if (sc->vmd_irq[i].vi_irq == irq)
			break;
	}
	if (i >= sc->vmd_msix_count)
		return (EINVAL);
	*addr = MSI_INTEL_ADDR_BASE | (i << 12);
	*data = 0;
	return (0);
}

static device_method_t vmd_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			vmd_probe),
	DEVMETHOD(device_attach,		vmd_attach),
	DEVMETHOD(device_detach,		vmd_detach),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,		vmd_get_dma_tag),
	DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,		vmd_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		vmd_adjust_resource),
	DEVMETHOD(bus_release_resource,		vmd_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		pcib_maxslots),
	DEVMETHOD(pcib_read_config,		vmd_read_config),
	DEVMETHOD(pcib_write_config,		vmd_write_config),
	DEVMETHOD(pcib_route_interrupt,		vmd_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,		vmd_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmd_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmd_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmd_release_msix),
	DEVMETHOD(pcib_map_msi,			vmd_map_msi),
	DEVMETHOD(pcib_request_feature,		pcib_request_feature_allow),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
    vmd_devs, nitems(vmd_devs) - 1);