/*
 * Copyright (c) 2008 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/memrange.h>
#include <sys/mman.h>
#include <sys/pciio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "pciaccess.h"
#include "pciaccess_private.h"

/*
 * This should allow for 16 domains, which should cover everything
 * except perhaps the really big fridge-sized sparc64 server machines
 * that are unlikely to have any graphics hardware in them.
 */
static int pcifd[16];
static int ndomains;

static int aperturefd = -1;

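/*
 * Read a 32-bit value from configuration space of the given
 * domain:bus:dev:func with the PCIOCREAD ioctl.
 */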
static int
pci_read(int domain, int bus, int dev, int func, uint32_t reg, uint32_t *val)
{
	struct pci_io io;
	int err;

	bzero(&io, sizeof(io));
	io.pi_sel.pc_bus = bus;
	io.pi_sel.pc_dev = dev;
	io.pi_sel.pc_func = func;
	io.pi_reg = reg;
	io.pi_width = 4;

	err = ioctl(pcifd[domain], PCIOCREAD, &io);
	if (err)
		return (err);

	*val = io.pi_data;

	return 0;
}

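/*
 * Write a 32-bit value to configuration space of the given
 * domain:bus:dev:func with the PCIOCWRITE ioctl.
 */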
static int
pci_write(int domain, int bus, int dev, int func, uint32_t reg, uint32_t val)
{
	struct pci_io io;

	bzero(&io, sizeof(io));
	io.pi_sel.pc_bus = bus;
	io.pi_sel.pc_dev = dev;
	io.pi_sel.pc_func = func;
	io.pi_reg = reg;
	io.pi_width = 4;
	io.pi_data = val;

	return ioctl(pcifd[domain], PCIOCWRITE, &io);
}

/**
 * Read a VGA ROM
 */
static int
pci_device_openbsd_read_rom(struct pci_device *device, void *buffer)
{
	struct pci_device_private *priv = (struct pci_device_private *)device;
	unsigned char *bios;
	pciaddr_t rom_base;
	pciaddr_t rom_size;
	u_int32_t csr, rom;
	int pci_rom, domain, bus, dev, func;

	domain = device->domain;
	if (domain < 0 || domain >= ndomains)
		return ENXIO;

	bus = device->bus;
	dev = device->dev;
	func = device->func;

	if (aperturefd == -1)
		return ENOSYS;

	if (priv->base.rom_size == 0) {
#if defined(__alpha__) || defined(__amd64__) || defined(__i386__)
		if ((device->device_class & 0x00ffff00) ==
		    ((PCI_CLASS_DISPLAY << 16) |
		     (PCI_SUBCLASS_DISPLAY_VGA << 8))) {
			rom_base = 0xc0000;
			rom_size = 0x10000;
			pci_rom = 0;
		} else
#endif
			return ENOSYS;
	} else {
		rom_base = priv->rom_base;
		rom_size = priv->base.rom_size;
		pci_rom = 1;

		pci_read(domain, bus, dev, func, PCI_COMMAND_STATUS_REG, &csr);
		pci_write(domain, bus, dev, func, PCI_COMMAND_STATUS_REG,
		    csr | PCI_COMMAND_MEM_ENABLE);
		pci_read(domain, bus, dev, func, PCI_ROM_REG, &rom);
		pci_write(domain, bus, dev, func, PCI_ROM_REG,
		    rom | PCI_ROM_ENABLE);
	}

	bios = mmap(NULL, rom_size, PROT_READ, MAP_SHARED,
	    aperturefd, (off_t)rom_base);
	if (bios == MAP_FAILED)
		return errno;

	memcpy(buffer, bios, rom_size);
	munmap(bios, rom_size);

	if (pci_rom) {
		/* Restore PCI config space */
		pci_write(domain, bus, dev, func, PCI_ROM_REG, rom);
		pci_write(domain, bus, dev, func, PCI_COMMAND_STATUS_REG, csr);
	}
	return 0;
}

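/*
 * Number of functions to scan on a device: 8 if the header type
 * marks it multi-function, otherwise 1; -1 if the header cannot be
 * read, ENXIO for an invalid domain.
 */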
static int
pci_nfuncs(int domain, int bus, int dev)
{
	uint32_t hdr;

	if (domain < 0 || domain >= ndomains)
		return ENXIO;

	if (pci_read(domain, bus, dev, 0, PCI_BHLC_REG, &hdr) != 0)
		return -1;

	return (PCI_HDRTYPE_MULTIFN(hdr) ? 8 : 1);
}

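/*
 * Map a range of physical memory through the aperture descriptor.
 * On i386/amd64 also program an MTRR when a cacheable or
 * write-combining mapping was requested.
 */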
static int
pci_device_openbsd_map_range(struct pci_device *dev,
    struct pci_device_mapping *map)
{
	struct mem_range_desc mr;
	struct mem_range_op mo;
	int prot = PROT_READ;

	if (map->flags & PCI_DEV_MAP_FLAG_WRITABLE)
		prot |= PROT_WRITE;

	map->memory = mmap(NULL, map->size, prot, MAP_SHARED, aperturefd,
	    map->base);
	if (map->memory == MAP_FAILED)
		return errno;
#if defined(__i386__) || defined(__amd64__)
	/* No need to set an MTRR if it's the default mode. */
	if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) ||
	    (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)) {
		mr.mr_base = map->base;
		mr.mr_len = map->size;
		mr.mr_flags = 0;
		if (map->flags & PCI_DEV_MAP_FLAG_CACHABLE)
			mr.mr_flags |= MDF_WRITEBACK;
		if (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)
			mr.mr_flags |= MDF_WRITECOMBINE;
		strlcpy(mr.mr_owner, "pciaccess", sizeof(mr.mr_owner));

		mo.mo_desc = &mr;
		mo.mo_arg[0] = MEMRANGE_SET_UPDATE;

		if (ioctl(aperturefd, MEMRANGE_SET, &mo))
			(void)fprintf(stderr, "mtrr set failed: %s\n",
			    strerror(errno));
	}
#endif
	return 0;
}

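/*
 * Undo a mapping made by pci_device_openbsd_map_range(), removing
 * any MTRR that was set up for it.
 */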
static int
pci_device_openbsd_unmap_range(struct pci_device *dev,
    struct pci_device_mapping *map)
{
#if defined(__i386__) || defined(__amd64__)
	struct mem_range_desc mr;
	struct mem_range_op mo;

	if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) ||
	    (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)) {
		mr.mr_base = map->base;
		mr.mr_len = map->size;
		mr.mr_flags = MDF_UNCACHEABLE;
		strlcpy(mr.mr_owner, "pciaccess", sizeof(mr.mr_owner));

		mo.mo_desc = &mr;
		mo.mo_arg[0] = MEMRANGE_SET_REMOVE;

		(void)ioctl(aperturefd, MEMRANGE_SET, &mo);
	}
#endif
	return pci_device_generic_unmap_range(dev, map);
}

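/*
 * Read an arbitrary range of configuration space, one aligned
 * 32-bit word at a time.
 */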
static int
pci_device_openbsd_read(struct pci_device *dev, void *data,
    pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_read)
{
	struct pci_io io;

	io.pi_sel.pc_bus = dev->bus;
	io.pi_sel.pc_dev = dev->dev;
	io.pi_sel.pc_func = dev->func;

	*bytes_read = 0;
	while (size > 0) {
		int toread = MIN(size, 4 - (offset & 0x3));

		io.pi_reg = (offset & ~0x3);
		io.pi_width = 4;

		if (ioctl(pcifd[dev->domain], PCIOCREAD, &io) == -1)
			return errno;

		io.pi_data = htole32(io.pi_data);
		io.pi_data >>= ((offset & 0x3) * 8);

		memcpy(data, &io.pi_data, toread);

		offset += toread;
		data = (char *)data + toread;
		size -= toread;
		*bytes_read += toread;
	}

	return 0;
}

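/*
 * Write a range of configuration space; offset and size must be
 * multiples of 4.
 */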
static int
pci_device_openbsd_write(struct pci_device *dev, const void *data,
    pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_written)
{
	struct pci_io io;

	if ((offset % 4) != 0 || (size % 4) != 0)
		return EINVAL;

	io.pi_sel.pc_bus = dev->bus;
	io.pi_sel.pc_dev = dev->dev;
	io.pi_sel.pc_func = dev->func;

	*bytes_written = 0;
	while (size > 0) {
		io.pi_reg = offset;
		io.pi_width = 4;
		memcpy(&io.pi_data, data, 4);

		if (ioctl(pcifd[dev->domain], PCIOCWRITE, &io) == -1)
			return errno;

		offset += 4;
		data = (char *)data + 4;
		size -= 4;
		*bytes_written += 4;
	}

	return 0;
}

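/* Close the per-domain /dev/pci* file descriptors. */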
static void
pci_system_openbsd_destroy(void)
{
	int domain;

	for (domain = 0; domain < ndomains; domain++)
		close(pcifd[domain]);
	ndomains = 0;
}

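/*
 * Fill in the BARs and expansion ROM of a type 0 device, sizing
 * each region by writing all ones and restoring the original value.
 */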
static int
pci_device_openbsd_probe(struct pci_device *device)
{
	struct pci_device_private *priv = (struct pci_device_private *)device;
	struct pci_mem_region *region;
	uint64_t reg64, size64;
	uint32_t bar, reg, size;
	int domain, bus, dev, func, err;

	domain = device->domain;
	bus = device->bus;
	dev = device->dev;
	func = device->func;

	err = pci_read(domain, bus, dev, func, PCI_BHLC_REG, &reg);
	if (err)
		return err;

	priv->header_type = PCI_HDRTYPE_TYPE(reg);
	if (priv->header_type != 0)
		return 0;

	region = device->regions;
	for (bar = PCI_MAPREG_START; bar < PCI_MAPREG_END;
	    bar += sizeof(uint32_t), region++) {
		err = pci_read(domain, bus, dev, func, bar, &reg);
		if (err)
			return err;

		/* Probe the size of the region. */
		err = pci_write(domain, bus, dev, func, bar, ~0);
		if (err)
			return err;
		pci_read(domain, bus, dev, func, bar, &size);
		pci_write(domain, bus, dev, func, bar, reg);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			region->is_IO = 1;
			region->base_addr = PCI_MAPREG_IO_ADDR(reg);
			region->size = PCI_MAPREG_IO_SIZE(size);
		} else {
			if (PCI_MAPREG_MEM_PREFETCHABLE(reg))
				region->is_prefetchable = 1;
			switch(PCI_MAPREG_MEM_TYPE(reg)) {
			case PCI_MAPREG_MEM_TYPE_32BIT:
			case PCI_MAPREG_MEM_TYPE_32BIT_1M:
				region->base_addr = PCI_MAPREG_MEM_ADDR(reg);
				region->size = PCI_MAPREG_MEM_SIZE(size);
				break;
			case PCI_MAPREG_MEM_TYPE_64BIT:
				region->is_64 = 1;

				reg64 = reg;
				size64 = size;

				bar += sizeof(uint32_t);

				err = pci_read(domain, bus, dev, func, bar, &reg);
				if (err)
					return err;
				reg64 |= (uint64_t)reg << 32;

				err = pci_write(domain, bus, dev, func, bar, ~0);
				if (err)
					return err;
				pci_read(domain, bus, dev, func, bar, &size);
				pci_write(domain, bus, dev, func, bar, reg64 >> 32);
				size64 |= (uint64_t)size << 32;

				region->base_addr = PCI_MAPREG_MEM64_ADDR(reg64);
				region->size = PCI_MAPREG_MEM64_SIZE(size64);
				region++;
				break;
			}
		}
	}

	/* Probe expansion ROM if present */
	err = pci_read(domain, bus, dev, func, PCI_ROM_REG, &reg);
	if (err)
		return err;
	if (reg != 0) {
		err = pci_write(domain, bus, dev, func, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		if (err)
			return err;
		pci_read(domain, bus, dev, func, PCI_ROM_REG, &size);
		pci_write(domain, bus, dev, func, PCI_ROM_REG, reg);

		if (PCI_ROM_ADDR(reg) != 0) {
			priv->rom_base = PCI_ROM_ADDR(reg);
			device->rom_size = PCI_ROM_SIZE(size);
		}
	}
	return 0;
}

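/* Backend methods handed to the generic pciaccess code. */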
static const struct pci_system_methods openbsd_pci_methods = {
	pci_system_openbsd_destroy,
	NULL,
	pci_device_openbsd_read_rom,
	pci_device_openbsd_probe,
	pci_device_openbsd_map_range,
	pci_device_openbsd_unmap_range,
	pci_device_openbsd_read,
	pci_device_openbsd_write,
	pci_fill_capabilities_generic
};

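/*
 * Open /dev/pci* for every domain, enumerate all buses and build
 * the pci_sys device list with ID, class and subsystem information.
 */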
int
pci_system_openbsd_create(void)
{
	struct pci_device_private *device;
	int domain, bus, dev, func, ndevs, nfuncs;
	char path[MAXPATHLEN];
	uint32_t reg;

	if (ndomains > 0)
		return 0;

	for (domain = 0; domain < sizeof(pcifd) / sizeof(pcifd[0]); domain++) {
		snprintf(path, sizeof(path), "/dev/pci%d", domain);
		pcifd[domain] = open(path, O_RDWR);
		if (pcifd[domain] == -1)
			break;
		ndomains++;
	}

	if (ndomains == 0)
		return ENXIO;

	pci_sys = calloc(1, sizeof(struct pci_system));
	if (pci_sys == NULL) {
		for (domain = 0; domain < ndomains; domain++)
			close(pcifd[domain]);
		ndomains = 0;
		return ENOMEM;
	}

	pci_sys->methods = &openbsd_pci_methods;

	ndevs = 0;
	for (domain = 0; domain < ndomains; domain++) {
		for (bus = 0; bus < 256; bus++) {
			for (dev = 0; dev < 32; dev++) {
				nfuncs = pci_nfuncs(domain, bus, dev);
				for (func = 0; func < nfuncs; func++) {
					if (pci_read(domain, bus, dev, func,
					    PCI_ID_REG, &reg) != 0)
						continue;
					if (PCI_VENDOR(reg) == PCI_VENDOR_INVALID ||
					    PCI_VENDOR(reg) == 0)
						continue;

					ndevs++;
				}
			}
		}
	}

	pci_sys->num_devices = ndevs;
	pci_sys->devices = calloc(ndevs, sizeof(struct pci_device_private));
	if (pci_sys->devices == NULL) {
		free(pci_sys);
		pci_sys = NULL;
		for (domain = 0; domain < ndomains; domain++)
			close(pcifd[domain]);
		ndomains = 0;
		return ENOMEM;
	}

	device = pci_sys->devices;
	for (domain = 0; domain < ndomains; domain++) {
		for (bus = 0; bus < 256; bus++) {
			for (dev = 0; dev < 32; dev++) {
				nfuncs = pci_nfuncs(domain, bus, dev);
				for (func = 0; func < nfuncs; func++) {
					if (pci_read(domain, bus, dev, func,
					    PCI_ID_REG, &reg) != 0)
						continue;
					if (PCI_VENDOR(reg) == PCI_VENDOR_INVALID ||
					    PCI_VENDOR(reg) == 0)
						continue;

					device->base.domain = domain;
					device->base.bus = bus;
					device->base.dev = dev;
					device->base.func = func;
					device->base.vendor_id = PCI_VENDOR(reg);
					device->base.device_id = PCI_PRODUCT(reg);

					if (pci_read(domain, bus, dev, func,
					    PCI_CLASS_REG, &reg) != 0)
						continue;

					device->base.device_class =
					    PCI_INTERFACE(reg) |
					    PCI_CLASS(reg) << 16 |
					    PCI_SUBCLASS(reg) << 8;
					device->base.revision = PCI_REVISION(reg);

					if (pci_read(domain, bus, dev, func,
					    PCI_SUBVEND_0, &reg) != 0)
						continue;

					device->base.subvendor_id = PCI_VENDOR(reg);
					device->base.subdevice_id = PCI_PRODUCT(reg);

					device->base.vgaarb_rsrc =
					    VGA_ARB_RSRC_LEGACY_IO |
					    VGA_ARB_RSRC_LEGACY_MEM;

					device++;
				}
			}
		}
	}

	return 0;
}

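/*
 * Hand the backend a descriptor used to map physical memory; ROM
 * reads and range mappings go through it.  On OpenBSD this is
 * typically the aperture driver (e.g. /dev/xf86), but that is up
 * to the caller.
 */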
void
pci_system_openbsd_init_dev_mem(int fd)
{
	aperturefd = fd;
}

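/*
 * Ask the kernel (PCIOCGETVGA) which device currently decodes VGA,
 * make it the arbitration target and count the VGA-class devices
 * in domain 0.
 */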
int
pci_device_vgaarb_init(void)
{
	struct pci_device *dev = pci_sys->vga_target;
	struct pci_device_iterator *iter;
	struct pci_id_match vga_match = {
		PCI_MATCH_ANY, PCI_MATCH_ANY, PCI_MATCH_ANY, PCI_MATCH_ANY,
		(PCI_CLASS_DISPLAY << 16) | (PCI_SUBCLASS_DISPLAY_VGA << 8),
		0x00ffff00
	};
	struct pci_vga pv;
	int err;

	pv.pv_sel.pc_bus = 0;
	pv.pv_sel.pc_dev = 0;
	pv.pv_sel.pc_func = 0;
	err = ioctl(pcifd[0], PCIOCGETVGA, &pv);
	if (err)
		return err;

	pci_sys->vga_target = pci_device_find_by_slot(0, pv.pv_sel.pc_bus,
	    pv.pv_sel.pc_dev, pv.pv_sel.pc_func);

	/* Count the number of VGA devices in domain 0. */
	iter = pci_id_match_iterator_create(&vga_match);
	if (iter == NULL)
		return -1;
	pci_sys->vga_count = 0;
	while ((dev = pci_device_next(iter)) != NULL) {
		if (dev->domain == 0)
			pci_sys->vga_count++;
	}
	pci_iterator_destroy(iter);

	return 0;
}

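/* Release the VGA lock held on the arbitration target, if any. */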
void
pci_device_vgaarb_fini(void)
{
	struct pci_device *dev;
	struct pci_vga pv;

	if (pci_sys == NULL)
		return;
	dev = pci_sys->vga_target;
	if (dev == NULL)
		return;

	pv.pv_sel.pc_bus = dev->bus;
	pv.pv_sel.pc_dev = dev->dev;
	pv.pv_sel.pc_func = dev->func;
	pv.pv_lock = PCI_VGA_UNLOCK;
	ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
}

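/* Select the device that subsequent VGA lock/unlock calls act on. */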
int
pci_device_vgaarb_set_target(struct pci_device *dev)
{
	pci_sys->vga_target = dev;

	return 0;
}

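/*
 * Lock legacy VGA resources to the arbitration target; a no-op
 * when only a single VGA device is present.
 */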
int
pci_device_vgaarb_lock(void)
{
	struct pci_device *dev = pci_sys->vga_target;
	struct pci_vga pv;

	if (dev == NULL)
		return -1;

#if 0
	if (dev->vgaarb_rsrc == 0 || pci_sys->vga_count == 1)
		return 0;
#else
	if (pci_sys->vga_count == 1)
		return 0;
#endif

	pv.pv_sel.pc_bus = dev->bus;
	pv.pv_sel.pc_dev = dev->dev;
	pv.pv_sel.pc_func = dev->func;
	pv.pv_lock = PCI_VGA_LOCK;
	return ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
}

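/* Release the VGA lock taken by pci_device_vgaarb_lock(). */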
int
pci_device_vgaarb_unlock(void)
{
	struct pci_device *dev = pci_sys->vga_target;
	struct pci_vga pv;

	if (dev == NULL)
		return -1;

#if 0
	if (dev->vgaarb_rsrc == 0 || pci_sys->vga_count == 1)
		return 0;
#else
	if (pci_sys->vga_count == 1)
		return 0;
#endif

	pv.pv_sel.pc_bus = dev->bus;
	pv.pv_sel.pc_dev = dev->dev;
	pv.pv_sel.pc_func = dev->func;
	pv.pv_lock = PCI_VGA_UNLOCK;
	return ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
}

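/*
 * Report the VGA device count and, for the given device, which
 * legacy resources it decodes.
 */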
int
pci_device_vgaarb_get_info(struct pci_device *dev, int *vga_count,
    int *rsrc_decodes)
{
	*vga_count = pci_sys->vga_count;

	if (dev)
		*rsrc_decodes = dev->vgaarb_rsrc;

	return 0;
}

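/* Record which legacy VGA resources the arbitration target decodes. */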
int
pci_device_vgaarb_decodes(int rsrc_decodes)
{
	struct pci_device *dev = pci_sys->vga_target;

	if (dev == NULL)
		return -1;

	dev->vgaarb_rsrc = rsrc_decodes;
	return 0;
}