/*
 * (C) Copyright IBM Corporation 2006
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * IBM AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file common_interface.c
 * Platform independent interface glue.
 *
 * \author Ian Romanick <[email protected]>
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "pciaccess.h"
#include "pciaccess_private.h"

#ifdef __linux__
#include <byteswap.h>

#if __BYTE_ORDER == __BIG_ENDIAN
# define LETOH_16(x)   bswap_16(x)
# define HTOLE_16(x)   bswap_16(x)
# define LETOH_32(x)   bswap_32(x)
# define HTOLE_32(x)   bswap_32(x)
#else
# define LETOH_16(x)   (x)
# define HTOLE_16(x)   (x)
# define LETOH_32(x)   (x)
# define HTOLE_32(x)   (x)
#endif /* __BYTE_ORDER == __BIG_ENDIAN */

#elif defined(__sun)
#define LETOH_16(x)   (x)
#define HTOLE_16(x)   (x)
#define LETOH_32(x)   (x)
#define HTOLE_32(x)   (x)

#else

#include <sys/endian.h>

#define LETOH_16(x)   le16toh(x)
#define HTOLE_16(x)   htole16(x)
#define LETOH_32(x)   le32toh(x)
#define HTOLE_32(x)   htole32(x)

#endif /* others */

/**
 * Read a device's expansion ROM.
 *
 * Reads the device's expansion ROM and stores the data in the memory pointed
 * to by \c buffer.  The buffer must be at least \c pci_device::rom_size
 * bytes.
 *
 * \param dev    Device whose expansion ROM is to be read.
 * \param buffer Memory in which to store the ROM.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 */
int
pci_device_read_rom( struct pci_device * dev, void * buffer )
{
    if ( (dev == NULL) || (buffer == NULL) ) {
        return EFAULT;
    }

    return (pci_sys->methods->read_rom)( dev, buffer );
}
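
/*
 * Usage sketch: a minimal example of reading a device's expansion ROM.  It
 * assumes the device has already been probed so that pci_device::rom_size
 * is valid, and it abbreviates error handling.
 *
 * \code
 *     void *rom = malloc(dev->rom_size);
 *
 *     if (rom != NULL) {
 *         if (pci_device_read_rom(dev, rom) == 0) {
 *             // rom now holds dev->rom_size bytes of the expansion ROM
 *         }
 *         free(rom);
 *     }
 * \endcode
 */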

/**
 * Probe a PCI device to learn information about the device.
 *
 * Probes a PCI device to learn various information about the device.  Before
 * calling this function, the only public fields in the \c pci_device
 * structure that have valid values are \c pci_device::domain,
 * \c pci_device::bus, \c pci_device::dev, and \c pci_device::func.
 *
 * \param dev  Device to be probed.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 */
int
pci_device_probe( struct pci_device * dev )
{
    if ( dev == NULL ) {
        return EFAULT;
    }

    return (pci_sys->methods->probe)( dev );
}


/**
 * Map the specified BAR so that it can be accessed by the CPU.
 *
 * Maps the specified BAR for access by the processor.  The pointer to the
 * mapped region is stored in the \c pci_mem_region::memory pointer for the
 * BAR.
 *
 * \param dev          Device whose memory region is to be mapped.
 * \param region       Region, in the range [0, 5], that is to be mapped.
 * \param write_enable Map for writing (non-zero).
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range, pci_device_unmap_range
 * \deprecated
 */
int
pci_device_map_region(struct pci_device * dev, unsigned region,
                      int write_enable)
{
    const unsigned map_flags =
        (write_enable) ? PCI_DEV_MAP_FLAG_WRITABLE : 0;

    if ((region > 5) || (dev->regions[region].size == 0)) {
        return ENOENT;
    }

    if (dev->regions[region].memory != NULL) {
        return 0;
    }

    return pci_device_map_range(dev, dev->regions[region].base_addr,
                                dev->regions[region].size, map_flags,
                                &dev->regions[region].memory);
}


/**
 * Map the specified memory range so that it can be accessed by the CPU.
 *
 * Maps the specified memory range for access by the processor.  The pointer
 * to the mapped region is stored in \c addr.  In addition, the
 * \c pci_mem_region::memory pointer for the BAR will be updated.
 *
 * \param dev       Device whose memory region is to be mapped.
 * \param base      Base address of the range to be mapped.
 * \param size      Size of the range to be mapped.
 * \param map_flags Flag bits controlling how the mapping is accessed.
 * \param addr      Location to store the mapped address.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_unmap_range
 */
int
pci_device_map_range(struct pci_device *dev, pciaddr_t base,
                     pciaddr_t size, unsigned map_flags,
                     void **addr)
{
    struct pci_device_private *const devp =
        (struct pci_device_private *) dev;
    struct pci_device_mapping *mappings;
    unsigned region;
    unsigned i;
    int err = 0;

    *addr = NULL;

    if (dev == NULL) {
        return EFAULT;
    }

    for (region = 0; region < 6; region++) {
        const struct pci_mem_region * const r = &dev->regions[region];

        if (r->size != 0) {
            if ((r->base_addr <= base) && ((r->base_addr + r->size) > base)) {
                if ((base + size) > (r->base_addr + r->size)) {
                    return E2BIG;
                }

                break;
            }
        }
    }

    if (region > 5) {
        return ENOENT;
    }

    /* Make sure that there isn't already a mapping with the same base and
     * size.
     */
    for (i = 0; i < devp->num_mappings; i++) {
        if ((devp->mappings[i].base == base)
            && (devp->mappings[i].size == size)) {
            return EINVAL;
        }
    }

    mappings = realloc(devp->mappings,
                       (sizeof(devp->mappings[0]) * (devp->num_mappings + 1)));
    if (mappings == NULL) {
        return ENOMEM;
    }

    mappings[devp->num_mappings].base = base;
    mappings[devp->num_mappings].size = size;
    mappings[devp->num_mappings].region = region;
    mappings[devp->num_mappings].flags = map_flags;
    mappings[devp->num_mappings].memory = NULL;

    if (dev->regions[region].memory == NULL) {
        err = (*pci_sys->methods->map_range)(dev,
                                             &mappings[devp->num_mappings]);
    }

    if (err == 0) {
        *addr = mappings[devp->num_mappings].memory;
        devp->num_mappings++;
    } else {
        mappings = realloc(devp->mappings,
                           (sizeof(devp->mappings[0]) * devp->num_mappings));
    }

    devp->mappings = mappings;

    return err;
}
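
/*
 * Usage sketch: a minimal example of mapping the first BAR of a device for
 * read/write access with pci_device_map_range().  It assumes the device has
 * already been probed so that pci_device::regions is populated, and it omits
 * most error handling.
 *
 * \code
 *     void *regs = NULL;
 *     int err = pci_device_map_range(dev,
 *                                    dev->regions[0].base_addr,
 *                                    dev->regions[0].size,
 *                                    PCI_DEV_MAP_FLAG_WRITABLE,
 *                                    &regs);
 *
 *     if (err == 0) {
 *         // regs now points to a CPU-accessible mapping of BAR 0
 *     }
 * \endcode
 */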

/**
 * Unmap the specified BAR so that it can no longer be accessed by the CPU.
 *
 * Unmaps the specified BAR that was previously mapped via
 * \c pci_device_map_region.
 *
 * \param dev    Device whose memory region is to be unmapped.
 * \param region Region, in the range [0, 5], that is to be unmapped.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range, pci_device_unmap_range
 * \deprecated
 */
int
pci_device_unmap_region( struct pci_device * dev, unsigned region )
{
    int err;

    if (dev == NULL) {
        return EFAULT;
    }

    if ((region > 5) || (dev->regions[region].size == 0)) {
        return ENOENT;
    }

    err = pci_device_unmap_range(dev, dev->regions[region].memory,
                                 dev->regions[region].size);
    if (!err) {
        dev->regions[region].memory = NULL;
    }

    return err;
}


/**
 * Unmap the specified memory range so that it can no longer be accessed by the CPU.
 *
 * Unmaps the specified memory range that was previously mapped via
 * \c pci_device_map_memory_range.
 *
 * \param dev    Device whose memory is to be unmapped.
 * \param memory Pointer to the base of the mapped range.
 * \param size   Size, in bytes, of the range to be unmapped.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range, pci_device_unmap_range
 * \deprecated
 */
int
pci_device_unmap_memory_range(struct pci_device *dev, void *memory,
                              pciaddr_t size)
{
    return pci_device_unmap_range(dev, memory, size);
}


/**
 * Unmap the specified memory range so that it can no longer be accessed by the CPU.
 *
 * Unmaps the specified memory range that was previously mapped via
 * \c pci_device_map_memory_range.
 *
 * \param dev    Device whose memory is to be unmapped.
 * \param memory Pointer to the base of the mapped range.
 * \param size   Size, in bytes, of the range to be unmapped.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range
 */
int
pci_device_unmap_range(struct pci_device *dev, void *memory,
                       pciaddr_t size)
{
    struct pci_device_private *const devp =
        (struct pci_device_private *) dev;
    unsigned i;
    int err;

    if (dev == NULL) {
        return EFAULT;
    }

    for (i = 0; i < devp->num_mappings; i++) {
        if ((devp->mappings[i].memory == memory)
            && (devp->mappings[i].size == size)) {
            break;
        }
    }

    if (i == devp->num_mappings) {
        return ENOENT;
    }

    err = (*pci_sys->methods->unmap_range)(dev, &devp->mappings[i]);
    if (!err) {
        const unsigned entries_to_move = (devp->num_mappings - i) - 1;

        if (entries_to_move > 0) {
            (void) memmove(&devp->mappings[i],
                           &devp->mappings[i + 1],
                           entries_to_move * sizeof(devp->mappings[0]));
        }

        devp->num_mappings--;
        devp->mappings = realloc(devp->mappings,
                                 (sizeof(devp->mappings[0]) * devp->num_mappings));
    }

    return err;
}
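
/*
 * Usage sketch: releasing a mapping created with pci_device_map_range().
 * As implemented above, the pointer and size passed to
 * pci_device_unmap_range() must exactly match an existing mapping;
 * otherwise ENOENT is returned.
 *
 * \code
 *     // Continuing the mapping example above: regs was obtained by mapping
 *     // dev->regions[0].size bytes starting at dev->regions[0].base_addr.
 *     int err = pci_device_unmap_range(dev, regs, dev->regions[0].size);
 *
 *     if (err == 0) {
 *         regs = NULL;   // the mapping is gone; do not use it again
 *     }
 * \endcode
 */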

/**
 * Read arbitrary bytes from device's PCI config space
 *
 * Reads data from the device's PCI configuration space.  As with the system
 * read command, less data may be returned, without an error, than was
 * requested.  This is particularly the case if a non-root user tries to read
 * beyond the first 64 bytes of configuration space.
 *
 * \param dev        Device whose PCI configuration data is to be read.
 * \param data       Location to store the data
 * \param offset     Initial byte offset to read
 * \param size       Total number of bytes to read
 * \param bytes_read Location to store the actual number of bytes read.  This
 *                   pointer may be \c NULL.
 *
 * \returns
 * Zero on success or an errno value on failure.
 *
 * \note
 * Data read from PCI configuration space using this routine is \b not
 * byte-swapped to the host's byte order.  PCI configuration data is always
 * stored in little-endian order, and that is what this routine returns.
 */
int
pci_device_cfg_read( struct pci_device * dev, void * data,
                     pciaddr_t offset, pciaddr_t size,
                     pciaddr_t * bytes_read )
{
    pciaddr_t scratch;

    if ( (dev == NULL) || (data == NULL) ) {
        return EFAULT;
    }

    return pci_sys->methods->read( dev, data, offset, size,
                                   (bytes_read == NULL)
                                   ? & scratch : bytes_read );
}


int
pci_device_cfg_read_u8( struct pci_device * dev, uint8_t * data,
                        pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 1, & bytes );

    if ( (err == 0) && (bytes != 1) ) {
        err = ENXIO;
    }

    return err;
}


int
pci_device_cfg_read_u16( struct pci_device * dev, uint16_t * data,
                         pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 2, & bytes );

    if ( (err == 0) && (bytes != 2) ) {
        err = ENXIO;
    }

    *data = LETOH_16( *data );
    return err;
}


int
pci_device_cfg_read_u32( struct pci_device * dev, uint32_t * data,
                         pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 4, & bytes );

    if ( (err == 0) && (bytes != 4) ) {
        err = ENXIO;
    }

    *data = LETOH_32( *data );
    return err;
}
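
/*
 * Usage sketch: reading the 16-bit vendor and device IDs from configuration
 * space with the helpers above.  The offsets (0x00 and 0x02) are the standard
 * PCI configuration header locations for these registers.  Unlike the raw
 * pci_device_cfg_read(), the u16/u32 helpers convert the little-endian
 * configuration data to host byte order.
 *
 * \code
 *     uint16_t vendor, device;
 *
 *     if (pci_device_cfg_read_u16(dev, &vendor, 0x00) == 0
 *         && pci_device_cfg_read_u16(dev, &device, 0x02) == 0) {
 *         // vendor and device are now in host byte order
 *     }
 * \endcode
 */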

/**
 * Write arbitrary bytes to device's PCI config space
 *
 * Writes data to the device's PCI configuration space.  As with the system
 * write command, less data may be written, without an error, than was
 * requested.
 *
 * \param dev           Device whose PCI configuration data is to be written.
 * \param data          Location of the source data
 * \param offset        Initial byte offset to write
 * \param size          Total number of bytes to write
 * \param bytes_written Location to store the actual number of bytes written.
 *                      This pointer may be \c NULL.
 *
 * \returns
 * Zero on success or an errno value on failure.
 *
 * \note
 * Data written to PCI configuration space using this routine is \b not
 * byte-swapped from the host's byte order.  PCI configuration data is always
 * stored in little-endian order, so data written with this routine should be
 * put in that order in advance.
 */
int
pci_device_cfg_write( struct pci_device * dev, const void * data,
                      pciaddr_t offset, pciaddr_t size,
                      pciaddr_t * bytes_written )
{
    pciaddr_t scratch;

    if ( (dev == NULL) || (data == NULL) ) {
        return EFAULT;
    }

    return pci_sys->methods->write( dev, data, offset, size,
                                    (bytes_written == NULL)
                                    ? & scratch : bytes_written );
}


int
pci_device_cfg_write_u8(struct pci_device *dev, uint8_t data,
                        pciaddr_t offset)
{
    pciaddr_t bytes;
    int err = pci_device_cfg_write(dev, & data, offset, 1, & bytes);

    if ( (err == 0) && (bytes != 1) ) {
        err = ENOSPC;
    }

    return err;
}


int
pci_device_cfg_write_u16(struct pci_device *dev, uint16_t data,
                         pciaddr_t offset)
{
    pciaddr_t bytes;
    const uint16_t temp = HTOLE_16(data);
    int err = pci_device_cfg_write( dev, & temp, offset, 2, & bytes );

    if ( (err == 0) && (bytes != 2) ) {
        err = ENOSPC;
    }

    return err;
}


int
pci_device_cfg_write_u32(struct pci_device *dev, uint32_t data,
                         pciaddr_t offset)
{
    pciaddr_t bytes;
    const uint32_t temp = HTOLE_32(data);
    int err = pci_device_cfg_write( dev, & temp, offset, 4, & bytes );

    if ( (err == 0) && (bytes != 4) ) {
        err = ENOSPC;
    }

    return err;
}


int
pci_device_cfg_write_bits( struct pci_device * dev, uint32_t mask,
                           uint32_t data, pciaddr_t offset )
{
    uint32_t temp;
    int err;

    err = pci_device_cfg_read_u32( dev, & temp, offset );
    if ( ! err ) {
        temp &= ~mask;
        temp |= data;

        err = pci_device_cfg_write_u32(dev, temp, offset);
    }

    return err;
}
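
/*
 * Usage sketch: a read-modify-write with pci_device_cfg_write_bits(), which
 * (as implemented above) reads a 32-bit value, clears the bits in \c mask,
 * ORs in \c data, and writes the result back.  The offset used below (0x40)
 * is purely hypothetical and stands in for a device-specific configuration
 * register; it is not a standard PCI header location.
 *
 * \code
 *     // Set bits 1:0 of a (hypothetical) device-specific register at 0x40
 *     // without disturbing the other bits.
 *     int err = pci_device_cfg_write_bits(dev, 0x00000003, 0x00000001, 0x40);
 *
 *     if (err != 0) {
 *         // handle the error (e.g., insufficient privileges)
 *     }
 * \endcode
 */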