/*
 * (C) Copyright IBM Corporation 2006
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * IBM AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file common_interface.c
 * Platform independent interface glue.
 *
 * \author Ian Romanick <[email protected]>
 */

#include <stdlib.h>
#include <errno.h>

#include "pciaccess.h"
#include "pciaccess_private.h"

#ifdef __linux__
#include <byteswap.h>

#if __BYTE_ORDER == __BIG_ENDIAN
# define LETOH_16(x)   bswap_16(x)
# define HTOLE_16(x)   bswap_16(x)
# define LETOH_32(x)   bswap_32(x)
# define HTOLE_32(x)   bswap_32(x)
#else
# define LETOH_16(x)   (x)
# define HTOLE_16(x)   (x)
# define LETOH_32(x)   (x)
# define HTOLE_32(x)   (x)
#endif

#else

#include <sys/endian.h>

#define LETOH_16(x)   le16toh(x)
#define HTOLE_16(x)   htole16(x)
#define LETOH_32(x)   le32toh(x)
#define HTOLE_32(x)   htole32(x)

#endif

/**
 * Read a device's expansion ROM.
 *
 * Reads the device's expansion ROM and stores the data in the memory pointed
 * to by \c buffer.  The buffer must be at least \c pci_device::rom_size
 * bytes.
 *
 * \param dev    Device whose expansion ROM is to be read.
 * \param buffer Memory in which to store the ROM.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 */
int
pci_device_read_rom( struct pci_device * dev, void * buffer )
{
    if ( (dev == NULL) || (buffer == NULL) ) {
        return EFAULT;
    }

    return (pci_sys->methods->read_rom)( dev, buffer );
}


/**
 * Probe a PCI device to learn information about the device.
 *
 * Probes a PCI device to learn various information about the device.  Before
 * calling this function, the only public fields in the \c pci_device
 * structure that have valid values are \c pci_device::domain,
 * \c pci_device::bus, \c pci_device::dev, and \c pci_device::func.
 *
 * \param dev  Device to be probed.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 */
int
pci_device_probe( struct pci_device * dev )
{
    if ( dev == NULL ) {
        return EFAULT;
    }

    return (pci_sys->methods->probe)( dev );
}


/**
 * Map the specified BAR so that it can be accessed by the CPU.
 *
 * Maps the specified BAR for access by the processor.
 * The pointer to the mapped region is stored in the
 * \c pci_mem_region::memory pointer for the BAR.
 *
 * \param dev          Device whose memory region is to be mapped.
 * \param region       Region, in the range [0, 5], that is to be mapped.
 * \param write_enable Map for writing (non-zero).
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_unmap_region
 */
int
pci_device_map_region( struct pci_device * dev, unsigned region,
                       int write_enable )
{
    if ( dev == NULL ) {
        return EFAULT;
    }

    if ( (region > 5) || (dev->regions[ region ].size == 0) ) {
        return ENOENT;
    }

    if ( dev->regions[ region ].memory != NULL ) {
        return 0;
    }

    return (pci_sys->methods->map)( dev, region, write_enable );
}


/**
 * Map the specified memory range so that it can be accessed by the CPU.
 *
 * Maps the specified memory range for access by the processor.  The pointer
 * to the mapped region is stored in \c addr.  In addition, the
 * \c pci_mem_region::memory pointer for the BAR will be updated.
 *
 * \param dev          Device whose memory region is to be mapped.
 * \param base         Base address of the range to be mapped.
 * \param size         Size of the range to be mapped.
 * \param write_enable Map for writing (non-zero).
 * \param addr         Location to store the mapped address.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_unmap_memory_range, pci_device_map_region
 */
int
pci_device_map_memory_range(struct pci_device *dev, pciaddr_t base,
                            pciaddr_t size, int write_enable,
                            void **addr)
{
    unsigned region;
    int err = 0;

    if ((dev == NULL) || (addr == NULL)) {
        return EFAULT;
    }

    *addr = NULL;

    for (region = 0; region < 6; region++) {
        const struct pci_mem_region * const r = &dev->regions[region];

        if (r->size != 0) {
            if ((r->base_addr <= base) && ((r->base_addr + r->size) > base)) {
                if ((base + size) > (r->base_addr + r->size)) {
                    return E2BIG;
                }

                break;
            }
        }
    }

    if (region > 5) {
        return ENOENT;
    }

    if (dev->regions[region].memory == NULL) {
        err = (*pci_sys->methods->map)(dev, region, write_enable);
    }

    if (err == 0) {
        const pciaddr_t offset = base - dev->regions[region].base_addr;

        *addr = ((uint8_t *) dev->regions[region].memory) + offset;
    }

    return err;
}


/**
 * Unmap the specified BAR so that it can no longer be accessed by the CPU.
 *
 * Unmaps the specified BAR that was previously mapped via
 * \c pci_device_map_region.
 *
 * \param dev    Device whose memory region is to be unmapped.
 * \param region Region, in the range [0, 5], that is to be unmapped.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_region
 */
int
pci_device_unmap_region( struct pci_device * dev, unsigned region )
{
    if ( dev == NULL ) {
        return EFAULT;
    }

    if ( (region > 5) || (dev->regions[ region ].size == 0) ) {
        return ENOENT;
    }

    if ( dev->regions[ region ].memory == NULL ) {
        return 0;
    }

    return (pci_sys->methods->unmap)( dev, region );
}


/**
 * Unmap the specified memory range so that it can no longer be accessed by
 * the CPU.
 *
 * Unmaps the specified memory range that was previously mapped via
 * \c pci_device_map_memory_range.
 *
 * \param dev    Device whose memory is to be unmapped.
 * \param memory Pointer to the base of the mapped range.
 * \param size   Size, in bytes, of the range to be unmapped.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_memory_range, pci_device_unmap_region
 */
int
pci_device_unmap_memory_range(struct pci_device *dev, void *memory,
                              pciaddr_t size)
{
    const uint8_t * const p = (const uint8_t *) memory;
    unsigned region;

    if (dev == NULL) {
        return EFAULT;
    }

    for (region = 0; region < 6; region++) {
        const struct pci_mem_region * const r = &dev->regions[region];
        const uint8_t * const mem = r->memory;

        if (r->size != 0) {
            if ((mem <= p) && ((mem + r->size) > p)) {
                if ((p + size) > (mem + r->size)) {
                    return E2BIG;
                }

                break;
            }
        }
    }

    if (region > 5) {
        return ENOENT;
    }

    return (dev->regions[region].memory != NULL)
        ? (*pci_sys->methods->unmap)(dev, region)
        : 0;
}


/**
 * Read arbitrary bytes from device's PCI config space
 *
 * Reads data from the device's PCI configuration space.  As with the system
 * read command, less data may be returned, without an error, than was
 * requested.  This is particularly the case if a non-root user tries to read
 * beyond the first 64 bytes of configuration space.
 *
 * \param dev        Device whose PCI configuration data is to be read.
 * \param data       Location to store the data
 * \param offset     Initial byte offset to read
 * \param size       Total number of bytes to read
 * \param bytes_read Location to store the actual number of bytes read.  This
 *                   pointer may be \c NULL.
 *
 * \returns
 * Zero on success or an errno value on failure.
 *
 * \note
 * Data read from PCI configuration space using this routine is \b not
 * byte-swapped to the host's byte order.  PCI configuration data is always
 * stored in little-endian order, and that is what this routine returns.
 */
int
pci_device_cfg_read( struct pci_device * dev, void * data,
                     pciaddr_t offset, pciaddr_t size,
                     pciaddr_t * bytes_read )
{
    pciaddr_t scratch;

    if ( (dev == NULL) || (data == NULL) ) {
        return EFAULT;
    }

    return pci_sys->methods->read( dev, data, offset, size,
                                   (bytes_read == NULL)
                                   ? & scratch : bytes_read );
}


int
pci_device_cfg_read_u8( struct pci_device * dev, uint8_t * data,
                        pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 1, & bytes );

    if ( (err == 0) && (bytes != 1) ) {
        err = ENXIO;
    }

    return err;
}


int
pci_device_cfg_read_u16( struct pci_device * dev, uint16_t * data,
                         pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 2, & bytes );

    if ( (err == 0) && (bytes != 2) ) {
        err = ENXIO;
    }

    *data = LETOH_16( *data );
    return err;
}


int
pci_device_cfg_read_u32( struct pci_device * dev, uint32_t * data,
                         pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 4, & bytes );

    if ( (err == 0) && (bytes != 4) ) {
        err = ENXIO;
    }

    *data = LETOH_32( *data );
    return err;
}


/**
 * Write arbitrary bytes to device's PCI config space
 *
 * Writes data to the device's PCI configuration space.
 * As with the system write command, less data may be written, without an
 * error, than was requested.
 *
 * \param dev           Device whose PCI configuration data is to be written.
 * \param data          Location of the source data
 * \param offset        Initial byte offset to write
 * \param size          Total number of bytes to write
 * \param bytes_written Location to store the actual number of bytes written.
 *                      This pointer may be \c NULL.
 *
 * \returns
 * Zero on success or an errno value on failure.
 *
 * \note
 * Data written to PCI configuration space using this routine is \b not
 * byte-swapped from the host's byte order.  PCI configuration data is always
 * stored in little-endian order, so data written with this routine should be
 * put in that order in advance.
 */
int
pci_device_cfg_write( struct pci_device * dev, const void * data,
                      pciaddr_t offset, pciaddr_t size,
                      pciaddr_t * bytes_written )
{
    pciaddr_t scratch;

    if ( (dev == NULL) || (data == NULL) ) {
        return EFAULT;
    }

    return pci_sys->methods->write( dev, data, offset, size,
                                    (bytes_written == NULL)
                                    ? & scratch : bytes_written );
}


int
pci_device_cfg_write_u8( struct pci_device * dev, const uint8_t * data,
                         pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_write( dev, data, offset, 1, & bytes );

    if ( (err == 0) && (bytes != 1) ) {
        err = ENOSPC;
    }

    return err;
}


int
pci_device_cfg_write_u16( struct pci_device * dev, const uint16_t * data,
                          pciaddr_t offset )
{
    pciaddr_t bytes;
    const uint16_t temp = HTOLE_16( *data );
    int err = pci_device_cfg_write( dev, & temp, offset, 2, & bytes );

    if ( (err == 0) && (bytes != 2) ) {
        err = ENOSPC;
    }

    return err;
}


int
pci_device_cfg_write_u32( struct pci_device * dev, const uint32_t * data,
                          pciaddr_t offset )
{
    pciaddr_t bytes;
    const uint32_t temp = HTOLE_32( *data );
    int err = pci_device_cfg_write( dev, & temp, offset, 4, & bytes );

    if ( (err == 0) && (bytes != 4) ) {
        err = ENOSPC;
    }

    return err;
}


int
pci_device_cfg_write_bits( struct pci_device * dev, uint32_t mask,
                           uint32_t data, pciaddr_t offset )
{
    uint32_t temp;
    int err;

    err = pci_device_cfg_read_u32( dev, & temp, offset );
    if ( ! err ) {
        temp &= ~mask;
        temp |= data;

        err = pci_device_cfg_write_u32( dev, & temp, offset );
    }

    return err;
}
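

/*
 * Illustrative usage sketch (not part of the library, kept out of the build
 * with #if 0).  It shows how an application might combine the functions
 * above: probe a device, map its first BAR, and read the little-endian
 * vendor ID from configuration space.  The function name
 * example_map_and_read_vendor() and the choice of BAR 0 are assumptions made
 * purely for illustration; real callers obtain the pci_device from the
 * iterator API declared in pciaccess.h.
 */
#if 0
static int
example_map_and_read_vendor( struct pci_device * dev )
{
    uint16_t vendor;
    int err;

    /* Fill in the BAR sizes, IDs, and other public pci_device fields. */
    err = pci_device_probe( dev );
    if ( err != 0 ) {
        return err;
    }

    /* Map BAR 0 read-only; the mapping lands in dev->regions[0].memory. */
    err = pci_device_map_region( dev, 0, 0 );
    if ( err != 0 ) {
        return err;
    }

    /* Read the 16-bit vendor ID at config-space offset 0; the helper
     * byte-swaps it from little-endian to host order. */
    err = pci_device_cfg_read_u16( dev, & vendor, 0 );

    /* Release the BAR mapping when finished with it. */
    (void) pci_device_unmap_region( dev, 0 );

    return err;
}
#endif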