/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include <vm/vm_kern_xnu.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

KALLOC_HEAP_DEFINE(KHEAP_IOBMD_CONTROL, "IOBMD_control", KHEAP_ID_KT_VAR);
__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum {
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004,
    kInternalFlagInit          = 0x00000008,
    kInternalFlagHasPointers   = 0x00000010,
    kInternalFlagGuardPages    = 0x00000020,
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if defined(__x86_64__)
static uintptr_t
IOBMDPageProc(kalloc_heap_t kheap, iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr    = 0;
    kma_flags_t   kma_flags = KMA_ZERO;

    if (kheap == KHEAP_DATA_BUFFERS) {
        kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
    }
    kr = kmem_alloc(kernel_map, &vmaddr, page_size,
        kma_flags, VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) {
        vmaddr = 0;
    }

    return (uintptr_t) vmaddr;
}
#endif /* defined(__x86_64__) */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

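/*
 * Illustrative usage sketch (not part of the original source): a typical
 * kernel client allocates a buffer through one of the factory methods
 * defined below, prepares it for I/O, and accesses it via getBytesNoCopy().
 * The option bits, capacity, and alignment chosen here are assumptions for
 * the example only.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(
 *             kernel_task,                             // allocate in the kernel task
 *             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *             4096,                                    // capacity in bytes
 *             4096);                                   // alignment in bytes
 *     if (bmd && (kIOReturnSuccess == bmd->prepare())) {
 *         void * va = bmd->getBytesNoCopy();           // kernel virtual address
 *         bzero(va, bmd->getCapacity());
 *         // ... program the hardware, then ...
 *         bmd->complete();
 *     }
 */
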
#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCopy(
    task_t            inTask,
    IOOptionBits      options,
    vm_map_t          sourceMap,
    mach_vm_address_t source,
    mach_vm_size_t    size)
{
    OSSharedPtr<IOBufferMemoryDescriptor> inst;
    kern_return_t    err;
    vm_map_copy_t    copy;
    vm_map_address_t address;

    copy = NULL;
    do {
        err = kIOReturnNoMemory;
        inst = OSMakeShared<IOBufferMemoryDescriptor>();
        if (!inst) {
            break;
        }
        inst->_ranges.v64 = IOMallocType(IOAddressRange);

        err = vm_map_copyin(sourceMap, source, size,
            false /* src_destroy */, &copy);
        if (KERN_SUCCESS != err) {
            break;
        }

        err = vm_map_copyout(get_task_map(inTask), &address, copy);
        if (KERN_SUCCESS != err) {
            break;
        }
        copy = NULL;

        inst->_ranges.v64->address = address;
        inst->_ranges.v64->length  = size;

        if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
            err = kIOReturnError;
        }
    } while (false);

    if (KERN_SUCCESS == err) {
        return inst;
    }

    if (copy) {
        vm_map_copy_discard(copy);
    }

    return nullptr;
}


bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t                mapTask = NULL;
    kalloc_heap_t         kheap = KHEAP_DATA_BUFFERS;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  withCopy = false;
    bool                  mappedOrShared = false;

    if (!capacity) {
        return false;
    }

    /*
     * The IOKit constructor requests zeroed memory from the allocator,
     * so the members of the class do not need to be explicitly zeroed.
     */
    _options = options;
    _capacity = capacity;

    if (!_ranges.v64) {
        _ranges.v64 = IOMallocType(IOAddressRange);
        _ranges.v64->address = 0;
        _ranges.v64->length  = 0;
    } else {
        if (!_ranges.v64->address) {
            return false;
        }
        if (!(kIOMemoryPageable & options)) {
            return false;
        }
        if (!inTask) {
            return false;
        }
        _buffer = (void *) _ranges.v64->address;
        withCopy = true;
    }

    /*
     * Set kalloc_heap to KHEAP_IOBMD_CONTROL if the allocation contains pointers
     */
    if (kInternalFlagHasPointers & _internalFlags) {
        kheap = KHEAP_IOBMD_CONTROL;
    }

    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options)) {
        IOMapper::checkForSystemMapper();
        mapped = (NULL != IOMapper::gSystem);
    }

    if (physicalMask && (alignment <= 1)) {
        alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size) {
            alignment = page_size;
        }
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
        alignment = page_size;
    }

    if (alignment >= page_size) {
        if (round_page_overflow(capacity, &capacity)) {
            return false;
        }
    }

    if (alignment > page_size) {
        options |= kIOMemoryPhysicallyContiguous;
    }

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) {
        return false;
    }

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
        return false;
    }

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped) {
        if (highestMask <= 0xFFFFFFFF) {
            mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask));
        } else {
            mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        }
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable) {
        if (_internalFlags & kInternalFlagGuardPages) {
            printf("IOBMD: Unsupported use of guard pages with pageable memory.\n");
            return false;
        }
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) {
            iomdOptions |= kIOMemoryBufferPurgeable;
        }
    } else {
        // Buffers shouldn't auto prepare; they should be prepared explicitly.
        // But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space.
         */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
        if (contig || highestMask || (alignment > page_size)) {
            if (_internalFlags & kInternalFlagGuardPages) {
                printf("IOBMD: Unsupported use of guard pages with physical mask or contiguous memory.\n");
                return false;
            }
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask) {
                _internalFlags |= kInternalFlagPageSized;
                if (round_page_overflow(capacity, &capacity)) {
                    return false;
                }
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(kheap,
                capacity, highestMask, alignment, contig);
        } else if (_internalFlags & kInternalFlagGuardPages) {
            vm_offset_t   address = 0;
            kern_return_t kr;
            uintptr_t     alignMask;
            kma_flags_t   kma_flags = (kma_flags_t) (KMA_GUARD_FIRST |
                KMA_GUARD_LAST | KMA_ZERO);

            if (((uint32_t) alignment) != alignment) {
                return false;
            }
            if (kheap == KHEAP_DATA_BUFFERS) {
                kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
            }

            alignMask = (1UL << log2up((uint32_t) alignment)) - 1;
            kr = kernel_memory_allocate(kernel_map, &address,
                capacity + page_size * 2, alignMask, kma_flags,
                IOMemoryTag(kernel_map));
            if (kr != KERN_SUCCESS || address == 0) {
                return false;
            }
#if IOALLOCDEBUG
            OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
            _buffer = (void *)(address + page_size);
#if defined(__x86_64__)
        } else if (mappedOrShared
            && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) {
            _internalFlags |= kInternalFlagPageAllocated;
            _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator,
                &IOBMDPageProc, kheap, capacity, alignment);
            if (_buffer) {
                bzero(_buffer, capacity);
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
            }
#endif /* defined(__x86_64__) */
        } else if (alignment > 1) {
            /* BEGIN IGNORE CODESTYLE */
            __typed_allocators_ignore_push
            _buffer = IOMallocAligned_internal(kheap, capacity, alignment,
                Z_ZERO_VM_TAG_BT_BIT);
        } else {
            _buffer = IOMalloc_internal(kheap, capacity, Z_ZERO_VM_TAG_BT_BIT);
            __typed_allocators_ignore_pop
            /* END IGNORE CODESTYLE */
        }
        if (!_buffer) {
            return false;
        }
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        if (!withCopy) {
            iomdOptions |= kIOMemoryPersistent;
        }

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            if (!withCopy) {
                mapTask = inTask;
            }
            if (NULL == inTask) {
                inTask = kernel_task;
            }
        } else if (options & kIOMapCacheMask) {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr) {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) pgz_decode(_buffer, _capacity);
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
        inTask, iomdOptions, /* System mapper */ NULL)) {
        return false;
    }

    _internalFlags |= kInternalFlagInit;
#if IOTRACKING
    if (!(options & kIOMemoryPageable)) {
        trackingAccumSize(capacity);
    }
#endif /* IOTRACKING */

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
        &mapSpec, sizeof(mapSpec))) {
        return false;
    }

    if (mapTask) {
        if (!reserved) {
            reserved = IOMallocType(ExpansionData);
            if (!reserved) {
                return false;
            }
        }
        reserved->map = createMappingInTask(mapTask, 0,
            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach();
        if (!reserved->map) {
            _buffer = NULL;
            return false;
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
            _ranges.v64->address = buffer;
        }
    }

    setLength(_capacity);

    return true;
}

bool
IOBufferMemoryDescriptor::initControlWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    _internalFlags = kInternalFlagHasPointers;
    return initWithPhysicalMask(inTask, options, capacity, alignment,
               physicalMask);
}

bool
IOBufferMemoryDescriptor::initWithGuardPages(
    task_t         inTask,
    IOOptionBits   options,
    mach_vm_size_t capacity)
{
    mach_vm_size_t roundedCapacity;

    _internalFlags = kInternalFlagGuardPages;

    if (round_page_overflow(capacity, &roundedCapacity)) {
        return false;
    }

    return initWithPhysicalMask(inTask, options, roundedCapacity, page_size,
               (mach_vm_address_t)0);
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me.reset();
    }
    return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    uint32_t     kernTag,
    uint32_t     userTag)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me) {
        me->setVMTags(kernTag, userTag);

        if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
            me.reset();
        }
    }
    return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
        me.reset();
    }
    return me;
}

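/*
 * Illustrative sketch (not in the original source): inTaskWithPhysicalMask
 * above passes the mask through to initWithPhysicalMask(), which derives an
 * alignment and the number of usable DMA address bits from it. A device
 * limited to 32-bit physical addresses might use the mask below; the capacity
 * and option bits are assumptions for the example.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> dmaBuf =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *             kernel_task,
 *             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *             65536,                      // capacity in bytes
 *             0x00000000FFFFFFFFULL);     // restrict to 32-bit physical addresses
 */
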
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithGuardPages(
    task_t         inTask,
    IOOptionBits   options,
    mach_vm_size_t capacity)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithGuardPages(inTask, options, capacity)) {
        me.reset();
    }
    return me;
}

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me.reset();
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity,
    IODirection inDirection,
    bool        inContiguous)
{
    return IOBufferMemoryDescriptor::withOptions(
        inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inCapacity, inContiguous ? inCapacity : 1 );
}

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inLength, inLength, (mach_vm_address_t)0)) {
        return false;
    }

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength)) {
        return false;
    }

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
    mach_vm_address_t alignment;

    alignment = (inLength <= page_size) ? inLength : page_size;
    if (me && !me->initWithPhysicalMask(
            kernel_task, inDirection | kIOMemoryUnshared
            | (inContiguous ?
            kIOMemoryPhysicallyContiguous : 0),
            inLength, alignment, 0 )) {
        me.reset();
    }

    if (me) {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength)) {
            me.reset();
        }
    }
    return me;
}

/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = NULL;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;
    kalloc_heap_t    kheap         = KHEAP_DATA_BUFFERS;
    vm_size_t        rsize;

    if (alignment >= page_size) {
        if (!round_page_overflow(size, &rsize)) {
            size = rsize;
        }
    }

    if (reserved) {
        map = reserved->map;
        IOFreeType(reserved, ExpansionData);
        if (map) {
            map->release();
        }
    }

    if ((options & kIOMemoryPageable)
        || (kInternalFlagPageSized & internalFlags)) {
        if (!round_page_overflow(size, &rsize)) {
            size = rsize;
        }
    }

    if (internalFlags & kInternalFlagHasPointers) {
        kheap = KHEAP_IOBMD_CONTROL;
    }

#if IOTRACKING
    if (!(options & kIOMemoryPageable)
        && buffer
        && (kInternalFlagInit & _internalFlags)) {
        trackingAccumSize(-size);
    }
#endif /* IOTRACKING */

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
        OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    } else if (buffer) {
        if (kInternalFlagPhysical & internalFlags) {
            IOKernelFreePhysical(kheap, (mach_vm_address_t) buffer, size);
        } else if (kInternalFlagPageAllocated & internalFlags) {
#if defined(__x86_64__)
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page) {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
#else /* !defined(__x86_64__) */
            /* should be unreachable */
            panic("Attempting to free IOBMD with page allocated flag");
#endif /* defined(__x86_64__) */
        } else if (kInternalFlagGuardPages & internalFlags) {
            vm_offset_t allocation = (vm_offset_t)buffer - page_size;
            kmem_free(kernel_map, allocation, size + page_size * 2);
#if IOALLOCDEBUG
            OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        } else if (alignment > 1) {
            /* BEGIN IGNORE CODESTYLE */
            __typed_allocators_ignore_push
            IOFreeAligned_internal(kheap, buffer, size);
        } else {
            IOFree_internal(kheap, buffer, size);
            __typed_allocators_ignore_pop
            /* END IGNORE CODESTYLE */
        }
    }
    if (range && (kIOMemoryAsReference & flags)) {
        IOFreeType(range, IOAddressRange);
    }
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.
 * The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);
    if (length > _capacity) {
        return;
    }

    _length = length;
    _ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task) {
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
            actualBytesToCopy);
    } else {
        writeBytes(offset, bytes, actualBytesToCopy);
    }

    return true;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
        return _buffer;
    } else {
        return (void *)_ranges.v64->address;
    }
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;

    if ((start + withLength) < start) {
        return NULL;
    }

    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
        address = (IOVirtualAddress) _buffer;
    } else {
        address = _ranges.v64->address;
    }

    if (start < _length && (start + withLength) <= _length) {
        return (void *)(address + start);
    }
    return NULL;
}

#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment) {
        *lengthOfSegment = _length - offset;
    }

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);
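
/*
 * Illustrative sketch (not in the original source): the length and byte
 * accessors implemented above can be combined to stage a small transfer.
 * The payload and sizes are assumptions for the example.
 *
 *     static const uint8_t header[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
 *     OSSharedPtr<IOBufferMemoryDescriptor> msg =
 *         IOBufferMemoryDescriptor::withCapacity(256, kIODirectionOut, false);
 *     if (msg) {
 *         msg->setLength(0);                        // start empty; capacity stays 256
 *         msg->appendBytes(header, sizeof(header)); // advances the length by 4 bytes
 *         void * p = msg->getBytesNoCopy(0, sizeof(header));
 *         // p points at the copied header bytes, or is NULL if out of range
 *     }
 */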