/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_debug.c.
 *	Author:	Rich Draves
 *	Date:	March, 1990
 *
 *	Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_object_xnu.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug_internal.h>
#endif

#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */

#ifdef VM32_SUPPORT

#include <mach/vm32_map_server.h>
#include <mach/vm_map.h>

/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */

kern_return_t
vm32_mach_vm_region_info(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY vm32_offset_ut address_u,
	__DEBUG_ONLY vm_info_region_t *regionp,
	__DEBUG_ONLY vm_info_object_array_t *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	/* This unwrap is safe as this function is DEBUG only. */
	vm32_offset_t address = VM_SANITIZE_UNSAFE_UNWRAP(address_u);
	vm_map_copy_t copy;
	vm_offset_t addr = 0;   /* memory for OOL data */
	vm_size_t size;         /* size of the memory */
	unsigned int room;      /* room for this many objects */
	unsigned int used;      /* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	size = 0;               /* no memory allocated yet */

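	/*
	 * Walk the map to the entry at/after the address, then walk that
	 * entry's object shadow chain, filling in one vm_info_object_t per
	 * object.  The first pass runs with no buffer (room == 0) and only
	 * counts; whenever the chain does not fit, a buffer sized for twice
	 * the count just seen is allocated and the whole lookup is retried.
	 */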
	for (;;) {
		vm_map_t cmap;          /* current map in traversal */
		vm_map_t nmap;          /* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry_allow_pgz(cmap,
			    (vm_map_address_t)address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0) {
						kmem_free(ipc_kernel_map,
						    addr, size);
					}
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map) {
				nmap = VME_SUBMAP(entry);
			} else {
				break;
			}

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = (natural_t) VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

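		/*
		 * Walk the shadow chain with hand-over-hand locking: the
		 * next object is locked before the current one is unlocked.
		 * Counting continues past the end of the buffer so the
		 * retry pass above knows how large the next allocation
		 * must be.
		 */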
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count =
				    cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->vo_copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready =
				    cobject->pager_ready;
				vio->vio_can_persist =
				    cobject->can_persist;
				vio->vio_internal =
				    cobject->internal;
				vio->vio_temporary =
				    FALSE;
				vio->vio_alive =
				    cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				    cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room) {
			break;
		}

		/* must allocate more memory */

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = kmem_alloc(ipc_kernel_map, &addr, size,
		    KMA_DATA, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (size_used < vmsize_used) {
			bzero((char *)addr + size_used, vmsize_used - size_used);
		}

		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used) {
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
		}
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 * Temporary call for the 64-bit data path interface transition.
 */

kern_return_t
vm32_mach_vm_region_info_64(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY vm32_offset_ut address_u,
	__DEBUG_ONLY vm_info_region_64_t *regionp,
	__DEBUG_ONLY vm_info_object_array_t *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	/* This unwrap is safe as this function is DEBUG only. */
	vm32_offset_t address = VM_SANITIZE_UNSAFE_UNWRAP(address_u);
	vm_map_copy_t copy;
	vm_offset_t addr = 0;   /* memory for OOL data */
	vm_size_t size;         /* size of the memory */
	unsigned int room;      /* room for this many objects */
	unsigned int used;      /* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	size = 0;               /* no memory allocated yet */

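	/*
	 * Same walk-and-retry scheme as vm32_mach_vm_region_info() above;
	 * the only difference is that vir_offset is returned at full
	 * 64-bit width in vm_info_region_64_t instead of being truncated
	 * to natural_t.
	 */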
	for (;;) {
		vm_map_t cmap;          /* current map in traversal */
		vm_map_t nmap;          /* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry_allow_pgz(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0) {
						kmem_free(ipc_kernel_map,
						    addr, size);
					}
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map) {
				nmap = VME_SUBMAP(entry);
			} else {
				break;
			}

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count =
				    cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->vo_copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready =
				    cobject->pager_ready;
				vio->vio_can_persist =
				    cobject->can_persist;
				vio->vio_internal =
				    cobject->internal;
				vio->vio_temporary =
				    FALSE;
				vio->vio_alive =
				    cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				    cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room) {
			break;
		}

		/* must allocate more memory */

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = kmem_alloc(ipc_kernel_map, &addr, size,
		    KMA_DATA, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (size_used < vmsize_used) {
			bzero((char *)addr + size_used, vmsize_used - size_used);
		}

		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used) {
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
		}
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 * Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm32_vm_mapped_pages_info(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY page_address_array_t *pages,
	__DEBUG_ONLY mach_msg_type_number_t *pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#elif 1 /* pmap_resident_count is gone with rdar://68290810 */
	(void)map; (void)pages; (void)pages_count;
	return KERN_FAILURE;
#else
	pmap_t pmap;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;
	mach_vm_offset_t addr = 0;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(ipc_kernel_map));

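	/*
	 * Size the buffer from the pmap's current resident count, then let
	 * pmap_list_resident_pages() report the real total; if more pages
	 * appeared in the meantime, free the buffer and retry with the
	 * larger reported size.
	 */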
	for (;;) {
		(void) mach_vm_allocate_kernel(ipc_kernel_map, &addr, size,
		    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IPC));
		(void) vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);

		list = (page_address_array_t) addr;
		space = (unsigned int) (size / sizeof(vm_offset_t));

		actual = pmap_list_resident_pages(pmap,
		    list,
		    space);
		if (actual <= space) {
			break;
		}

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again with the actual size reported
		 */
		size = vm_map_round_page(actual * sizeof(vm_offset_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t vmsize_used;
		*pages_count = actual;
		size_used = (actual * sizeof(vm_offset_t));
		vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		(void) vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ | VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
		    (vm_map_address_t)addr,
		    (vm_map_size_t)size_used,
		    TRUE,
		    (vm_map_copy_t *)pages);
		if (vmsize_used != size) {
			(void) kmem_free(ipc_kernel_map,
			    addr + vmsize_used,
			    size - vmsize_used);
		}
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

#endif /* VM32_SUPPORT */

/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t host,
	__DEBUG_ONLY hash_info_bucket_array_t *infop,
	__DEBUG_ONLY mach_msg_type_number_t *countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr = 0;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	/* start with in-line data */

	info = *infop;
	potential = *countp;

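	/*
	 * CountInOut: try the caller-supplied in-line buffer first.  If
	 * vm_page_info() reports more buckets than fit, switch to a
	 * pageable kernel buffer sized for the reported count and retry.
	 */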
	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential) {
			break;
		}

		/* allocate more memory */

		if (info != *infop) {
			kmem_free(ipc_kernel_map, addr, size);
		}

		size = vm_map_round_page(actual * sizeof *info,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		kr = kmem_alloc(ipc_kernel_map, &addr, size,
		    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}

		info = (hash_info_bucket_t *) addr;
		potential = (unsigned int) (size / sizeof(*info));
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used, vmused;

		used = (actual * sizeof(*info));
		vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (vmused != size) {
			kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
		}

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}