/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 *	Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>

#include <machine/atomic.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager_internal.h>
#include <vm/vm_external.h>
#include <vm/vm_fault.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_object_internal.h>

#include <sys/kdebug_triage.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	memory_object_fault_info_t fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);

const struct memory_object_pager_ops compressor_pager_ops = {
	.memory_object_reference = compressor_memory_object_reference,
	.memory_object_deallocate = compressor_memory_object_deallocate,
	.memory_object_init = compressor_memory_object_init,
	.memory_object_terminate = compressor_memory_object_terminate,
	.memory_object_data_request = compressor_memory_object_data_request,
	.memory_object_data_return = compressor_memory_object_data_return,
	.memory_object_data_initialize = compressor_memory_object_data_initialize,
	.memory_object_map = compressor_memory_object_map,
	.memory_object_last_unmap = compressor_memory_object_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "compressor pager"
};

/* internal data structures */

struct {
	uint64_t data_returns;
	uint64_t data_requests;
	uint64_t put;
	uint64_t get;
	uint64_t state_clr;
	uint64_t state_get;
	uint64_t transfer;
} compressor_pager_stats;

typedef int compressor_slot_t; /* stand-in for c_slot_mapping */

typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t cpgr_lock;
#if MEMORY_OBJECT_HAS_REFCOUNT
#define cpgr_references cpgr_hdr.mo_ref
#else
	os_ref_atomic_t cpgr_references;
#endif
	unsigned int cpgr_num_slots;
	unsigned int cpgr_num_slots_occupied;
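	/*
	 * The slot table uses one of three layouts depending on the size
	 * of the backing object: objects of at most 2 pages keep their
	 * slots embedded in the pager itself (cpgr_eslots), objects that
	 * fit in a single chunk use one directly allocated slot array
	 * (cpgr_dslots), and larger objects use an array of pointers to
	 * fixed-size chunks (cpgr_islots), each chunk being allocated
	 * lazily on first use.
	 */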
	union {
		compressor_slot_t cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t *cpgr_dslots;   /* direct slots */
		compressor_slot_t **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;

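/*
 * Map a memory_object_t back to its compressor_pager_t, yielding NULL if
 * the object is absent or is not managed by this pager (as identified by
 * its memory_object_pager_ops vtable).
 */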
#define compressor_pager_lookup(_mem_obj_, _cpgr_)                      \
	MACRO_BEGIN                                                     \
	if (_mem_obj_ == NULL ||                                        \
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {         \
		_cpgr_ = NULL;                                          \
	} else {                                                        \
		_cpgr_ = (compressor_pager_t) _mem_obj_;                \
	}                                                               \
	MACRO_END

/* embedded slot pointers in compressor_pager get packed, so VA restricted */
static ZONE_DEFINE_TYPE(compressor_pager_zone, "compressor_pager",
    struct compressor_pager, ZC_NOENCRYPT | ZC_VM);

LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager");

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
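/*
 * With 4-byte slots, each 512-byte chunk maps COMPRESSOR_SLOTS_PER_CHUNK =
 * 128 pages, i.e. 512KB of uncompressed data per chunk with 4KB pages.
 */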

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
    int num_slots,
    vm_compressor_options_t flags,
    int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	memory_object_offset_t offset,
	compressor_slot_t **slot_pp);

#if defined(__LP64__)

/* restricted VA zones for slots */

#define NUM_SLOTS_ZONES 3

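/*
 * Three size classes for slot arrays: a 16-byte allocation covers up to
 * 4 slots, 64 bytes up to 16 slots, and 512 bytes a full chunk of
 * 128 slots.
 */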
static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = {
	16,
	64,
	COMPRESSOR_SLOTS_CHUNK_SIZE
};

static const char *compressor_slots_zones_names[NUM_SLOTS_ZONES] = {
	"compressor_slots.16",
	"compressor_slots.64",
	"compressor_slots.512"
};

static zone_t compressor_slots_zones[NUM_SLOTS_ZONES];

#endif /* defined(__LP64__) */

static void zfree_slot_array(compressor_slot_t *slots, size_t size);
static compressor_slot_t *zalloc_slot_array(size_t size, zalloc_flags_t);

static inline unsigned int
compressor_pager_num_chunks(
	compressor_pager_t pager)
{
	unsigned int num_chunks;

	num_chunks = pager->cpgr_num_slots / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks * COMPRESSOR_SLOTS_PER_CHUNK < pager->cpgr_num_slots) {
		num_chunks++; /* do the equivalent of ceil() instead of trunc() for the above division */
	}
	return num_chunks;
}
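
/*
 * For example, a 1MB object with 4KB pages has 256 slots, which occupy
 * ceil(256 / 128) = 2 chunks.
 */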

kern_return_t
compressor_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_init: bad request");
	}
	pager->cpgr_hdr.mo_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
	memory_object_t mem_obj)
{
	memory_object_control_t control;
	compressor_pager_t pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_hdr.mo_control;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);
	return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	os_ref_retain_locked_raw(&pager->cpgr_references, NULL);
	compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;
	unsigned int num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (os_ref_release_locked_raw(&pager->cpgr_references, NULL) > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	unsigned int num_chunks;
	unsigned int i;
	compressor_slot_t *chunk;

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					0,
					NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree_type(compressor_slot_t *, num_chunks,
		    pager->cpgr_slots.cpgr_islots);
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		zfree_slot_array(chunk,
		    (pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
		panic("compressor_memory_object_data_request(): bad alignment");
	}

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 * memory_object_data_initialize: check whether we already have each page,
 * and write it if we do not.  The implementation is far from optimized,
 * and also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface. */

kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size)
{
	compressor_pager_t pager;
	memory_object_offset_t cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	    cur_offset < offset + size;
	    cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}


/*ARGSUSED*/
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}

/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t new_size,
	memory_object_t *new_mem_obj)
{
	compressor_pager_t pager;
	unsigned int num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow",
		    __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = zalloc_flags(compressor_pager_zone, Z_WAITOK | Z_NOFAIL);

	compressor_pager_lock_init(pager);
	os_ref_init_raw(&pager->cpgr_references, NULL);
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/*
		 * cpgr_islots points to an array of chunk pointers; every
		 * chunk holds 512 / sizeof(compressor_slot_t) = 128 slot
		 * mappings.
		 */
		pager->cpgr_slots.cpgr_islots = kalloc_type(compressor_slot_t *,
		    num_chunks, Z_WAITOK | Z_ZERO);
	} else if (pager->cpgr_num_slots > 2) {
		pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO);
	} else {
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}


unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t *chunk,
	int num_slots,
	vm_compressor_options_t flags,
	int *failures)
{
	int i;
	int retval;
	unsigned int num_slots_freed;

	if (failures) {
		*failures = 0;
	}
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0) {
			retval = vm_compressor_free(&chunk[i], flags);

			if (retval == 0) {
				num_slots_freed++;
			} else {
				if (retval == -2) {
					assert(flags & C_DONT_BLOCK);
				}

				if (failures) {
					*failures += 1;
				}
			}
		}
	}
	return num_slots_freed;
}

/*
 * Look up the slot mapping for this page in the pager: return the address
 * of its slot if one exists and, when "do_alloc" is set, make room for it
 * first if it does not.
 */
void
compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	memory_object_offset_t offset,
	compressor_slot_t **slot_pp /* OUT */)
{
	unsigned int num_chunks;
	uint32_t page_num;
	unsigned int chunk_idx;
	int slot_idx;
	compressor_slot_t *chunk;
	compressor_slot_t *t_chunk;

	/*
	 * The offset is relative to the pager: the first page of the first
	 * vm_object that created the pager has an offset of 0.
	 */
	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE,
			    Z_WAITOK | Z_ZERO);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				os_atomic_thread_fence(release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk) {
				zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}

#if defined(__LP64__)
__startup_func
static void
vm_compressor_slots_init(void)
{
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		compressor_slots_zones[idx] = zone_create(
			compressor_slots_zones_names[idx],
			compressor_slots_zones_sizes[idx],
			ZC_PGZ_USE_GUARDS | ZC_VM);
	}
}
STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_compressor_slots_init);
#endif /* defined(__LP64__) */

static compressor_slot_t *
zalloc_slot_array(size_t size, zalloc_flags_t flags)
{
#if defined(__LP64__)
	compressor_slot_t *slots = NULL;

	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		if (size > compressor_slots_zones_sizes[idx]) {
			continue;
		}
		slots = zalloc_flags(compressor_slots_zones[idx], flags);
		break;
	}
	return slots;
#else /* defined(__LP64__) */
	return kalloc_data(size, flags);
#endif /* !defined(__LP64__) */
}

static void
zfree_slot_array(compressor_slot_t *slots, size_t size)
{
#if defined(__LP64__)
	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		if (size > compressor_slots_zones_sizes[idx]) {
			continue;
		}
		zfree(compressor_slots_zones[idx], slots);
		break;
	}
#else /* defined(__LP64__) */
	kfree_data(slots, size);
#endif /* !defined(__LP64__) */
}

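/*
 * Record a compressed copy of the page at "offset": look up (allocating it
 * if needed) the slot mapping for that page, release any stale compressed
 * copy already in the slot, and hand the page to the compressor.
 * "*compressed_count_delta_p" returns the net change in the number of
 * occupied slots.
 */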
kern_return_t
vm_compressor_pager_put(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	void **current_chead,
	char *scratch_buf,
	int *compressed_count_delta_p, /* OUT */
	vm_compressor_options_t flags)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/*
	 * This routine is called by the pageout thread, which must not be
	 * blocked by read activities.  Therefore the grant of the vs lock
	 * must be done on a try rather than a blocking basis.  The code
	 * below relies on the fact that the interface is synchronous.
	 * Should this interface become asynchronous again for some type of
	 * pager in the future, the pages will have to be returned through
	 * a separate, asynchronous path.
	 */

	compressor_pager_lookup(mem_obj, pager);

	uint32_t dummy_conv;
	if (os_convert_overflow(offset / PAGE_SIZE, &dummy_conv)) {
		/* overflow, page number doesn't fit in a uint32 */
		panic("%s: offset 0x%llx overflow", __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 * Look up the slot mapping that corresponds to this offset, creating
	 * it if it doesn't exist yet; vm_compressor_put() will store a value
	 * into it once it allocates the compressor slot.
	 */
	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, flags);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * disconnected.
	 */

	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf, flags)) {
		return KERN_RESOURCE_SHORTAGE;
	}
	*compressed_count_delta_p += 1;

	return KERN_SUCCESS;
}

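/*
 * Decompress the page at "offset" into physical page "ppnum".  On success
 * the slot is normally emptied and "*compressed_count_delta_p" reflects
 * that; a copy-on-write fault can leave the original slot occupied.
 */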
kern_return_t
vm_compressor_pager_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	int *my_fault_type,
	vm_compressor_options_t flags,
	int *compressed_count_delta_p)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_OUT_OF_RANGE), 0 /* arg */);
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_GET_NO_PAGE), 0 /* arg */);
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int retval;
		bool unmodified = (vm_compressor_is_slot_compressed(slot_p) == false);
		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, (unmodified ? (flags | C_PAGE_UNMODIFIED) : flags));
		if (retval == -1) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_COMPRESSOR_DECOMPRESS_FAILED), 0 /* arg */);
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == 1) {
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			/*
			 * Not a fatal failure because we just retry with a
			 * blocking get later, so we skip ktriage to avoid
			 * noise.
			 */
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}

unsigned int
vm_compressor_pager_state_clr(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
	unsigned int num_slots_freed;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
	}

	return num_slots_freed;
}

vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow",
		    __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}

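/*
 * Free the compressed data held by this pager.  For chunked pagers, any
 * chunk whose slots were all freed without failures is released back to
 * its zone.  Returns the number of slots freed.
 */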
unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t mem_obj,
	vm_compressor_options_t flags)
{
	compressor_pager_t pager;
	unsigned int num_chunks;
	int failures;
	unsigned int i;
	compressor_slot_t *chunk;
	unsigned int num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					flags,
					&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	}

	compressor_pager_unlock(pager);

	return num_slots_freed;
}

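/*
 * Move the compressed data for "src_offset" in the source pager into the
 * (previously empty) slot for "dst_offset" in the destination pager, and
 * update both occupancy counters.
 */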
void
vm_compressor_pager_transfer(
	memory_object_t dst_mem_obj,
	memory_object_offset_t dst_offset,
	memory_object_t src_mem_obj,
	memory_object_offset_t src_offset)
{
	compressor_pager_t src_pager, dst_pager;
	compressor_slot_t *src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, dst_offset, &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, src_offset, &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}

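/*
 * Scan forward from "offset" for the next page for which this pager holds
 * compressed data; returns that page's offset, or (memory_object_offset_t)-1
 * if there is none or the starting offset is invalid.
 */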
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	unsigned int num_chunks;
	uint32_t page_num;
	unsigned int chunk_idx;
	uint32_t slot_idx;
	compressor_slot_t *chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = compressor_pager_num_chunks(pager);
	if (num_chunks == 1) {
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		    slot_idx < pager->cpgr_num_slots;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) slot_idx *
				       PAGE_SIZE;
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	    slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	    chunk_idx < num_chunks;
	    chunk_idx++,
	    slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		    slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
				    COMPRESSOR_SLOTS_PER_CHUNK) +
				    slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) next_slot *
				       PAGE_SIZE;
			}
		}
	}
	return (memory_object_offset_t) -1;
}

unsigned int
vm_compressor_pager_get_count(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}

/* Add page count to the counter in the pager */
void
vm_compressor_pager_count(
	memory_object_t mem_obj,
	int compressed_count_delta,
	boolean_t shared_lock,
	vm_object_t object __unused)
{
	compressor_pager_t pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		    (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
		    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}

#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	void **current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */

	compressor_slot_t *slot_p;
	compressor_pager_t dst_pager;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL) {
		return KERN_FAILURE;
	}

	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */

#if DEVELOPMENT || DEBUG

kern_return_t
vm_compressor_pager_inject_error(memory_object_t mem_obj,
    memory_object_offset_t offset)
{
	kern_return_t result = KERN_FAILURE;
	compressor_slot_t *slot_p;
	compressor_pager_t pager;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, pager);
	if (pager != NULL) {
		compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
		if (slot_p != NULL && *slot_p != 0) {
			vm_compressor_inject_error(slot_p);
			result = KERN_SUCCESS;
		}
	}

	return result;
}

/*
 * Write debugging information about the pager to the given buffer.
 * "*size" is the number of bytes free in the buffer on input and the
 * number of bytes written on output.  Dumping the slot mappings into the
 * buffer is not implemented yet; see the TODO below.
 */
kern_return_t
vm_compressor_pager_dump(memory_object_t mem_obj, /* IN */
    __unused char *buf,       /* IN buffer to write to */
    __unused size_t *size,    /* IN-OUT */
    bool *is_compressor,      /* OUT */
    unsigned int *slot_count) /* OUT */
{
	compressor_pager_t pager = NULL;
	compressor_pager_lookup(mem_obj, pager);

	*size = 0;
	if (pager == NULL) {
		*is_compressor = false;
		*slot_count = 0;
		return KERN_SUCCESS;
	}
	*is_compressor = true;
	*slot_count = pager->cpgr_num_slots_occupied;

	/*
	 * size_t insize = *size;
	 * unsigned int needed_size = 0; // pager->cpgr_num_slots_occupied * sizeof(compressor_slot_t) / sizeof(int);
	 * if (needed_size > insize) {
	 *      return KERN_NO_SPACE;
	 * }
	 * TODO: not fully implemented yet, need to dump out the mappings
	 * size = 0;
	 */
	return KERN_SUCCESS;
}

#endif /* DEVELOPMENT || DEBUG */
