1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <vm/vm_compressor_internal.h>
30
31 #if CONFIG_PHANTOM_CACHE
32 #include <vm/vm_phantom_cache_internal.h>
33 #endif
34
35 #include <vm/vm_map_xnu.h>
36 #include <vm/vm_pageout_xnu.h>
37 #include <vm/vm_map_internal.h>
38 #include <vm/memory_object.h>
39 #include <vm/vm_compressor_algorithms_internal.h>
40 #include <vm/vm_compressor_backing_store_internal.h>
41 #include <vm/vm_fault.h>
42 #include <vm/vm_protos.h>
43 #include <vm/vm_kern_xnu.h>
44 #include <vm/vm_compressor_pager_internal.h>
45 #include <vm/vm_iokit.h>
46 #include <mach/mach_host.h> /* for host_info() */
47 #if DEVELOPMENT || DEBUG
48 #include <kern/hvg_hypercall.h>
49 #include <vm/vm_compressor_info.h> /* for c_segment_info */
50 #endif
51 #include <kern/ledger.h>
52 #include <kern/policy_internal.h>
53 #include <kern/thread_group.h>
54 #include <san/kasan.h>
55 #include <os/atomic_private.h>
56 #include <os/log.h>
57 #include <pexpert/pexpert.h>
58 #include <pexpert/device_tree.h>
59
60 #if defined(__x86_64__)
61 #include <i386/misc_protos.h>
62 #endif
63 #if defined(__arm64__)
64 #include <arm/machine_routines.h>
65 #endif
66
67 #include <IOKit/IOHibernatePrivate.h>
68
69 /*
70 * The segment buffer size is a tradeoff.
71 * A larger buffer leads to faster I/O throughput, better compression ratios
72 * (since fewer bytes are wasted at the end of the segment),
73 * and less overhead (both in time and space).
74 * However, a smaller buffer causes less swap when the system is overcommitted
75 * because a higher percentage of the swapped-in segment is actually accessed
76 * before it goes back out to storage.
77 *
78 * So on systems without swap, a larger segment is a clear win.
79 * On systems with swap, the choice is murkier. Empirically, we've
80 * found that a 64KB segment provides a better tradeoff both in terms of
81 * performance and swap writes than a 256KB segment on systems with fast SSDs
82 * and a HW compression block.
83 */
84 #define C_SEG_BUFSIZE_ARM_SWAP (1024 * 64)
85 #if XNU_TARGET_OS_OSX && defined(__arm64__)
86 #define C_SEG_BUFSIZE_DEFAULT C_SEG_BUFSIZE_ARM_SWAP
87 #else
88 #define C_SEG_BUFSIZE_DEFAULT (1024 * 256)
89 #endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */
90 uint32_t c_seg_bufsize;
91
92 uint32_t c_seg_max_pages; /* maximum number of pages the compressed data of a segment can take */
93 uint32_t c_seg_off_limit; /* if we've reached this size while filling the segment, don't bother trying to fill anymore
94 * because it's unlikely to succeed */
95 uint32_t c_seg_allocsize, c_seg_slot_var_array_min_len;
96
97 extern boolean_t vm_darkwake_mode;
98 extern zone_t vm_page_zone;
99
100 #if DEVELOPMENT || DEBUG
101 /* sysctl defined in bsd/dev/arm64/sysctl.c */
102 static event_t debug_cseg_wait_event = NULL;
103 #endif /* DEVELOPMENT || DEBUG */
104
105 #if CONFIG_FREEZE
106 bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
107 #endif /* CONFIG_FREEZE */
108
109 #if POPCOUNT_THE_COMPRESSED_DATA
110 boolean_t popcount_c_segs = TRUE;
111
112 static inline uint32_t
113 vmc_pop(uintptr_t ins, int sz)
114 {
115 uint32_t rv = 0;
116
117 if (__probable(popcount_c_segs == FALSE)) {
118 return 0xDEAD707C;
119 }
120
121 while (sz >= 16) {
122 uint32_t rv1, rv2;
123 uint64_t *ins64 = (uint64_t *) ins;
124 uint64_t *ins642 = (uint64_t *) (ins + 8);
125 rv1 = __builtin_popcountll(*ins64);
126 rv2 = __builtin_popcountll(*ins642);
127 rv += rv1 + rv2;
128 sz -= 16;
129 ins += 16;
130 }
131
132 while (sz >= 4) {
133 uint32_t *ins32 = (uint32_t *) ins;
134 rv += __builtin_popcount(*ins32);
135 sz -= 4;
136 ins += 4;
137 }
138
139 while (sz > 0) {
140 char *ins8 = (char *)ins;
141 rv += __builtin_popcount(*ins8);
142 sz--;
143 ins++;
144 }
145 return rv;
146 }
147 #endif
148
149 #if VALIDATE_C_SEGMENTS
150 boolean_t validate_c_segs = TRUE;
151 #endif
152 /*
153 * vm_compressor_mode has a hierarchy of control to set its value.
154 * boot-args are checked first, then device-tree, and finally
155 * the default value that is defined below. See vm_fault_init() for
156 * the boot-arg & device-tree code.
157 */
158
159 #if !XNU_TARGET_OS_OSX
160
161 #if CONFIG_FREEZE
162 int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
163 struct freezer_context freezer_context_global;
164 #else /* CONFIG_FREEZE */
165 int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
166 #endif /* CONFIG_FREEZE */
167
168 #else /* !XNU_TARGET_OS_OSX */
169 int vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
170
171 #endif /* !XNU_TARGET_OS_OSX */
172
173 TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
174 int vm_compressor_is_active = 0;
175 int vm_compressor_available = 0;
176
177 extern uint64_t vm_swap_get_max_configured_space(void);
178 extern void vm_pageout_io_throttle(void);
179
180 #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
181 extern unsigned int hash_string(char *cp, int len);
182 static unsigned int vmc_hash(char *, int);
183 boolean_t checksum_c_segs = TRUE;
184
185 unsigned int
186 vmc_hash(char *cp, int len)
187 {
188 unsigned int result;
189 if (__probable(checksum_c_segs == FALSE)) {
190 return 0xDEAD7A37;
191 }
192 vm_memtag_disable_checking();
193 result = hash_string(cp, len);
194 vm_memtag_enable_checking();
195 return result;
196 }
197 #endif
198
199 #define UNPACK_C_SIZE(cs) ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
200 #define PACK_C_SIZE(cs, size) (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
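/*
 * Mechanical illustration of the encoding above (assuming 4 KiB pages; the
 * motivation -- that the c_size field is too narrow to hold PAGE_SIZE
 * itself -- is an assumption noted here, not something this file states):
 *   PACK_C_SIZE(cs, PAGE_SIZE)   stores PAGE_SIZE - 1 (4095) in cs->c_size
 *   UNPACK_C_SIZE(cs)            yields PAGE_SIZE     (4096)
 *   PACK_C_SIZE(cs, 1800)        stores 1800
 *   UNPACK_C_SIZE(cs)            yields 1800
 */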
201
202
203 struct c_sv_hash_entry {
204 union {
205 struct {
206 uint32_t c_sv_he_ref;
207 uint32_t c_sv_he_data;
208 } c_sv_he;
209 uint64_t c_sv_he_record;
210 } c_sv_he_un;
211 };
212
213 #define he_ref c_sv_he_un.c_sv_he.c_sv_he_ref
214 #define he_data c_sv_he_un.c_sv_he.c_sv_he_data
215 #define he_record c_sv_he_un.c_sv_he_record
216
217 #define C_SV_HASH_MAX_MISS 32
218 #define C_SV_HASH_SIZE ((1 << 10))
219 #define C_SV_HASH_MASK ((1 << 10) - 1)
220
221 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
222 #define C_SV_CSEG_ID ((1 << 21) - 1)
223 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
224 #define C_SV_CSEG_ID ((1 << 22) - 1)
225 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
226
227 /* elements of c_segments array */
228 union c_segu {
229 c_segment_t c_seg;
230 uintptr_t c_segno; /* index of the next element in the segments free-list, c_free_segno_head is the head */
231 };
232
233 #define C_SLOT_ASSERT_PACKABLE(ptr) \
234 VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);
235
236 #define C_SLOT_PACK_PTR(ptr) \
237 VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)
238
239 #define C_SLOT_UNPACK_PTR(cslot) \
240 (c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)
241
242 /* for debugging purposes */
243 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
244 VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
245
246 uint32_t c_segment_count = 0; /* count all allocated c_segments in all queues */
247 uint32_t c_segment_count_max = 0; /* maximum c_segment_count has ever been */
248
249 uint64_t c_generation_id = 0;
250 uint64_t c_generation_id_flush_barrier;
251
252 boolean_t hibernate_no_swapspace = FALSE;
253 boolean_t hibernate_flush_timed_out = FALSE;
254 clock_sec_t hibernate_flushing_deadline = 0;
255
256 #if RECORD_THE_COMPRESSED_DATA
257 /* buffer used as an intermediate stage before writing to file */
258 char *c_compressed_record_sbuf; /* start */
259 char *c_compressed_record_ebuf; /* end */
260 char *c_compressed_record_cptr; /* next buffered write */
261 #endif
262
263 /* the different queues a c_segment can be in via c_age_list */
264 queue_head_t c_age_list_head;
265 queue_head_t c_early_swappedin_list_head, c_regular_swappedin_list_head, c_late_swappedin_list_head;
266 queue_head_t c_early_swapout_list_head, c_regular_swapout_list_head, c_late_swapout_list_head;
267 queue_head_t c_swapio_list_head;
268 queue_head_t c_swappedout_list_head;
269 queue_head_t c_swappedout_sparse_list_head;
270 queue_head_t c_major_list_head;
271 queue_head_t c_filling_list_head;
272 queue_head_t c_bad_list_head;
273
274 /* count of each of the queues above */
275 uint32_t c_age_count = 0;
276 uint32_t c_early_swappedin_count = 0, c_regular_swappedin_count = 0, c_late_swappedin_count = 0;
277 uint32_t c_early_swapout_count = 0, c_regular_swapout_count = 0, c_late_swapout_count = 0;
278 uint32_t c_swapio_count = 0;
279 uint32_t c_swappedout_count = 0;
280 uint32_t c_swappedout_sparse_count = 0;
281 uint32_t c_major_count = 0;
282 uint32_t c_filling_count = 0;
283 uint32_t c_empty_count = 0;
284 uint32_t c_bad_count = 0;
285
286 /* a c_segment can be in the minor-compact queue as well as one of the above ones, via c_list */
287 queue_head_t c_minor_list_head;
288 uint32_t c_minor_count = 0;
289
290 int c_overage_swapped_count = 0;
291 int c_overage_swapped_limit = 0;
292
293 int c_seg_fixed_array_len; /* number of slots in the c_segment inline slots array */
294 union c_segu *c_segments; /* array of all c_segments, not all of it may be populated */
295 vm_offset_t c_buffers; /* starting address of all compressed data pointed to by c_segment.c_store.c_buffer */
296 vm_size_t c_buffers_size; /* total size allocated in c_buffers */
297 caddr_t c_segments_next_page; /* next page to populate for extending c_segments */
298 boolean_t c_segments_busy;
299 uint32_t c_segments_available; /* how many segments are in populated memory (used or free), populated size of c_segments array */
300 uint32_t c_segments_limit; /* max size of c_segments array */
301 uint32_t c_segments_nearing_limit;
302
303 uint32_t c_segment_svp_in_hash;
304 uint32_t c_segment_svp_hash_succeeded;
305 uint32_t c_segment_svp_hash_failed;
306 uint32_t c_segment_svp_zero_compressions;
307 uint32_t c_segment_svp_nonzero_compressions;
308 uint32_t c_segment_svp_zero_decompressions;
309 uint32_t c_segment_svp_nonzero_decompressions;
310
311 uint32_t c_segment_noncompressible_pages;
312
313 uint32_t c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor, including SV (single value) pages */
314 #if CONFIG_FREEZE
315 int32_t c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
316 int32_t c_segment_pages_compressed_incore_late_swapout = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory and tagged for swapout */
317 uint32_t c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
318 #endif /* CONFIG_FREEZE */
319
320 uint32_t c_segment_pages_compressed_limit;
321 uint32_t c_segment_pages_compressed_nearing_limit;
322 uint32_t c_free_segno_head = (uint32_t)-1; /* head of free list of c_segment pointers in c_segments */
323
324 uint32_t vm_compressor_minorcompact_threshold_divisor = 10;
325 uint32_t vm_compressor_majorcompact_threshold_divisor = 10;
326 uint32_t vm_compressor_unthrottle_threshold_divisor = 10;
327 uint32_t vm_compressor_catchup_threshold_divisor = 10;
328
329 uint32_t vm_compressor_minorcompact_threshold_divisor_overridden = 0;
330 uint32_t vm_compressor_majorcompact_threshold_divisor_overridden = 0;
331 uint32_t vm_compressor_unthrottle_threshold_divisor_overridden = 0;
332 uint32_t vm_compressor_catchup_threshold_divisor_overridden = 0;
333
334 #define C_SEGMENTS_PER_PAGE (PAGE_SIZE / sizeof(union c_segu))
335
336 LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
337 LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
338 LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);
339
340 boolean_t decompressions_blocked = FALSE;
341
342 zone_t compressor_segment_zone;
343 int c_compressor_swap_trigger = 0;
344
345 uint32_t compressor_cpus;
346 char *compressor_scratch_bufs;
347
348 struct vm_compressor_kdp_state vm_compressor_kdp_state;
349
350 clock_sec_t start_of_sample_period_sec = 0;
351 clock_nsec_t start_of_sample_period_nsec = 0;
352 clock_sec_t start_of_eval_period_sec = 0;
353 clock_nsec_t start_of_eval_period_nsec = 0;
354 uint32_t sample_period_decompression_count = 0;
355 uint32_t sample_period_compression_count = 0;
356 uint32_t last_eval_decompression_count = 0;
357 uint32_t last_eval_compression_count = 0;
358
359 #define DECOMPRESSION_SAMPLE_MAX_AGE (60 * 30)
360
361 boolean_t vm_swapout_ripe_segments = FALSE;
362 uint32_t vm_ripe_target_age = (60 * 60 * 48);
363
364 uint32_t swapout_target_age = 0;
365 uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
366 uint32_t overage_decompressions_during_sample_period = 0;
367
368
369 void do_fastwake_warmup(queue_head_t *, boolean_t);
370 boolean_t fastwake_warmup = FALSE;
371 boolean_t fastwake_recording_in_progress = FALSE;
372 uint64_t dont_trim_until_ts = 0;
373
374 uint64_t c_segment_warmup_count;
375 uint64_t first_c_segment_to_warm_generation_id = 0;
376 uint64_t last_c_segment_to_warm_generation_id = 0;
377 boolean_t hibernate_flushing = FALSE;
378
379 _Atomic uint64_t c_segment_input_bytes = 0;
380 _Atomic uint64_t c_segment_compressed_bytes = 0;
381 _Atomic uint64_t compressor_bytes_used = 0;
382
383 /* Keeps track of the most recent timestamp for when major compaction finished. */
384 mach_timespec_t major_compact_ts;
385
386 struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned(8)));
387
388 static void vm_compressor_swap_trigger_thread(void);
389 static void vm_compressor_do_delayed_compactions(boolean_t);
390 static void vm_compressor_compact_and_swap(boolean_t);
391 static void vm_compressor_process_regular_swapped_in_segments(boolean_t);
392 static void vm_compressor_process_special_swapped_in_segments_locked(void);
393
394 struct vm_compressor_swapper_stats vmcs_stats;
395
396 static void vm_compressor_process_major_segments(bool);
397 #if XNU_TARGET_OS_OSX
398 static void vm_compressor_take_paging_space_action(void);
399 #endif /* XNU_TARGET_OS_OSX */
400
401 void compute_swapout_target_age(void);
402
403 boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
404 boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);
405
406 int c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
407 int c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
408 void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
409
410 void c_seg_move_to_sparse_list(c_segment_t);
411 void c_seg_insert_into_q(queue_head_t *, c_segment_t);
412
413 uint64_t vm_available_memory(void);
414
415 /*
416 * indicate the need to do a major compaction if
417 * the overall set of in-use compression segments
418 * becomes sparse... on systems that support pressure
419 * driven swapping, this will also cause swapouts to
420 * be initiated.
421 */
422 static bool
423 vm_compressor_needs_to_major_compact(void)
424 {
425 uint32_t incore_seg_count;
426
427 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
428
429 /* second condition:
430 * first term:
431 * - (incore_seg_count * c_seg_max_pages) is the maximum number of pages this many segments could hold
432 * - VM_PAGE_COMPRESSOR_COUNT is the number of pages the buffers actually hold
433 * -- subtracting these gives the number of pages wasted as holes because segments are not full
434 * second term:
435 * - 1/8 of the maximum size that can be held by this many segments
436 * meaning of the comparison: is the ratio of wasted space greater than 1/8
437 * first condition:
438 * compare the number of segments in use vs the number of segments that can ever be allocated
439 * if we don't have much data in the compressor, we don't need to care about wasted space in holes
440 */
441
442 if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
443 ((incore_seg_count * c_seg_max_pages) - VM_PAGE_COMPRESSOR_COUNT) >
444 ((incore_seg_count / 8) * c_seg_max_pages)) {
445 return true;
446 }
447 return false;
448 }
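/*
 * Worked example of the check above (illustrative numbers only): with
 * c_seg_max_pages = 16, incore_seg_count = 1000 and
 * VM_PAGE_COMPRESSOR_COUNT = 13000, the wasted space is
 * 1000 * 16 - 13000 = 3000 pages while 1/8 of the capacity is
 * (1000 / 8) * 16 = 2000 pages, so a major compaction is requested,
 * provided c_segment_count has also reached c_segments_nearing_limit / 8.
 */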
449
450 TUNABLE_WRITEABLE(uint64_t, vm_compressor_minor_fragmentation_threshold_pct, "vm_compressor_minor_frag_threshold_pct", 10);
451
452 static bool
453 vm_compressor_needs_to_minor_compact(void)
454 {
455 uint32_t compactible_seg_count = os_atomic_load(&c_minor_count, relaxed);
456 if (compactible_seg_count == 0) {
457 return false;
458 }
459
460 bool is_pressured = AVAILABLE_NON_COMPRESSED_MEMORY <
461 VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD;
462 if (!is_pressured) {
463 return false;
464 }
465
466 uint64_t bytes_used = os_atomic_load(&compressor_bytes_used, relaxed);
467 uint64_t bytes_total = VM_PAGE_COMPRESSOR_COUNT * PAGE_SIZE_64;
468 uint64_t bytes_frag = bytes_total - bytes_used;
469 bool is_fragmented = bytes_frag >
470 bytes_total * vm_compressor_minor_fragmentation_threshold_pct / 100;
471
472 return is_fragmented;
473 }
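/*
 * Illustrative example (hypothetical numbers): with a pool of 100000
 * compressor pages and 85000 pages' worth of bytes in use, bytes_frag is
 * 15% of bytes_total, which exceeds the default 10% threshold, so minor
 * compaction is requested -- but only if segments are queued on the minor
 * list and AVAILABLE_NON_COMPRESSED_MEMORY has already dropped below
 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD.
 */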
474
475
476 uint64_t
477 vm_available_memory(void)
478 {
479 return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
480 }
481
482
483 uint32_t
484 vm_compressor_pool_size(void)
485 {
486 return VM_PAGE_COMPRESSOR_COUNT;
487 }
488
489 uint32_t
490 vm_compressor_fragmentation_level(void)
491 {
492 const uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
493 if ((incore_seg_count == 0) || (c_seg_max_pages == 0)) {
494 return 0;
495 }
496 return 100 - (vm_compressor_pool_size() * 100 / (incore_seg_count * c_seg_max_pages));
497 }
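/*
 * Illustrative example (hypothetical numbers): with 1000 in-core segments,
 * c_seg_max_pages == 16 and a current pool of 13000 pages, the level is
 * 100 - (13000 * 100 / 16000) = 19, i.e. roughly 19% of the in-core
 * segment capacity is currently holes.
 */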
498
499 uint32_t
500 vm_compression_ratio(void)
501 {
502 if (vm_compressor_pool_size() == 0) {
503 return UINT32_MAX;
504 }
505 return c_segment_pages_compressed / vm_compressor_pool_size();
506 }
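/*
 * Illustrative example (hypothetical numbers): if 30000 uncompressed pages
 * are currently represented by a 10000-page compressor pool, the function
 * returns 3, i.e. roughly a 3:1 compression ratio.
 */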
507
508 uint64_t
509 vm_compressor_pages_compressed(void)
510 {
511 return c_segment_pages_compressed * PAGE_SIZE_64;
512 }
513
514 bool
515 vm_compressor_compressed_pages_nearing_limit(void)
516 {
517 uint32_t pages = 0;
518
519 #if CONFIG_FREEZE
520 pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
521 #else /* CONFIG_FREEZE */
522 pages = c_segment_pages_compressed;
523 #endif /* CONFIG_FREEZE */
524
525 return pages > c_segment_pages_compressed_nearing_limit;
526 }
527
528 static bool
529 vm_compressor_segments_nearing_limit(void)
530 {
531 uint64_t segments;
532
533 #if CONFIG_FREEZE
534 if (freezer_incore_cseg_acct) {
535 if (os_sub_overflow(c_segment_count, c_swappedout_count, &segments)) {
536 segments = 0;
537 }
538 if (os_sub_overflow(segments, c_swappedout_sparse_count, &segments)) {
539 segments = 0;
540 }
541 } else {
542 segments = os_atomic_load(&c_segment_count, relaxed);
543 }
544 #else /* CONFIG_FREEZE */
545 segments = c_segment_count;
546 #endif /* CONFIG_FREEZE */
547
548 return segments > c_segments_nearing_limit;
549 }
550
551 boolean_t
552 vm_compressor_low_on_space(void)
553 {
554 return vm_compressor_compressed_pages_nearing_limit() ||
555 vm_compressor_segments_nearing_limit();
556 }
557
558
559 boolean_t
560 vm_compressor_out_of_space(void)
561 {
562 #if CONFIG_FREEZE
563 uint64_t incore_seg_count;
564 uint32_t incore_compressed_pages;
565 if (freezer_incore_cseg_acct) {
566 if (os_sub_overflow(c_segment_count, c_swappedout_count, &incore_seg_count)) {
567 incore_seg_count = 0;
568 }
569 if (os_sub_overflow(incore_seg_count, c_swappedout_sparse_count, &incore_seg_count)) {
570 incore_seg_count = 0;
571 }
572 incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
573 } else {
574 incore_seg_count = os_atomic_load(&c_segment_count, relaxed);
575 incore_compressed_pages = os_atomic_load(&c_segment_pages_compressed_incore, relaxed);
576 }
577
578 if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
579 (incore_seg_count > c_segments_incore_limit)) {
580 return TRUE;
581 }
582 #else /* CONFIG_FREEZE */
583 if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
584 (c_segment_count >= c_segments_limit)) {
585 return TRUE;
586 }
587 #endif /* CONFIG_FREEZE */
588 return FALSE;
589 }
590
591 bool
592 vm_compressor_is_thrashing()
593 {
594 compute_swapout_target_age();
595
596 if (swapout_target_age) {
597 c_segment_t c_seg;
598
599 lck_mtx_lock_spin_always(c_list_lock);
600
601 if (!queue_empty(&c_age_list_head)) {
602 c_seg = (c_segment_t) queue_first(&c_age_list_head);
603
604 if (c_seg->c_creation_ts > swapout_target_age) {
605 swapout_target_age = 0;
606 }
607 }
608 lck_mtx_unlock_always(c_list_lock);
609 }
610
611 return swapout_target_age != 0;
612 }
613
614
615 int
616 vm_wants_task_throttled(task_t task)
617 {
618 ledger_amount_t compressed;
619 if (task == kernel_task) {
620 return 0;
621 }
622
623 if (VM_CONFIG_SWAP_IS_ACTIVE) {
624 if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED())) {
625 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, &compressed);
626 compressed >>= VM_MAP_PAGE_SHIFT(task->map);
627 if ((unsigned int)compressed > (c_segment_pages_compressed / 4)) {
628 return 1;
629 }
630 }
631 }
632 return 0;
633 }
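/*
 * Illustrative example (hypothetical numbers): with swap active and the
 * compressor low on space, a task whose internal_compressed ledger works
 * out to 300000 pages gets throttled once system-wide
 * c_segment_pages_compressed is below 1200000, i.e. once the task owns
 * more than a quarter of all compressed pages.
 */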
634
635
636 #if DEVELOPMENT || DEBUG
637 /*
638 * On compressor/swap exhaustion, kill the largest process regardless of
639 * its chosen process policy.
640 */
641 TUNABLE(bool, kill_on_no_paging_space, "-kill_on_no_paging_space", false);
642 #endif /* DEVELOPMENT || DEBUG */
643
644 #if CONFIG_JETSAM
645 boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
646 void memorystatus_thread_wake(void);
647 extern uint32_t jetsam_kill_on_low_swap;
648 bool memorystatus_disable_swap(void);
649 #if CONFIG_PHANTOM_CACHE
650 extern bool memorystatus_phantom_cache_pressure;
651 #endif /* CONFIG_PHANTOM_CACHE */
652 int compressor_thrashing_induced_jetsam = 0;
653 int filecache_thrashing_induced_jetsam = 0;
654 static boolean_t vm_compressor_thrashing_detected = FALSE;
655 #else /* CONFIG_JETSAM */
656 static bool no_paging_space_action_in_progress = false;
657 extern void memorystatus_send_low_swap_note(void);
658 #endif /* CONFIG_JETSAM */
659
660 static void
661 vm_compressor_take_paging_space_action(void)
662 {
663 #if CONFIG_JETSAM
664 /*
665 * On systems with both swap and jetsam,
666 * just wake up the jetsam thread and have it handle the low swap condition
667 * by killing apps.
668 */
669 if (jetsam_kill_on_low_swap) {
670 memorystatus_thread_wake();
671 }
672 #else /* CONFIG_JETSAM */
673 if (os_atomic_cmpxchg(&no_paging_space_action_in_progress, false, true, relaxed)) {
674 if (no_paging_space_action()) {
675 #if DEVELOPMENT || DEBUG
676 if (kill_on_no_paging_space) {
677 /*
678 * Since we are choosing to always kill a process, we don't need the
679 * "out of application memory" dialog box in this mode. And, hence we won't
680 * send the knote.
681 */
682 os_atomic_store(&no_paging_space_action_in_progress, false, relaxed);
683 return;
684 }
685 #endif /* DEVELOPMENT || DEBUG */
686 memorystatus_send_low_swap_note();
687 }
688 os_atomic_store(&no_paging_space_action_in_progress, false, relaxed);
689 }
690 #endif /* !CONFIG_JETSAM */
691 }
692
693
694 void
695 vm_decompressor_lock(void)
696 {
697 PAGE_REPLACEMENT_ALLOWED(TRUE);
698
699 decompressions_blocked = TRUE;
700
701 PAGE_REPLACEMENT_ALLOWED(FALSE);
702 }
703
704 void
705 vm_decompressor_unlock(void)
706 {
707 PAGE_REPLACEMENT_ALLOWED(TRUE);
708
709 decompressions_blocked = FALSE;
710
711 PAGE_REPLACEMENT_ALLOWED(FALSE);
712
713 thread_wakeup((event_t)&decompressions_blocked);
714 }
715
716 static inline void
717 cslot_copy(c_slot_t cdst, c_slot_t csrc)
718 {
719 #if CHECKSUM_THE_DATA
720 cdst->c_hash_data = csrc->c_hash_data;
721 #endif
722 #if CHECKSUM_THE_COMPRESSED_DATA
723 cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
724 #endif
725 #if POPCOUNT_THE_COMPRESSED_DATA
726 cdst->c_pop_cdata = csrc->c_pop_cdata;
727 #endif
728 cdst->c_size = csrc->c_size;
729 cdst->c_packed_ptr = csrc->c_packed_ptr;
730 #if defined(__arm64__)
731 cdst->c_codec = csrc->c_codec;
732 #endif
733 }
734
735 #if XNU_TARGET_OS_OSX
736 #define VM_COMPRESSOR_MAX_POOL_SIZE (192UL << 30)
737 #else
738 #define VM_COMPRESSOR_MAX_POOL_SIZE (0)
739 #endif
740
741 static vm_map_size_t compressor_size;
742 static SECURITY_READ_ONLY_LATE(struct mach_vm_range) compressor_range;
743 vm_map_t compressor_map;
744 uint64_t compressor_pool_max_size;
745 uint64_t compressor_pool_size;
746 uint32_t compressor_pool_multiplier;
747
748 #if DEVELOPMENT || DEBUG
749 /*
750 * Compressor segments are write-protected in development/debug
751 * kernels to help debug memory corruption.
752 * In cases where performance is a concern, this can be disabled
753 * via the boot-arg "-disable_cseg_write_protection".
754 */
755 boolean_t write_protect_c_segs = TRUE;
756 int vm_compressor_test_seg_wp;
757 uint32_t vm_ktrace_enabled;
758 #endif /* DEVELOPMENT || DEBUG */
759
760 #if (XNU_TARGET_OS_OSX && __arm64__)
761
762 #include <IOKit/IOPlatformExpert.h>
763 #include <sys/random.h>
764
765 static const char *csegbufsizeExperimentProperty = "_csegbufsz_experiment";
766 static thread_call_t csegbufsz_experiment_thread_call;
767
768 extern boolean_t IOServiceWaitForMatchingResource(const char * property, uint64_t timeout);
769 static void
770 erase_csegbufsz_experiment_property(__unused void *param0, __unused void *param1)
771 {
772 // Wait for NVRAM to be writable
773 if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) {
774 printf("csegbufsz_experiment_property: Failed to wait for IONVRAM.");
775 }
776
777 if (!PERemoveNVRAMProperty(csegbufsizeExperimentProperty)) {
778 printf("csegbufsize_experiment_property: Failed to remove %s from NVRAM.", csegbufsizeExperimentProperty);
779 }
780 thread_call_free(csegbufsz_experiment_thread_call);
781 }
782
783 static void
784 erase_csegbufsz_experiment_property_async()
785 {
786 csegbufsz_experiment_thread_call = thread_call_allocate_with_priority(
787 erase_csegbufsz_experiment_property,
788 NULL,
789 THREAD_CALL_PRIORITY_LOW
790 );
791 if (csegbufsz_experiment_thread_call == NULL) {
792 printf("csegbufsize_experiment_property: Unable to allocate thread call.");
793 } else {
794 thread_call_enter(csegbufsz_experiment_thread_call);
795 }
796 }
797
798 static void
799 cleanup_csegbufsz_experiment(__unused void *arg0)
800 {
801 char nvram = 0;
802 unsigned int len = sizeof(nvram);
803 if (PEReadNVRAMProperty(csegbufsizeExperimentProperty, &nvram, &len)) {
804 erase_csegbufsz_experiment_property_async();
805 }
806 }
807
808 STARTUP_ARG(EARLY_BOOT, STARTUP_RANK_FIRST, cleanup_csegbufsz_experiment, NULL);
809 #endif /* XNU_TARGET_OS_OSX && __arm64__ */
810
811 #if CONFIG_JETSAM
812 extern unsigned int memorystatus_swap_all_apps;
813 #endif /* CONFIG_JETSAM */
814
815 TUNABLE_DT(uint64_t, swap_vol_min_capacity, "/defaults", "kern.swap_min_capacity", "kern.swap_min_capacity", 0, TUNABLE_DT_NONE);
816
817 static void
818 vm_compressor_set_size(void)
819 {
820 /*
821 * Note that this function may be called multiple times on systems with app swap
822 * because the value of vm_swap_get_max_configured_space() and memorystatus_swap_all_apps
823 * can change based on the size of the swap volume. On these systems, we'll call
824 * this function once early in boot to reserve the maximum amount of VA required
825 * for the compressor submap and then one more time in vm_compressor_init after
826 * determining the swap volume size. We must not compute a larger size the second
827 * time around.
828 */
829 vm_size_t c_segments_arr_size = 0;
830 struct c_slot_mapping tmp_slot_ptr;
831
832 /* The segment size can be overwritten by a boot-arg */
833 if (!PE_parse_boot_argn("vm_compressor_segment_buffer_size", &c_seg_bufsize, sizeof(c_seg_bufsize))) {
834 #if CONFIG_JETSAM
835 if (memorystatus_swap_all_apps) {
836 c_seg_bufsize = C_SEG_BUFSIZE_ARM_SWAP;
837 } else {
838 c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
839 }
840 #else
841 c_seg_bufsize = C_SEG_BUFSIZE_DEFAULT;
842 #endif /* CONFIG_JETSAM */
843 }
844
845 vm_compressor_swap_init_swap_file_limit();
846 if (vm_compression_limit) {
847 compressor_pool_size = ptoa_64(vm_compression_limit);
848 }
849
850 compressor_pool_max_size = C_SEG_MAX_LIMIT;
851 compressor_pool_max_size *= c_seg_bufsize;
852
853 #if XNU_TARGET_OS_OSX
854
855 if (vm_compression_limit == 0) {
856 if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
857 compressor_pool_size = 16ULL * max_mem;
858 } else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
859 compressor_pool_size = 8ULL * max_mem;
860 } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
861 compressor_pool_size = 4ULL * max_mem;
862 } else {
863 compressor_pool_size = 2ULL * max_mem;
864 }
865 }
866 /*
867 * Cap the compressor pool size to a max of 192G
868 */
869 if (compressor_pool_size > VM_COMPRESSOR_MAX_POOL_SIZE) {
870 compressor_pool_size = VM_COMPRESSOR_MAX_POOL_SIZE;
871 }
872 if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
873 compressor_pool_multiplier = 1;
874 } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
875 compressor_pool_multiplier = 2;
876 } else {
877 compressor_pool_multiplier = 4;
878 }
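/*
 * Illustrative sizing for this macOS path (hypothetical machine,
 * vm_compression_limit unset): with max_mem = 16 GB the pool is sized at
 * 4 * 16 GB = 64 GB, well under the 192 GB cap, and
 * compressor_pool_multiplier is 2.
 */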
879
880 #else
881
882 if (compressor_pool_max_size > max_mem) {
883 compressor_pool_max_size = max_mem;
884 }
885
886 if (vm_compression_limit == 0) {
887 compressor_pool_size = max_mem;
888 }
889
890 #if XNU_TARGET_OS_WATCH
891 compressor_pool_multiplier = 2;
892 #elif XNU_TARGET_OS_IOS
893 if (max_mem <= (2ULL * 1024ULL * 1024ULL * 1024ULL)) {
894 compressor_pool_multiplier = 2;
895 } else {
896 compressor_pool_multiplier = 1;
897 }
898 #else
899 compressor_pool_multiplier = 1;
900 #endif
901
902 #endif
903
904 PE_parse_boot_argn("kern.compressor_pool_multiplier", &compressor_pool_multiplier, sizeof(compressor_pool_multiplier));
905 if (compressor_pool_multiplier < 1) {
906 compressor_pool_multiplier = 1;
907 }
908
909 if (compressor_pool_size > compressor_pool_max_size) {
910 compressor_pool_size = compressor_pool_max_size;
911 }
912
913 c_seg_max_pages = (c_seg_bufsize / PAGE_SIZE);
914 c_seg_slot_var_array_min_len = c_seg_max_pages;
915
916 #if !defined(__x86_64__)
917 c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 512)));
918 c_seg_allocsize = (c_seg_bufsize + PAGE_SIZE);
919 #else
920 c_seg_off_limit = (C_SEG_BYTES_TO_OFFSET((c_seg_bufsize - 128)));
921 c_seg_allocsize = c_seg_bufsize;
922 #endif /* !defined(__x86_64__) */
923
924 c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(c_seg_allocsize));
925 tmp_slot_ptr.s_cseg = c_segments_limit;
926 /* Panic on internal configs*/
927 assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
928
929 if (tmp_slot_ptr.s_cseg != c_segments_limit) {
930 tmp_slot_ptr.s_cseg = -1;
931 c_segments_limit = tmp_slot_ptr.s_cseg - 1; /*limited by segment idx bits in c_slot_mapping*/
932 compressor_pool_size = (c_segments_limit * (vm_size_t)(c_seg_allocsize));
933 }
934
935 c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
936
937 /* an upper limit on how many input pages the compressor can hold */
938 c_segment_pages_compressed_limit = (c_segments_limit * (c_seg_bufsize / PAGE_SIZE) * compressor_pool_multiplier);
939
940 if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
941 #if defined(XNU_TARGET_OS_WATCH)
942 c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
943 #else
944 if (!vm_compression_limit) {
945 c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
946 }
947 #endif
948 }
949
950 c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
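/*
 * Illustrative example (hypothetical values, and assuming the max_mem
 * floor applied above doesn't kick in): with c_segments_limit == 250000,
 * c_seg_bufsize / PAGE_SIZE == 4 and a multiplier of 1, the limit is
 * 1000000 input pages and the "nearing" threshold is 98% of that,
 * i.e. 980000 pages.
 */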
951
952 #if CONFIG_FREEZE
953 /*
954 * Our in-core limits are based on the size of the compressor pool.
955 * The c_segments_nearing_limit is also based on the compressor pool
956 * size and calculated above.
957 */
958 c_segments_incore_limit = c_segments_limit;
959
960 if (freezer_incore_cseg_acct) {
961 /*
962 * Add enough segments to track all frozen c_segs that can be stored in swap.
963 */
964 c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(c_seg_allocsize));
965 tmp_slot_ptr.s_cseg = c_segments_limit;
966 /* Panic on internal configs*/
967 assertf((tmp_slot_ptr.s_cseg == c_segments_limit), "vm_compressor_init: freezer reserve overflowed s_cseg field in c_slot_mapping with c_segno: %d", c_segments_limit);
968 }
969 #endif
970 /*
971 * Submap needs space for:
972 * - c_segments
973 * - c_buffers
974 * - swap reclamations -- c_seg_bufsize
975 */
976 c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
977 c_buffers_size = vm_map_round_page(((vm_size_t)c_seg_allocsize * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
978
979 compressor_size = c_segments_arr_size + c_buffers_size + c_seg_bufsize;
980
981 #if RECORD_THE_COMPRESSED_DATA
982 c_compressed_record_sbuf_size = (vm_size_t)c_seg_allocsize + (PAGE_SIZE * 2);
983 compressor_size += c_compressed_record_sbuf_size;
984 #endif /* RECORD_THE_COMPRESSED_DATA */
985 }
986 STARTUP(KMEM, STARTUP_RANK_FIRST, vm_compressor_set_size);
987
988 KMEM_RANGE_REGISTER_DYNAMIC(compressor, &compressor_range, ^() {
989 return compressor_size;
990 });
991
992 bool
993 osenvironment_is_diagnostics(void)
994 {
995 DTEntry chosen;
996 const char *osenvironment;
997 unsigned int size;
998 if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
999 if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
1000 return strcmp(osenvironment, "diagnostics") == 0;
1001 }
1002 }
1003 return false;
1004 }
1005
1006 void
1007 vm_compressor_init(void)
1008 {
1009 thread_t thread;
1010 #if RECORD_THE_COMPRESSED_DATA
1011 vm_size_t c_compressed_record_sbuf_size = 0;
1012 #endif /* RECORD_THE_COMPRESSED_DATA */
1013
1014 #if DEVELOPMENT || DEBUG || CONFIG_FREEZE
1015 char bootarg_name[32];
1016 #endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
1017 __unused uint64_t early_boot_compressor_size = compressor_size;
1018
1019 #if CONFIG_JETSAM
1020 if (memorystatus_swap_all_apps && osenvironment_is_diagnostics()) {
1021 printf("osenvironment == \"diagnostics\". Disabling app swap.\n");
1022 memorystatus_disable_swap();
1023 }
1024
1025 if (memorystatus_swap_all_apps) {
1026 /*
1027 * App swap is disabled on devices with small NANDs.
1028 * Now that we're no longer in early boot, we can get
1029 * the NAND size and re-run vm_compressor_set_size.
1030 */
1031 int error = vm_swap_vol_get_capacity(SWAP_VOLUME_NAME, &vm_swap_volume_capacity);
1032 #if DEVELOPMENT || DEBUG
1033 if (error != 0) {
1034 panic("vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
1035 }
1036 #else
1037 if (error != 0) {
1038 os_log_with_startup_serial(OS_LOG_DEFAULT, "vm_compressor_init: Unable to get swap volume capacity. error=%d\n", error);
1039 }
1040 #endif /* DEVELOPMENT || DEBUG */
1041 if (vm_swap_volume_capacity < swap_vol_min_capacity) {
1042 memorystatus_disable_swap();
1043 }
1044 /*
1045 * Resize the compressor and swap now that we know the capacity
1046 * of the swap volume.
1047 */
1048 vm_compressor_set_size();
1049 /*
1050 * We reserved a chunk of VA early in boot for the compressor submap.
1051 * We can't allocate more than that.
1052 */
1053 assert(compressor_size <= early_boot_compressor_size);
1054 }
1055 #endif /* CONFIG_JETSAM */
1056
1057 #if DEVELOPMENT || DEBUG
1058 if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
1059 write_protect_c_segs = FALSE;
1060 }
1061
1062 int vmcval = 1;
1063 #if defined(XNU_TARGET_OS_WATCH)
1064 vmcval = 0;
1065 #endif /* XNU_TARGET_OS_WATCH */
1066 PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));
1067
1068 if (kern_feature_override(KF_COMPRSV_OVRD)) {
1069 vmcval = 0;
1070 }
1071
1072 if (vmcval == 0) {
1073 #if POPCOUNT_THE_COMPRESSED_DATA
1074 popcount_c_segs = FALSE;
1075 #endif
1076 #if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
1077 checksum_c_segs = FALSE;
1078 #endif
1079 #if VALIDATE_C_SEGMENTS
1080 validate_c_segs = FALSE;
1081 #endif
1082 write_protect_c_segs = FALSE;
1083 }
1084 #endif /* DEVELOPMENT || DEBUG */
1085
1086 #if CONFIG_FREEZE
1087 if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
1088 freezer_incore_cseg_acct = FALSE;
1089 }
1090 #endif /* CONFIG_FREEZE */
1091
1092 assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);
1093
1094 #if !XNU_TARGET_OS_OSX
1095 vm_compressor_minorcompact_threshold_divisor = 20;
1096 vm_compressor_majorcompact_threshold_divisor = 30;
1097 vm_compressor_unthrottle_threshold_divisor = 40;
1098 vm_compressor_catchup_threshold_divisor = 60;
1099 #else /* !XNU_TARGET_OS_OSX */
1100 if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
1101 vm_compressor_minorcompact_threshold_divisor = 11;
1102 vm_compressor_majorcompact_threshold_divisor = 13;
1103 vm_compressor_unthrottle_threshold_divisor = 20;
1104 vm_compressor_catchup_threshold_divisor = 35;
1105 } else {
1106 vm_compressor_minorcompact_threshold_divisor = 20;
1107 vm_compressor_majorcompact_threshold_divisor = 25;
1108 vm_compressor_unthrottle_threshold_divisor = 35;
1109 vm_compressor_catchup_threshold_divisor = 50;
1110 }
1111 #endif /* !XNU_TARGET_OS_OSX */
1112
1113 queue_init(&c_bad_list_head);
1114 queue_init(&c_age_list_head);
1115 queue_init(&c_minor_list_head);
1116 queue_init(&c_major_list_head);
1117 queue_init(&c_filling_list_head);
1118 queue_init(&c_early_swapout_list_head);
1119 queue_init(&c_regular_swapout_list_head);
1120 queue_init(&c_late_swapout_list_head);
1121 queue_init(&c_swapio_list_head);
1122 queue_init(&c_early_swappedin_list_head);
1123 queue_init(&c_regular_swappedin_list_head);
1124 queue_init(&c_late_swappedin_list_head);
1125 queue_init(&c_swappedout_list_head);
1126 queue_init(&c_swappedout_sparse_list_head);
1127
1128 c_free_segno_head = -1;
1129 c_segments_available = 0;
1130
1131 compressor_map = kmem_suballoc(kernel_map, &compressor_range.min_address,
1132 compressor_size, VM_MAP_CREATE_NEVER_FAULTS,
1133 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_NOFAIL | KMS_PERMANENT,
1134 VM_KERN_MEMORY_COMPRESSOR).kmr_submap;
1135
1136 kmem_alloc(compressor_map, (vm_offset_t *)(&c_segments),
1137 (sizeof(union c_segu) * c_segments_limit),
1138 KMA_NOFAIL | KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT,
1139 VM_KERN_MEMORY_COMPRESSOR);
1140 kmem_alloc(compressor_map, &c_buffers, c_buffers_size,
1141 KMA_NOFAIL | KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT,
1142 VM_KERN_MEMORY_COMPRESSOR);
1143
1144 #if DEVELOPMENT || DEBUG
1145 if (hvg_is_hcall_available(HVG_HCALL_SET_COREDUMP_DATA)) {
1146 hvg_hcall_set_coredump_data();
1147 }
1148 #endif
1149
1150 /*
1151 * Pick a good size that will minimize fragmentation in zalloc
1152 * by minimizing the fragmentation in a 16k run.
1153 *
1154 * c_seg_slot_var_array_min_len is larger on 4k systems than 16k ones,
1155 * making the fragmentation in a 4k page terrible. Using 16k for all
1156 * systems matches zalloc() and will minimize fragmentation.
1157 */
1158 uint32_t c_segment_size = sizeof(struct c_segment) + (c_seg_slot_var_array_min_len * sizeof(struct c_slot));
1159 uint32_t cnt = (16 << 10) / c_segment_size;
1160 uint32_t frag = (16 << 10) % c_segment_size;
1161
1162 c_seg_fixed_array_len = c_seg_slot_var_array_min_len;
1163
1164 while (cnt * sizeof(struct c_slot) < frag) {
1165 c_segment_size += sizeof(struct c_slot);
1166 c_seg_fixed_array_len++;
1167 frag -= cnt * sizeof(struct c_slot);
1168 }
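/*
 * Worked example of the sizing loop above (struct sizes are purely
 * illustrative assumptions): with sizeof(struct c_segment) == 240,
 * sizeof(struct c_slot) == 8 and c_seg_slot_var_array_min_len == 64,
 * c_segment_size starts at 752, so a 16 KiB run holds 21 segments with
 * 592 bytes left over.  Each pass grows the segment by one inline slot
 * (752 -> 760 -> 768 -> 776), shrinking the leftover to 88 bytes, which
 * is less than 21 * sizeof(struct c_slot), so the loop stops with
 * c_seg_fixed_array_len == 67.
 */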
1169
1170 compressor_segment_zone = zone_create("compressor_segment",
1171 c_segment_size, ZC_PGZ_USE_GUARDS | ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
1172
1173 c_segments_busy = FALSE;
1174
1175 c_segments_next_page = (caddr_t)c_segments;
1176 vm_compressor_algorithm_init();
1177
1178 {
1179 host_basic_info_data_t hinfo;
1180 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
1181 size_t bufsize;
1182 char *buf;
1183
1184 #define BSD_HOST 1
1185 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
1186
1187 compressor_cpus = hinfo.max_cpus;
1188
1189 /* allocate various scratch buffers at the same place */
1190 bufsize = PAGE_SIZE;
1191 bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
1192 /* For the panic path */
1193 bufsize += vm_compressor_get_decode_scratch_size();
1194 #if CONFIG_FREEZE
1195 bufsize += vm_compressor_get_encode_scratch_size();
1196 #endif
1197 #if RECORD_THE_COMPRESSED_DATA
1198 bufsize += c_compressed_record_sbuf_size;
1199 #endif
1200
1201 kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
1202 KMA_DATA | KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT,
1203 VM_KERN_MEMORY_COMPRESSOR);
1204
1205 /*
1206 * vm_compressor_kdp_state.kc_panic_decompressed_page must be page aligned because we access
1207 * it through the physical aperture by page number.
1208 */
1209 vm_compressor_kdp_state.kc_panic_decompressed_page = buf;
1210 vm_compressor_kdp_state.kc_panic_decompressed_page_paddr = kvtophys((vm_offset_t)vm_compressor_kdp_state.kc_panic_decompressed_page);
1211 vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum = (ppnum_t) atop(vm_compressor_kdp_state.kc_panic_decompressed_page_paddr);
1212 buf += PAGE_SIZE;
1213 bufsize -= PAGE_SIZE;
1214
1215 compressor_scratch_bufs = buf;
1216 buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
1217 bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();
1218
1219 vm_compressor_kdp_state.kc_panic_scratch_buf = buf;
1220 buf += vm_compressor_get_decode_scratch_size();
1221 bufsize -= vm_compressor_get_decode_scratch_size();
1222
1223 /* This is set up before each stackshot in vm_compressor_kdp_init */
1224 vm_compressor_kdp_state.kc_scratch_bufs = NULL;
1225
1226 #if CONFIG_FREEZE
1227 freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
1228 buf += vm_compressor_get_encode_scratch_size();
1229 bufsize -= vm_compressor_get_encode_scratch_size();
1230 #endif
1231
1232 #if RECORD_THE_COMPRESSED_DATA
1233 c_compressed_record_sbuf = buf;
1234 c_compressed_record_cptr = buf;
1235 c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
1236 buf += c_compressed_record_sbuf_size;
1237 bufsize -= c_compressed_record_sbuf_size;
1238 #endif
1239 assert(bufsize == 0);
1240 }
1241
1242 if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
1243 BASEPRI_VM, &thread) != KERN_SUCCESS) {
1244 panic("vm_compressor_swap_trigger_thread: create failed");
1245 }
1246 thread_deallocate(thread);
1247
1248 if (vm_pageout_internal_start() != KERN_SUCCESS) {
1249 panic("vm_compressor_init: Failed to start the internal pageout thread.");
1250 }
1251 if (VM_CONFIG_SWAP_IS_PRESENT) {
1252 vm_compressor_swap_init();
1253 }
1254
1255 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
1256 vm_compressor_is_active = 1;
1257 }
1258
1259 vm_compressor_available = 1;
1260
1261 vm_page_reactivate_all_throttled();
1262
1263 bzero(&vmcs_stats, sizeof(struct vm_compressor_swapper_stats));
1264 }
1265
1266 #define COMPRESSOR_KDP_BUFSIZE (\
1267 (vm_compressor_get_decode_scratch_size() * compressor_cpus) + \
1268 (PAGE_SIZE * compressor_cpus) + \
1269 (sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus) + \
1270 (sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus))
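/*
 * The buffer sized by COMPRESSOR_KDP_BUFSIZE is carved up in
 * vm_compressor_kdp_init() below, one region per term of the macro:
 *   [ per-cpu decode scratch ][ per-cpu decompressed pages ]
 *   [ per-cpu paddr array    ][ per-cpu ppnum array        ]
 */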
1271
1272
1273 /**
1274 * Initializes the VM compressor in preparation for a stackshot.
1275 * Stackshot mutex must be held.
1276 */
1277 kern_return_t
1278 vm_compressor_kdp_init(void)
1279 {
1280 char *buf;
1281 kern_return_t err;
1282 size_t bufsize;
1283 size_t total_decode_size;
1284
1285 #if DEVELOPMENT || DEBUG
1286 extern lck_mtx_t stackshot_subsys_mutex;
1287 lck_mtx_assert(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);
1288 #endif /* DEVELOPMENT || DEBUG */
1289
1290 if (!vm_compressor_available) {
1291 return KERN_SUCCESS;
1292 }
1293
1294 bufsize = COMPRESSOR_KDP_BUFSIZE;
1295
1296 /* Allocate the per-cpu decompression pages. */
1297 err = kmem_alloc(kernel_map, (vm_offset_t *)&buf, bufsize,
1298 KMA_DATA | KMA_NOFAIL | KMA_KOBJECT,
1299 VM_KERN_MEMORY_COMPRESSOR);
1300
1301 if (err != KERN_SUCCESS) {
1302 return err;
1303 }
1304
1305 assert(vm_compressor_kdp_state.kc_scratch_bufs == NULL);
1306 vm_compressor_kdp_state.kc_scratch_bufs = buf;
1307 total_decode_size = vm_compressor_get_decode_scratch_size() * compressor_cpus;
1308 buf += total_decode_size;
1309 bufsize -= total_decode_size;
1310
1311 /*
1312 * vm_compressor_kdp_state.kc_decompressed_pages must be page aligned because we access
1313 * it through the physical aperture by page number.
1314 */
1315 assert(vm_compressor_kdp_state.kc_decompressed_pages == NULL);
1316 vm_compressor_kdp_state.kc_decompressed_pages = buf;
1317 buf += PAGE_SIZE * compressor_cpus;
1318 bufsize -= PAGE_SIZE * compressor_cpus;
1319
1320 /* Scary! This will be aligned, I promise :) */
1321 assert(((vm_address_t) buf) % _Alignof(addr64_t) == 0);
1322 assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr == NULL);
1323 vm_compressor_kdp_state.kc_decompressed_pages_paddr = (addr64_t*) (void*) buf;
1324 buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;
1325 bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_paddr) * compressor_cpus;
1326
1327 assert(((vm_address_t) buf) % _Alignof(ppnum_t) == 0);
1328 assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum == NULL);
1329 vm_compressor_kdp_state.kc_decompressed_pages_ppnum = (ppnum_t*) (void*) buf;
1330 buf += sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;
1331 bufsize -= sizeof(*vm_compressor_kdp_state.kc_decompressed_pages_ppnum) * compressor_cpus;
1332
1333 assert(bufsize == 0);
1334
1335 for (size_t i = 0; i < compressor_cpus; i++) {
1336 vm_offset_t offset = (vm_offset_t) &vm_compressor_kdp_state.kc_decompressed_pages[i * PAGE_SIZE];
1337 vm_compressor_kdp_state.kc_decompressed_pages_paddr[i] = kvtophys(offset);
1338 vm_compressor_kdp_state.kc_decompressed_pages_ppnum[i] = (ppnum_t) atop(vm_compressor_kdp_state.kc_decompressed_pages_paddr[i]);
1339 }
1340
1341 return KERN_SUCCESS;
1342 }
1343
1344 /*
1345 * Frees up compressor buffers used by stackshot.
1346 * Stackshot mutex must be held.
1347 */
1348 void
1349 vm_compressor_kdp_teardown(void)
1350 {
1351 extern lck_mtx_t stackshot_subsys_mutex;
1352 LCK_MTX_ASSERT(&stackshot_subsys_mutex, LCK_MTX_ASSERT_OWNED);
1353
1354 if (vm_compressor_kdp_state.kc_scratch_bufs == NULL) {
1355 return;
1356 }
1357
1358 /* Deallocate the per-cpu decompression pages. */
1359 kmem_free(kernel_map, (vm_offset_t) vm_compressor_kdp_state.kc_scratch_bufs, COMPRESSOR_KDP_BUFSIZE);
1360
1361 vm_compressor_kdp_state.kc_scratch_bufs = NULL;
1362 vm_compressor_kdp_state.kc_decompressed_pages = NULL;
1363 vm_compressor_kdp_state.kc_decompressed_pages_paddr = 0;
1364 vm_compressor_kdp_state.kc_decompressed_pages_ppnum = 0;
1365 }
1366
1367 #if VALIDATE_C_SEGMENTS
1368
1369 static void
1370 c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
1371 {
1372 uint16_t c_indx;
1373 int32_t bytes_used;
1374 uint32_t c_rounded_size;
1375 uint32_t c_size;
1376 c_slot_t cs;
1377
1378 if (__probable(validate_c_segs == FALSE)) {
1379 return;
1380 }
1381 if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
1382 c_indx = c_seg->c_firstemptyslot;
1383 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1384
1385 if (cs == NULL) {
1386 panic("c_seg_validate: no slot backing c_firstemptyslot");
1387 }
1388
1389 if (cs->c_size) {
1390 panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)", cs->c_size);
1391 }
1392 }
1393 bytes_used = 0;
1394
1395 for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
1396 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1397
1398 c_size = UNPACK_C_SIZE(cs);
1399
1400 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1401
1402 bytes_used += c_rounded_size;
1403
1404 #if CHECKSUM_THE_COMPRESSED_DATA
1405 unsigned csvhash;
1406 if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
1407 addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
1408 panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
1409 }
1410 #endif
1411 #if POPCOUNT_THE_COMPRESSED_DATA
1412 unsigned csvpop;
1413 if (c_size) {
1414 uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
1415 if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
1416 panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
1417 }
1418 }
1419 #endif
1420 }
1421
1422 if (bytes_used != c_seg->c_bytes_used) {
1423 panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d", bytes_used, c_seg->c_bytes_used);
1424 }
1425
1426 if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1427 panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d",
1428 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1429 }
1430
1431 if (must_be_compact) {
1432 if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
1433 panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d",
1434 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
1435 }
1436 }
1437 }
1438
1439 #endif
1440
1441
1442 void
1443 c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
1444 {
1445 boolean_t clear_busy = FALSE;
1446
1447 if (c_list_lock_held == FALSE) {
1448 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1449 C_SEG_BUSY(c_seg);
1450
1451 lck_mtx_unlock_always(&c_seg->c_lock);
1452 lck_mtx_lock_spin_always(c_list_lock);
1453 lck_mtx_lock_spin_always(&c_seg->c_lock);
1454
1455 clear_busy = TRUE;
1456 }
1457 }
1458 assert(c_seg->c_state != C_IS_FILLING);
1459
1460 if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) && !c_seg->c_has_donated_pages) {
1461 queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
1462 c_seg->c_on_minorcompact_q = 1;
1463 os_atomic_inc(&c_minor_count, relaxed);
1464 }
1465 if (c_list_lock_held == FALSE) {
1466 lck_mtx_unlock_always(c_list_lock);
1467 }
1468
1469 if (clear_busy == TRUE) {
1470 C_SEG_WAKEUP_DONE(c_seg);
1471 }
1472 }
1473
1474
1475 unsigned int c_seg_moved_to_sparse_list = 0;
1476
1477 void
1478 c_seg_move_to_sparse_list(c_segment_t c_seg)
1479 {
1480 boolean_t clear_busy = FALSE;
1481
1482 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1483 C_SEG_BUSY(c_seg);
1484
1485 lck_mtx_unlock_always(&c_seg->c_lock);
1486 lck_mtx_lock_spin_always(c_list_lock);
1487 lck_mtx_lock_spin_always(&c_seg->c_lock);
1488
1489 clear_busy = TRUE;
1490 }
1491 c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);
1492
1493 c_seg_moved_to_sparse_list++;
1494
1495 lck_mtx_unlock_always(c_list_lock);
1496
1497 if (clear_busy == TRUE) {
1498 C_SEG_WAKEUP_DONE(c_seg);
1499 }
1500 }
1501
1502
1503
1504
1505 int try_minor_compaction_failed = 0;
1506 int try_minor_compaction_succeeded = 0;
1507
1508 void
1509 c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
1510 {
1511 assert(c_seg->c_on_minorcompact_q);
1512 /*
1513 * c_seg is currently on the delayed minor compaction
1514 * queue and we have c_seg locked... if we can get the
1515 * c_list_lock w/o blocking (if we blocked we could deadlock
1516 * because the lock order is c_list_lock then c_seg's lock)
1517 * we'll pull it from the delayed list and free it directly
1518 */
1519 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1520 /*
1521 * c_list_lock is held, we need to bail
1522 */
1523 try_minor_compaction_failed++;
1524
1525 lck_mtx_unlock_always(&c_seg->c_lock);
1526 } else {
1527 try_minor_compaction_succeeded++;
1528
1529 C_SEG_BUSY(c_seg);
1530 c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
1531 }
1532 }
1533
1534
1535 int
1536 c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
1537 {
1538 int c_seg_freed;
1539
1540 assert(c_seg->c_busy);
1541 assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
1542
1543 /*
1544 * check for the case that can occur when we are not swapping
1545 * and this segment has been major compacted in the past
1546 * and moved to the majorcompact q to remove it from further
1547 * consideration... if the occupancy falls too low we need
1548 * to put it back on the age_q so that it will be considered
1549 * in the next major compaction sweep... if we don't do this
1550 * we will eventually run into the c_segments_limit
1551 */
1552 if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
1553 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
1554 }
1555 if (!c_seg->c_on_minorcompact_q) {
1556 if (clear_busy == TRUE) {
1557 C_SEG_WAKEUP_DONE(c_seg);
1558 }
1559
1560 lck_mtx_unlock_always(&c_seg->c_lock);
1561
1562 return 0;
1563 }
1564 queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
1565 c_seg->c_on_minorcompact_q = 0;
1566 os_atomic_dec(&c_minor_count, relaxed);
1567
1568 lck_mtx_unlock_always(c_list_lock);
1569
1570 if (disallow_page_replacement == TRUE) {
1571 lck_mtx_unlock_always(&c_seg->c_lock);
1572
1573 PAGE_REPLACEMENT_DISALLOWED(TRUE);
1574
1575 lck_mtx_lock_spin_always(&c_seg->c_lock);
1576 }
1577 c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);
1578
1579 if (disallow_page_replacement == TRUE) {
1580 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1581 }
1582
1583 if (need_list_lock == TRUE) {
1584 lck_mtx_lock_spin_always(c_list_lock);
1585 }
1586
1587 return c_seg_freed;
1588 }
1589
1590 void
1591 kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
1592 {
1593 c_segment_t c_seg = (c_segment_t) wait_event;
1594
1595 waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
1596 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
1597 }
1598
1599 #if DEVELOPMENT || DEBUG
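/*
 * Test hooks: wedge the calling thread on a fake busy c_segment so the
 * kThreadWaitCompressor block hint and the owner reporting above can be
 * exercised, then unwedge it again from another context.
 */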
1600 int
1601 do_cseg_wedge_thread(void)
1602 {
1603 struct c_segment c_seg;
1604 c_seg.c_busy_for_thread = current_thread();
1605
1606 debug_cseg_wait_event = (event_t) &c_seg;
1607
1608 thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1609 assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);
1610
1611 thread_block(THREAD_CONTINUE_NULL);
1612
1613 return 0;
1614 }
1615
1616 int
1617 do_cseg_unwedge_thread(void)
1618 {
1619 thread_wakeup(debug_cseg_wait_event);
1620 debug_cseg_wait_event = NULL;
1621
1622 return 0;
1623 }
1624 #endif /* DEVELOPMENT || DEBUG */
1625
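/*
 * Block until the current busy holder of this c_segment calls
 * C_SEG_WAKEUP_DONE.  Expects the c_seg lock to be held on entry; the
 * lock is dropped before blocking and is NOT reacquired on return.
 */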
1626 void
1627 c_seg_wait_on_busy(c_segment_t c_seg)
1628 {
1629 c_seg->c_wanted = 1;
1630
1631 thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1632 assert_wait((event_t) (c_seg), THREAD_UNINT);
1633
1634 lck_mtx_unlock_always(&c_seg->c_lock);
1635 thread_block(THREAD_CONTINUE_NULL);
1636 }
1637
1638 #if CONFIG_FREEZE
1639 /*
1640 * We don't have the task lock held while updating the task's
1641 * c_seg queues. We can do that because of the following restrictions:
1642 *
1643 * - SINGLE FREEZER CONTEXT:
1644 * We 'insert' c_segs into the task list on the task_freeze path.
1645 * There can only be one such freeze in progress and the task
1646 * isn't disappearing because we have the VM map lock held throughout
1647 * and we have a reference on the proc too.
1648 *
1649 * - SINGLE TASK DISOWN CONTEXT:
1650 * We 'disown' c_segs of a task ONLY from the task_terminate context. So
1651 * we don't need the task lock but we need the c_list_lock and the
1652 * compressor master lock (shared). We also hold the individual
1653 * c_seg locks (exclusive).
1654 *
1655 * While traversing the queues, if we either:
1656 * - can't get the c_seg lock on a try, we start over, because the c_seg
1657 * might be part of a compaction and could get freed, so we can't trust
1658 * that linkage and need to restart our queue traversal.
1659 * - run into a busy c_seg (say, being swapped in or freed), we drop all
1660 * locks, wait on it, and restart our queue traversal.
1661 *
1662 * - The new_owner_task below is currently only the kernel or NULL.
1663 *
1664 */
1665 void
1666 c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
1667 {
1668 task_t owner_task = c_seg->c_task_owner;
1669 uint64_t uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);
1670
1671 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1672 LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1673
1674 if (owner_task) {
1675 task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
1676 queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
1677 c_segment_t, c_task_list_next_cseg);
1678 }
1679
1680 if (new_owner_task) {
1681 queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
1682 c_segment_t, c_task_list_next_cseg);
1683 task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
1684 }
1685
1686 c_seg->c_task_owner = new_owner_task;
1687 }
1688
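/*
 * Called on the task_terminate path: hand every frozen c_segment still
 * owned by this task over to the kernel_task, restarting the traversal
 * whenever a segment is busy or its lock can't be taken without blocking.
 */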
1689 void
1690 task_disown_frozen_csegs(task_t owner_task)
1691 {
1692 c_segment_t c_seg = NULL, next_cseg = NULL;
1693
1694 again:
1695 PAGE_REPLACEMENT_DISALLOWED(TRUE);
1696 lck_mtx_lock_spin_always(c_list_lock);
1697
1698 for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
1699 !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
1700 c_seg = next_cseg) {
1701 next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);
1702
1703 if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
1704 lck_mtx_unlock(c_list_lock);
1705 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1706 goto again;
1707 }
1708
1709 if (c_seg->c_busy) {
1710 lck_mtx_unlock(c_list_lock);
1711 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1712
1713 c_seg_wait_on_busy(c_seg);
1714
1715 goto again;
1716 }
1717 assert(c_seg->c_task_owner == owner_task);
1718 c_seg_update_task_owner(c_seg, kernel_task);
1719 lck_mtx_unlock_always(&c_seg->c_lock);
1720 }
1721
1722 lck_mtx_unlock(c_list_lock);
1723 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1724 }
1725 #endif /* CONFIG_FREEZE */
1726
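/*
 * Move a c_segment from the queue for its current state to the queue for
 * new_state, updating the per-queue counts and asserting that the
 * transition is legal.  The caller must hold c_list_lock, and the c_seg
 * lock unless the segment is still filling.
 */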
1727 void
1728 c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
1729 {
1730 int old_state = c_seg->c_state;
1731 queue_head_t *donate_swapout_list_head, *donate_swappedin_list_head;
1732 uint32_t *donate_swapout_count, *donate_swappedin_count;
1733
1734 /*
1735 * On macOS the donate queue is swapped first, i.e. we use the c_early_swapout queue.
1736 * On other swap-capable platforms, we want to swap those segments out last,
1737 * so we use the c_late_swapout queue.
1738 */
1739 #if XNU_TARGET_OS_OSX /* tag:DONATE */
1740 #if (DEVELOPMENT || DEBUG)
1741 if (new_state != C_IS_FILLING) {
1742 LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1743 }
1744 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1745 #endif /* DEVELOPMENT || DEBUG */
1746
1747 donate_swapout_list_head = &c_early_swapout_list_head;
1748 donate_swapout_count = &c_early_swapout_count;
1749 donate_swappedin_list_head = &c_early_swappedin_list_head;
1750 donate_swappedin_count = &c_early_swappedin_count;
1751 #else /* XNU_TARGET_OS_OSX */
1752 donate_swapout_list_head = &c_late_swapout_list_head;
1753 donate_swapout_count = &c_late_swapout_count;
1754 donate_swappedin_list_head = &c_late_swappedin_list_head;
1755 donate_swappedin_count = &c_late_swappedin_count;
1756 #endif /* XNU_TARGET_OS_OSX */
1757
1758 switch (old_state) {
1759 case C_IS_EMPTY:
1760 assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);
1761
1762 c_empty_count--;
1763 break;
1764
1765 case C_IS_FILLING:
1766 assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);
1767
1768 queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1769 c_filling_count--;
1770 break;
1771
1772 case C_ON_AGE_Q:
1773 assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
1774 new_state == C_IS_FREE);
1775
1776 queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1777 c_age_count--;
1778 break;
1779
1780 case C_ON_SWAPPEDIN_Q:
1781 if (c_seg->c_has_donated_pages) {
1782 assert(new_state == C_ON_SWAPOUT_Q || new_state == C_IS_FREE);
1783 queue_remove(donate_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1784 *donate_swappedin_count -= 1;
1785 } else {
1786 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1787 #if CONFIG_FREEZE
1788 assert(c_seg->c_has_freezer_pages);
1789 queue_remove(&c_early_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1790 c_early_swappedin_count--;
1791 #else /* CONFIG_FREEZE */
1792 queue_remove(&c_regular_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1793 c_regular_swappedin_count--;
1794 #endif /* CONFIG_FREEZE */
1795 }
1796 break;
1797
1798 case C_ON_SWAPOUT_Q:
1799 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);
1800
1801 #if CONFIG_FREEZE
1802 if (c_seg->c_has_freezer_pages) {
1803 if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
1804 c_seg_update_task_owner(c_seg, NULL);
1805 }
1806 queue_remove(&c_early_swapout_list_head, c_seg, c_segment_t, c_age_list);
1807 c_early_swapout_count--;
1808 } else
1809 #endif /* CONFIG_FREEZE */
1810 {
1811 if (c_seg->c_has_donated_pages) {
1812 queue_remove(donate_swapout_list_head, c_seg, c_segment_t, c_age_list);
1813 *donate_swapout_count -= 1;
1814 } else {
1815 queue_remove(&c_regular_swapout_list_head, c_seg, c_segment_t, c_age_list);
1816 c_regular_swapout_count--;
1817 }
1818 }
1819
1820 if (new_state == C_ON_AGE_Q) {
1821 c_seg->c_has_donated_pages = 0;
1822 }
1823 thread_wakeup((event_t)&compaction_swapper_running);
1824 break;
1825
1826 case C_ON_SWAPIO_Q:
1827 #if CONFIG_FREEZE
1828 if (c_seg->c_has_freezer_pages) {
1829 assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1830 } else
1831 #endif /* CONFIG_FREEZE */
1832 {
1833 if (c_seg->c_has_donated_pages) {
1834 assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_SWAPPEDIN_Q);
1835 } else {
1836 assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1837 }
1838 }
1839
1840 queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1841 c_swapio_count--;
1842 break;
1843
1844 case C_ON_SWAPPEDOUT_Q:
1845 assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1846 new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
1847 new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1848
1849 queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1850 c_swappedout_count--;
1851 break;
1852
1853 case C_ON_SWAPPEDOUTSPARSE_Q:
1854 assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1855 new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1856
1857 queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1858 c_swappedout_sparse_count--;
1859 break;
1860
1861 case C_ON_MAJORCOMPACT_Q:
1862 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1863
1864 queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1865 c_major_count--;
1866 break;
1867
1868 case C_ON_BAD_Q:
1869 assert(new_state == C_IS_FREE);
1870
1871 queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1872 c_bad_count--;
1873 break;
1874
1875 default:
1876 panic("c_seg %p has bad c_state = %d", c_seg, old_state);
1877 }
1878
1879 switch (new_state) {
1880 case C_IS_FREE:
1881 assert(old_state != C_IS_FILLING);
1882
1883 break;
1884
1885 case C_IS_EMPTY:
1886 assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1887
1888 c_empty_count++;
1889 break;
1890
1891 case C_IS_FILLING:
1892 assert(old_state == C_IS_EMPTY);
1893
1894 queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1895 c_filling_count++;
1896 break;
1897
1898 case C_ON_AGE_Q:
1899 assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
1900 old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
1901 old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1902
1903 assert(!c_seg->c_has_donated_pages);
1904 if (old_state == C_IS_FILLING) {
1905 queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1906 } else {
1907 if (!queue_empty(&c_age_list_head)) {
1908 c_segment_t c_first;
1909
1910 c_first = (c_segment_t)queue_first(&c_age_list_head);
1911 c_seg->c_creation_ts = c_first->c_creation_ts;
1912 }
1913 queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1914 }
1915 c_age_count++;
1916 break;
1917
1918 case C_ON_SWAPPEDIN_Q:
1919 {
1920 queue_head_t *list_head;
1921
1922 assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q || old_state == C_ON_SWAPIO_Q);
1923 if (c_seg->c_has_donated_pages) {
1924 /* A swapout error can occur while the c_seg is still on the swapio queue */
1925 list_head = donate_swappedin_list_head;
1926 *donate_swappedin_count += 1;
1927 } else {
1928 #if CONFIG_FREEZE
1929 assert(c_seg->c_has_freezer_pages);
1930 list_head = &c_early_swappedin_list_head;
1931 c_early_swappedin_count++;
1932 #else /* CONFIG_FREEZE */
1933 list_head = &c_regular_swappedin_list_head;
1934 c_regular_swappedin_count++;
1935 #endif /* CONFIG_FREEZE */
1936 }
1937
1938 if (insert_head == TRUE) {
1939 queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1940 } else {
1941 queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1942 }
1943 break;
1944 }
1945
1946 case C_ON_SWAPOUT_Q:
1947 {
1948 queue_head_t *list_head;
1949
1950 #if CONFIG_FREEZE
1951 /*
1952 * A segment with both identities (frozen + donated pages)
1953 * is put on the early swapout queue, i.e. the frozen identity wins.
1954 * This is because when both identities are set, the donation bit
1955 * is added later, in the c_current_seg_filled path, for accounting
1956 * purposes.
1957 */
1958 if (c_seg->c_has_freezer_pages) {
1959 assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1960 list_head = &c_early_swapout_list_head;
1961 c_early_swapout_count++;
1962 } else
1963 #endif
1964 {
1965 if (c_seg->c_has_donated_pages) {
1966 assert(old_state == C_ON_SWAPPEDIN_Q || old_state == C_IS_FILLING);
1967 list_head = donate_swapout_list_head;
1968 *donate_swapout_count += 1;
1969 } else {
1970 assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1971 list_head = &c_regular_swapout_list_head;
1972 c_regular_swapout_count++;
1973 }
1974 }
1975
1976 if (insert_head == TRUE) {
1977 queue_enter_first(list_head, c_seg, c_segment_t, c_age_list);
1978 } else {
1979 queue_enter(list_head, c_seg, c_segment_t, c_age_list);
1980 }
1981 break;
1982 }
1983
1984 case C_ON_SWAPIO_Q:
1985 assert(old_state == C_ON_SWAPOUT_Q);
1986
1987 if (insert_head == TRUE) {
1988 queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1989 } else {
1990 queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1991 }
1992 c_swapio_count++;
1993 break;
1994
1995 case C_ON_SWAPPEDOUT_Q:
1996 assert(old_state == C_ON_SWAPIO_Q);
1997
1998 if (insert_head == TRUE) {
1999 queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
2000 } else {
2001 queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
2002 }
2003 c_swappedout_count++;
2004 break;
2005
2006 case C_ON_SWAPPEDOUTSPARSE_Q:
2007 assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);
2008
2009 if (insert_head == TRUE) {
2010 queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
2011 } else {
2012 queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
2013 }
2014
2015 c_swappedout_sparse_count++;
2016 break;
2017
2018 case C_ON_MAJORCOMPACT_Q:
2019 assert(old_state == C_ON_AGE_Q);
2020 assert(!c_seg->c_has_donated_pages);
2021
2022 if (insert_head == TRUE) {
2023 queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
2024 } else {
2025 queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
2026 }
2027 c_major_count++;
2028 break;
2029
2030 case C_ON_BAD_Q:
2031 assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
2032
2033 if (insert_head == TRUE) {
2034 queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
2035 } else {
2036 queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
2037 }
2038 c_bad_count++;
2039 break;
2040
2041 default:
2042 panic("c_seg %p requesting bad c_state = %d", c_seg, new_state);
2043 }
2044 c_seg->c_state = new_state;
2045 }
2046
2047
2048
2049 void
2050 c_seg_free(c_segment_t c_seg)
2051 {
2052 assert(c_seg->c_busy);
2053
2054 lck_mtx_unlock_always(&c_seg->c_lock);
2055 lck_mtx_lock_spin_always(c_list_lock);
2056 lck_mtx_lock_spin_always(&c_seg->c_lock);
2057
2058 c_seg_free_locked(c_seg);
2059 }
2060
2061
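/*
 * Tear down a busy, empty c_segment: detach its buffer or swap handle,
 * switch it to C_IS_FREE, depopulate any resident buffer pages (or free
 * the swap space), return its segno to the free list and release the
 * segment metadata.  Entered with c_list_lock and the c_seg lock held;
 * both are dropped along the way.
 */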
2062 void
2063 c_seg_free_locked(c_segment_t c_seg)
2064 {
2065 int segno;
2066 int pages_populated = 0;
2067 int32_t *c_buffer = NULL;
2068 uint64_t c_swap_handle = 0;
2069
2070 assert(c_seg->c_busy);
2071 assert(c_seg->c_slots_used == 0);
2072 assert(!c_seg->c_on_minorcompact_q);
2073 assert(!c_seg->c_busy_swapping);
2074
2075 if (c_seg->c_overage_swap == TRUE) {
2076 c_overage_swapped_count--;
2077 c_seg->c_overage_swap = FALSE;
2078 }
2079 if (!(C_SEG_IS_ONDISK(c_seg))) {
2080 c_buffer = c_seg->c_store.c_buffer;
2081 } else {
2082 c_swap_handle = c_seg->c_store.c_swap_handle;
2083 }
2084
2085 c_seg_switch_state(c_seg, C_IS_FREE, FALSE);
2086
2087 if (c_buffer) {
2088 pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
2089 c_seg->c_store.c_buffer = NULL;
2090 } else {
2091 #if CONFIG_FREEZE
2092 c_seg_update_task_owner(c_seg, NULL);
2093 #endif /* CONFIG_FREEZE */
2094
2095 c_seg->c_store.c_swap_handle = (uint64_t)-1;
2096 }
2097
2098 lck_mtx_unlock_always(&c_seg->c_lock);
2099
2100 lck_mtx_unlock_always(c_list_lock);
2101
2102 if (c_buffer) {
2103 if (pages_populated) {
2104 kernel_memory_depopulate((vm_offset_t)c_buffer,
2105 ptoa(pages_populated), KMA_COMPRESSOR,
2106 VM_KERN_MEMORY_COMPRESSOR);
2107 }
2108 } else if (c_swap_handle) {
2109 /*
2110 * Free swap space on disk.
2111 */
2112 vm_swap_free(c_swap_handle);
2113 }
2114 lck_mtx_lock_spin_always(&c_seg->c_lock);
2115 /*
2116 * c_seg must remain busy until
2117 * after the call to vm_swap_free
2118 */
2119 C_SEG_WAKEUP_DONE(c_seg);
2120 lck_mtx_unlock_always(&c_seg->c_lock);
2121
2122 segno = c_seg->c_mysegno;
2123
2124 lck_mtx_lock_spin_always(c_list_lock);
2125 /*
2126 * because the c_buffer is now associated with the segno,
2127 * we can't put the segno back on the free list until
2128 * after we have depopulated the c_buffer range, or
2129 * we run the risk of depopulating a range that is
2130 * now being used in one of the compressor heads
2131 */
2132 c_segments[segno].c_segno = c_free_segno_head;
2133 c_free_segno_head = segno;
2134 c_segment_count--;
2135
2136 lck_mtx_unlock_always(c_list_lock);
2137
2138 lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
2139
2140 if (c_seg->c_slot_var_array_len) {
2141 kfree_type(struct c_slot, c_seg->c_slot_var_array_len,
2142 c_seg->c_slot_var_array);
2143 }
2144
2145 zfree(compressor_segment_zone, c_seg);
2146 }
2147
2148 #if DEVELOPMENT || DEBUG
2149 int c_seg_trim_page_count = 0;
2150 #endif
2151
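/*
 * Walk backwards from c_nextslot, dropping trailing slots whose
 * compressed size is zero, and pull c_nextoffset / c_populated_offset
 * back so the unused tail of the segment can be reclaimed.
 */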
2152 void
2153 c_seg_trim_tail(c_segment_t c_seg)
2154 {
2155 c_slot_t cs;
2156 uint32_t c_size;
2157 uint32_t c_offset;
2158 uint32_t c_rounded_size;
2159 uint16_t current_nextslot;
2160 uint32_t current_populated_offset;
2161
2162 if (c_seg->c_bytes_used == 0) {
2163 return;
2164 }
2165 current_nextslot = c_seg->c_nextslot;
2166 current_populated_offset = c_seg->c_populated_offset;
2167
2168 while (c_seg->c_nextslot) {
2169 cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));
2170
2171 c_size = UNPACK_C_SIZE(cs);
2172
2173 if (c_size) {
2174 if (current_nextslot != c_seg->c_nextslot) {
2175 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2176 c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2177
2178 c_seg->c_nextoffset = c_offset;
2179 c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
2180 ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2181
2182 if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
2183 c_seg->c_firstemptyslot = c_seg->c_nextslot;
2184 }
2185 #if DEVELOPMENT || DEBUG
2186 c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
2187 round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
2188 PAGE_SIZE);
2189 #endif
2190 }
2191 break;
2192 }
2193 c_seg->c_nextslot--;
2194 }
2195 assert(c_seg->c_nextslot);
2196 }
2197
2198
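/*
 * Minor compaction: slide the live slots of a busy c_segment down over
 * the holes left by freed slots, then depopulate any buffer pages beyond
 * the new populated offset.  Returns 1 if the segment was empty and has
 * been freed, 0 otherwise.  The c_seg lock is dropped; if clear_busy is
 * TRUE the busy bit is cleared (and waiters woken) before returning.
 */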
2199 int
2200 c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
2201 {
2202 c_slot_mapping_t slot_ptr;
2203 uint32_t c_offset = 0;
2204 uint32_t old_populated_offset;
2205 uint32_t c_rounded_size;
2206 uint32_t c_size;
2207 uint16_t c_indx = 0;
2208 int i;
2209 c_slot_t c_dst;
2210 c_slot_t c_src;
2211
2212 assert(c_seg->c_busy);
2213
2214 #if VALIDATE_C_SEGMENTS
2215 c_seg_validate(c_seg, FALSE);
2216 #endif
2217 if (c_seg->c_bytes_used == 0) {
2218 c_seg_free(c_seg);
2219 return 1;
2220 }
2221 lck_mtx_unlock_always(&c_seg->c_lock);
2222
2223 if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
2224 goto done;
2225 }
2226
2227 /* TODO: assert first emptyslot's c_size is actually 0 */
2228
2229 #if DEVELOPMENT || DEBUG
2230 C_SEG_MAKE_WRITEABLE(c_seg);
2231 #endif
2232
2233 #if VALIDATE_C_SEGMENTS
2234 c_seg->c_was_minor_compacted++;
2235 #endif
2236 c_indx = c_seg->c_firstemptyslot;
2237 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2238
2239 old_populated_offset = c_seg->c_populated_offset;
2240 c_offset = c_dst->c_offset;
2241
2242 for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
2243 c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);
2244
2245 c_size = UNPACK_C_SIZE(c_src);
2246
2247 if (c_size == 0) {
2248 continue;
2249 }
2250
2251 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2252 /* N.B.: This memcpy may be an overlapping copy */
2253 memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
2254
2255 cslot_copy(c_dst, c_src);
2256 c_dst->c_offset = c_offset;
2257
2258 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2259 slot_ptr->s_cindx = c_indx;
2260
2261 c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2262 PACK_C_SIZE(c_src, 0);
2263 c_indx++;
2264
2265 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
2266 }
2267 c_seg->c_firstemptyslot = c_indx;
2268 c_seg->c_nextslot = c_indx;
2269 c_seg->c_nextoffset = c_offset;
2270 c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
2271 c_seg->c_bytes_unused = 0;
2272
2273 #if VALIDATE_C_SEGMENTS
2274 c_seg_validate(c_seg, TRUE);
2275 #endif
2276 if (old_populated_offset > c_seg->c_populated_offset) {
2277 uint32_t gc_size;
2278 int32_t *gc_ptr;
2279
2280 gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
2281 gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
2282
2283 kernel_memory_depopulate((vm_offset_t)gc_ptr, gc_size,
2284 KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
2285 }
2286
2287 #if DEVELOPMENT || DEBUG
2288 C_SEG_WRITE_PROTECT(c_seg);
2289 #endif
2290
2291 done:
2292 if (clear_busy == TRUE) {
2293 lck_mtx_lock_spin_always(&c_seg->c_lock);
2294 C_SEG_WAKEUP_DONE(c_seg);
2295 lck_mtx_unlock_always(&c_seg->c_lock);
2296 }
2297 return 0;
2298 }
2299
2300
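/*
 * Ensure a c_slot exists for c_nextslot: slots beyond the fixed in-line
 * array live in a variable-length array that is reallocated at double
 * its previous size (starting at c_seg_slot_var_array_min_len) whenever
 * it fills up.
 */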
2301 static void
2302 c_seg_alloc_nextslot(c_segment_t c_seg)
2303 {
2304 struct c_slot *old_slot_array = NULL;
2305 struct c_slot *new_slot_array = NULL;
2306 int newlen;
2307 int oldlen;
2308
2309 if (c_seg->c_nextslot < c_seg_fixed_array_len) {
2310 return;
2311 }
2312
2313 if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
2314 oldlen = c_seg->c_slot_var_array_len;
2315 old_slot_array = c_seg->c_slot_var_array;
2316
2317 if (oldlen == 0) {
2318 newlen = c_seg_slot_var_array_min_len;
2319 } else {
2320 newlen = oldlen * 2;
2321 }
2322
2323 new_slot_array = kalloc_type(struct c_slot, newlen, Z_WAITOK);
2324
2325 lck_mtx_lock_spin_always(&c_seg->c_lock);
2326
2327 if (old_slot_array) {
2328 memcpy(new_slot_array, old_slot_array,
2329 sizeof(struct c_slot) * oldlen);
2330 }
2331
2332 c_seg->c_slot_var_array_len = newlen;
2333 c_seg->c_slot_var_array = new_slot_array;
2334
2335 lck_mtx_unlock_always(&c_seg->c_lock);
2336
2337 kfree_type(struct c_slot, oldlen, old_slot_array);
2338 }
2339 }
2340
2341
2342 #define C_SEG_MAJOR_COMPACT_STATS_MAX (30)
2343
2344 struct {
2345 uint64_t asked_permission;
2346 uint64_t compactions;
2347 uint64_t moved_slots;
2348 uint64_t moved_bytes;
2349 uint64_t wasted_space_in_swapouts;
2350 uint64_t count_of_swapouts;
2351 uint64_t count_of_freed_segs;
2352 uint64_t bailed_compactions;
2353 uint64_t bytes_freed_rate_us;
2354 } c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];
2355
2356 int c_seg_major_compact_stats_now = 0;
2357
2358
2359 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE ((c_seg_bufsize * 90) / 100)
2360
2361
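/*
 * Quick feasibility check before a major compaction: if both segments are
 * already close to full, or the destination is out of offset or slot
 * space, pulling slots from c_seg_src into c_seg_dst isn't worthwhile.
 */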
2362 boolean_t
2363 c_seg_major_compact_ok(
2364 c_segment_t c_seg_dst,
2365 c_segment_t c_seg_src)
2366 {
2367 c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
2368
2369 if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
2370 c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
2371 return FALSE;
2372 }
2373
2374 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2375 /*
2376 * destination segment is full... can't compact
2377 */
2378 return FALSE;
2379 }
2380
2381 return TRUE;
2382 }
2383
2384
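/*
 * Major compaction: copy as many live slots as will fit from c_seg_src
 * into c_seg_dst, populating destination buffer pages on demand and then
 * fixing up the per-slot back pointers to point at their new home.
 * Returns TRUE if the destination can still accept more data, FALSE once
 * it has filled up.
 */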
2385 boolean_t
2386 c_seg_major_compact(
2387 c_segment_t c_seg_dst,
2388 c_segment_t c_seg_src)
2389 {
2390 c_slot_mapping_t slot_ptr;
2391 uint32_t c_rounded_size;
2392 uint32_t c_size;
2393 uint16_t dst_slot;
2394 int i;
2395 c_slot_t c_dst;
2396 c_slot_t c_src;
2397 boolean_t keep_compacting = TRUE;
2398
2399 /*
2400 * segments are not locked but they are both marked c_busy
2401 * which keeps c_decompress from working on them...
2402 * we can safely allocate new pages, move compressed data
2403 * from c_seg_src to c_seg_dst and update both c_segments'
2404 * state w/o holding the master lock
2405 */
2406 #if DEVELOPMENT || DEBUG
2407 C_SEG_MAKE_WRITEABLE(c_seg_dst);
2408 #endif
2409
2410 #if VALIDATE_C_SEGMENTS
2411 c_seg_dst->c_was_major_compacted++;
2412 c_seg_src->c_was_major_donor++;
2413 #endif
2414 assertf(c_seg_dst->c_has_donated_pages == c_seg_src->c_has_donated_pages, "Mismatched donation status Dst: %p, Src: %p\n", c_seg_dst, c_seg_src);
2415 c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
2416
2417 dst_slot = c_seg_dst->c_nextslot;
2418
2419 for (i = 0; i < c_seg_src->c_nextslot; i++) {
2420 c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);
2421
2422 c_size = UNPACK_C_SIZE(c_src);
2423
2424 if (c_size == 0) {
2425 /* BATCH: move what we have so far; */
2426 continue;
2427 }
2428
2429 int combined_size;
2430 combined_size = c_size;
2431 c_rounded_size = (combined_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
2432
2433 if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) combined_size) {
2434 int size_to_populate;
2435
2436 /* doesn't fit */
2437 size_to_populate = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);
2438
2439 if (size_to_populate == 0) {
2440 /* can't fit */
2441 keep_compacting = FALSE;
2442 break;
2443 }
2444 if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
2445 size_to_populate = C_SEG_MAX_POPULATE_SIZE;
2446 }
2447
2448 kernel_memory_populate(
2449 (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
2450 size_to_populate,
2451 KMA_NOFAIL | KMA_COMPRESSOR,
2452 VM_KERN_MEMORY_COMPRESSOR);
2453
2454 c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
2455 assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= c_seg_bufsize);
2456 }
2457 c_seg_alloc_nextslot(c_seg_dst);
2458
2459 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
2460
2461 memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
2462
2463 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
2464 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += combined_size;
2465
2466 cslot_copy(c_dst, c_src);
2467 c_dst->c_offset = c_seg_dst->c_nextoffset;
2468
2469 if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
2470 c_seg_dst->c_firstemptyslot++;
2471 }
2472 c_seg_dst->c_slots_used++;
2473 c_seg_dst->c_nextslot++;
2474 c_seg_dst->c_bytes_used += c_rounded_size;
2475 c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
2476
2477 PACK_C_SIZE(c_src, 0);
2478
2479 c_seg_src->c_bytes_used -= c_rounded_size;
2480 c_seg_src->c_bytes_unused += c_rounded_size;
2481 c_seg_src->c_firstemptyslot = 0;
2482
2483 assert(c_seg_src->c_slots_used);
2484 c_seg_src->c_slots_used--;
2485
2486 if (!c_seg_src->c_swappedin) {
2487 /* Pessimistically lose swappedin status when non-swappedin pages are added. */
2488 c_seg_dst->c_swappedin = false;
2489 }
2490
2491 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
2492 /* dest segment is now full */
2493 keep_compacting = FALSE;
2494 break;
2495 }
2496 }
2497 #if DEVELOPMENT || DEBUG
2498 C_SEG_WRITE_PROTECT(c_seg_dst);
2499 #endif
2500 if (dst_slot < c_seg_dst->c_nextslot) {
2501 PAGE_REPLACEMENT_ALLOWED(TRUE);
2502 /*
2503 * we've now locked out c_decompress from
2504 * converting the slot passed into it into
2505 * a c_segment_t which allows us to use
2506 * the backptr to change which c_segment and
2507 * index the slot points to
2508 */
2509 while (dst_slot < c_seg_dst->c_nextslot) {
2510 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
2511
2512 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
2513 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
2514 slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
2515 slot_ptr->s_cindx = dst_slot++;
2516 }
2517 PAGE_REPLACEMENT_ALLOWED(FALSE);
2518 }
2519 return keep_compacting;
2520 }
2521
2522
2523 uint64_t
2524 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
2525 {
2526 uint64_t end_msecs;
2527 uint64_t start_msecs;
2528
2529 end_msecs = (end_sec * 1000) + end_nsec / 1000000;
2530 start_msecs = (start_sec * 1000) + start_nsec / 1000000;
2531
2532 return end_msecs - start_msecs;
2533 }
2534
2535
2536
2537 uint32_t compressor_eval_period_in_msecs = 250;
2538 uint32_t compressor_sample_min_in_msecs = 500;
2539 uint32_t compressor_sample_max_in_msecs = 10000;
2540 uint32_t compressor_thrashing_threshold_per_10msecs = 50;
2541 uint32_t compressor_thrashing_min_per_10msecs = 20;
2542
2543 /* When true, reset sample data next chance we get. */
2544 static boolean_t compressor_need_sample_reset = FALSE;
2545
2546
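/*
 * Thrashing detection: once enough compression/decompression activity has
 * accumulated in the current sample window, use the age histogram of
 * recent decompressions to decide whether to set swapout_target_age, the
 * creation timestamp below which segments are considered cold enough to
 * push out to swap.
 */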
2547 void
2548 compute_swapout_target_age(void)
2549 {
2550 clock_sec_t cur_ts_sec;
2551 clock_nsec_t cur_ts_nsec;
2552 uint32_t min_operations_needed_in_this_sample;
2553 uint64_t elapsed_msecs_in_eval;
2554 uint64_t elapsed_msecs_in_sample;
2555 boolean_t need_eval_reset = FALSE;
2556
2557 clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
2558
2559 elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);
2560
2561 if (compressor_need_sample_reset ||
2562 elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
2563 compressor_need_sample_reset = TRUE;
2564 need_eval_reset = TRUE;
2565 goto done;
2566 }
2567 elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);
2568
2569 if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
2570 goto done;
2571 }
2572 need_eval_reset = TRUE;
2573
2574 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);
2575
2576 min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;
2577
2578 if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
2579 (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
2580 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
2581 sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);
2582
2583 swapout_target_age = 0;
2584
2585 compressor_need_sample_reset = TRUE;
2586 need_eval_reset = TRUE;
2587 goto done;
2588 }
2589 last_eval_compression_count = sample_period_compression_count;
2590 last_eval_decompression_count = sample_period_decompression_count;
2591
2592 if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
2593 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
2594 goto done;
2595 }
2596 if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
2597 uint64_t running_total;
2598 uint64_t working_target;
2599 uint64_t aging_target;
2600 uint32_t oldest_age_of_csegs_sampled = 0;
2601 uint64_t working_set_approximation = 0;
2602
2603 swapout_target_age = 0;
2604
2605 working_target = (sample_period_decompression_count / 100) * 95; /* 95 percent */
2606 aging_target = (sample_period_decompression_count / 100) * 1; /* 1 percent */
2607 running_total = 0;
2608
2609 for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
2610 running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2611
2612 working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2613
2614 if (running_total >= working_target) {
2615 break;
2616 }
2617 }
2618 if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
2619 working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;
2620
2621 if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
2622 running_total = overage_decompressions_during_sample_period;
2623
2624 for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
2625 running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2626
2627 if (running_total >= aging_target) {
2628 break;
2629 }
2630 }
2631 swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;
2632
2633 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
2634 } else {
2635 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
2636 }
2637 } else {
2638 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
2639 }
2640
2641 compressor_need_sample_reset = TRUE;
2642 need_eval_reset = TRUE;
2643 } else {
2644 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
2645 }
2646 done:
2647 if (compressor_need_sample_reset == TRUE) {
2648 bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
2649 overage_decompressions_during_sample_period = 0;
2650
2651 start_of_sample_period_sec = cur_ts_sec;
2652 start_of_sample_period_nsec = cur_ts_nsec;
2653 sample_period_decompression_count = 0;
2654 sample_period_compression_count = 0;
2655 last_eval_decompression_count = 0;
2656 last_eval_compression_count = 0;
2657 compressor_need_sample_reset = FALSE;
2658 }
2659 if (need_eval_reset == TRUE) {
2660 start_of_eval_period_sec = cur_ts_sec;
2661 start_of_eval_period_nsec = cur_ts_nsec;
2662 }
2663 }
2664
2665
2666 int compaction_swapper_init_now = 0;
2667 int compaction_swapper_running = 0;
2668 int compaction_swapper_awakened = 0;
2669 int compaction_swapper_abort = 0;
2670
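/*
 * With overage-segment swapping enabled, report whether the oldest
 * segment on the AGE_Q has been resident longer than vm_ripe_target_age
 * and we still have room under the overage swapout limit.
 */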
2671 bool
2672 vm_compressor_swapout_is_ripe()
2673 {
2674 bool is_ripe = false;
2675 if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
2676 c_segment_t c_seg;
2677 clock_sec_t now;
2678 clock_sec_t age;
2679 clock_nsec_t nsec;
2680
2681 clock_get_system_nanotime(&now, &nsec);
2682 age = 0;
2683
2684 lck_mtx_lock_spin_always(c_list_lock);
2685
2686 if (!queue_empty(&c_age_list_head)) {
2687 c_seg = (c_segment_t) queue_first(&c_age_list_head);
2688
2689 age = now - c_seg->c_creation_ts;
2690 }
2691 lck_mtx_unlock_always(c_list_lock);
2692
2693 if (age >= vm_ripe_target_age) {
2694 is_ripe = true;
2695 }
2696 }
2697 return is_ripe;
2698 }
2699
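/*
 * Swapout pressure checks: the compressor pool has crossed its swap
 * threshold, the external pageout queue is throttled while few anonymous
 * pages remain to compress, or the free page count has fallen below the
 * compressor's reserve.
 */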
2700 static bool
2701 compressor_swapout_conditions_met(void)
2702 {
2703 bool should_swap = false;
2704 if (COMPRESSOR_NEEDS_TO_SWAP()) {
2705 should_swap = true;
2706 vmcs_stats.compressor_swap_threshold_exceeded++;
2707 }
2708 if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
2709 should_swap = true;
2710 vmcs_stats.external_q_throttled++;
2711 }
2712 if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
2713 should_swap = true;
2714 vmcs_stats.free_count_below_reserve++;
2715 }
2716 return should_swap;
2717 }
2718
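/*
 * Decide whether the compaction/swapper thread should swap (or at least
 * major compact): ripe overage segments, swapout pressure, thrashing or
 * phantom-cache pressure, and compressor fragmentation all factor in.
 * With CONFIG_JETSAM, thrashing is handed to the memorystatus thread to
 * resolve by killing processes rather than by swapping.
 */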
2719 static bool
2720 compressor_needs_to_swap()
2721 {
2722 bool should_swap = false;
2723 if (vm_compressor_swapout_is_ripe()) {
2724 should_swap = true;
2725 goto check_if_low_space;
2726 }
2727
2728 if (VM_CONFIG_SWAP_IS_ACTIVE) {
2729 should_swap = compressor_swapout_conditions_met();
2730 if (should_swap) {
2731 goto check_if_low_space;
2732 }
2733 }
2734
2735 #if (XNU_TARGET_OS_OSX && __arm64__)
2736 /*
2737 * Thrashing detection disabled.
2738 */
2739 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
2740
2741 if (vm_compressor_is_thrashing()) {
2742 should_swap = true;
2743 vmcs_stats.thrashing_detected++;
2744 }
2745
2746 #if CONFIG_PHANTOM_CACHE
2747 if (vm_phantom_cache_check_pressure()) {
2748 os_atomic_store(&memorystatus_phantom_cache_pressure, true, release);
2749 should_swap = true;
2750 }
2751 #endif
2752 if (swapout_target_age) {
2753 should_swap = true;
2754 }
2755 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
2756
2757 check_if_low_space:
2758
2759 #if CONFIG_JETSAM
2760 if (should_swap || vm_compressor_low_on_space() == TRUE) {
2761 if (vm_compressor_thrashing_detected == FALSE) {
2762 vm_compressor_thrashing_detected = TRUE;
2763
2764 if (swapout_target_age) {
2765 compressor_thrashing_induced_jetsam++;
2766 } else if (vm_compressor_low_on_space() == TRUE) {
2767 compressor_thrashing_induced_jetsam++;
2768 } else {
2769 filecache_thrashing_induced_jetsam++;
2770 }
2771 /*
2772 * Wake up the memorystatus thread so that it can return
2773 * the system to a healthy state (by killing processes).
2774 */
2775 memorystatus_thread_wake();
2776 }
2777 /*
2778 * let the jetsam take precedence over
2779 * any major compactions we might have
2780 * been able to do... otherwise we run
2781 * the risk of doing major compactions
2782 * on segments we're about to free up
2783 * due to the jetsam activity.
2784 */
2785 should_swap = false;
2786 if (memorystatus_swap_all_apps && vm_swap_low_on_space()) {
2787 vm_compressor_take_paging_space_action();
2788 }
2789 }
2790
2791 #else /* CONFIG_JETSAM */
2792 if (should_swap && vm_swap_low_on_space()) {
2793 vm_compressor_take_paging_space_action();
2794 }
2795 #endif /* CONFIG_JETSAM */
2796
2797 if (should_swap == false) {
2798 /*
2799 * vm_compressor_needs_to_major_compact returns true only if we're
2800 * about to run out of available compressor segments... in this
2801 * case, we absolutely need to run a major compaction even if
2802 * we've just kicked off a jetsam or we don't otherwise need to
2803 * swap... terminating objects releases
2804 * pages back to the uncompressed cache, but does not guarantee
2805 * that we will free up even a single compression segment
2806 */
2807 should_swap = vm_compressor_needs_to_major_compact();
2808 if (should_swap) {
2809 vmcs_stats.fragmentation_detected++;
2810 }
2811 }
2812
2813 /*
2814 * returning TRUE when swap_supported == FALSE
2815 * will cause the major compaction engine to
2816 * run, but will not trigger any swapping...
2817 * segments that have been major compacted
2818 * will be moved to the majorcompact queue
2819 */
2820 return should_swap;
2821 }
2822
2823 #if CONFIG_JETSAM
2824 /*
2825 * This function is called from the jetsam thread after killing something to
2826 * mitigate thrashing.
2827 *
2828 * We need to restart our thrashing detection heuristics since memory pressure
2829 * has potentially changed significantly, and we don't want to detect on old
2830 * data from before the jetsam.
2831 */
2832 void
2833 vm_thrashing_jetsam_done(void)
2834 {
2835 vm_compressor_thrashing_detected = FALSE;
2836
2837 /* Were we compressor-thrashing or filecache-thrashing? */
2838 if (swapout_target_age) {
2839 swapout_target_age = 0;
2840 compressor_need_sample_reset = TRUE;
2841 }
2842 #if CONFIG_PHANTOM_CACHE
2843 else {
2844 vm_phantom_cache_restart_sample();
2845 }
2846 #endif
2847 }
2848 #endif /* CONFIG_JETSAM */
2849
2850 uint32_t vm_wake_compactor_swapper_calls = 0;
2851 uint32_t vm_run_compactor_already_running = 0;
2852 uint32_t vm_run_compactor_empty_minor_q = 0;
2853 uint32_t vm_run_compactor_did_compact = 0;
2854 uint32_t vm_run_compactor_waited = 0;
2855
2856 /* run minor compaction right now, if the compaction-swapper thread is not already running */
2857 void
2858 vm_run_compactor(void)
2859 {
2860 if (c_segment_count == 0) {
2861 return;
2862 }
2863
2864 if (os_atomic_load(&c_minor_count, relaxed) == 0) {
2865 vm_run_compactor_empty_minor_q++;
2866 return;
2867 }
2868
2869 lck_mtx_lock_spin_always(c_list_lock);
2870
2871 if (compaction_swapper_running) {
2872 if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2873 vm_run_compactor_already_running++;
2874
2875 lck_mtx_unlock_always(c_list_lock);
2876 return;
2877 }
2878 vm_run_compactor_waited++;
2879
2880 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2881
2882 lck_mtx_unlock_always(c_list_lock);
2883
2884 thread_block(THREAD_CONTINUE_NULL);
2885
2886 return;
2887 }
2888 vm_run_compactor_did_compact++;
2889
2890 fastwake_warmup = FALSE;
2891 compaction_swapper_running = 1;
2892
2893 vm_compressor_do_delayed_compactions(FALSE);
2894
2895 compaction_swapper_running = 0;
2896
2897 lck_mtx_unlock_always(c_list_lock);
2898
2899 thread_wakeup((event_t)&compaction_swapper_running);
2900 }
2901
2902
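/*
 * Wake the compaction/swapper thread if there is minor or major
 * compaction work queued and it isn't already running or awakened.
 */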
2903 void
2904 vm_wake_compactor_swapper(void)
2905 {
2906 if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
2907 return;
2908 }
2909
2910 if (os_atomic_load(&c_minor_count, relaxed) ||
2911 vm_compressor_needs_to_major_compact()) {
2912 lck_mtx_lock_spin_always(c_list_lock);
2913
2914 fastwake_warmup = FALSE;
2915
2916 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2917 vm_wake_compactor_swapper_calls++;
2918
2919 compaction_swapper_awakened = 1;
2920 thread_wakeup((event_t)&c_compressor_swap_trigger);
2921 }
2922 lck_mtx_unlock_always(c_list_lock);
2923 }
2924 }
2925
2926
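/*
 * Synchronously run a full compact-and-swap pass, treating aged ("ripe")
 * segments as swappable.  Any in-flight compaction pass is asked to
 * abort and is waited for first; swap must be configured.
 */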
2927 void
2928 vm_consider_swapping()
2929 {
2930 assert(VM_CONFIG_SWAP_IS_PRESENT);
2931
2932 lck_mtx_lock_spin_always(c_list_lock);
2933
2934 compaction_swapper_abort = 1;
2935
2936 while (compaction_swapper_running) {
2937 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2938
2939 lck_mtx_unlock_always(c_list_lock);
2940
2941 thread_block(THREAD_CONTINUE_NULL);
2942
2943 lck_mtx_lock_spin_always(c_list_lock);
2944 }
2945 compaction_swapper_abort = 0;
2946 compaction_swapper_running = 1;
2947
2948 vm_swapout_ripe_segments = TRUE;
2949
2950 vm_compressor_process_major_segments(vm_swapout_ripe_segments);
2951
2952 vm_compressor_compact_and_swap(FALSE);
2953
2954 compaction_swapper_running = 0;
2955
2956 vm_swapout_ripe_segments = FALSE;
2957
2958 lck_mtx_unlock_always(c_list_lock);
2959
2960 thread_wakeup((event_t)&compaction_swapper_running);
2961 }
2962
2963
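/*
 * Opportunistic wakeup: kick the compaction/swapper thread the first time
 * through so it can finish initializing, or whenever minor compaction or
 * swapping appears to be needed.
 */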
2964 void
2965 vm_consider_waking_compactor_swapper(void)
2966 {
2967 bool need_wakeup = false;
2968
2969 if (c_segment_count == 0) {
2970 return;
2971 }
2972
2973 if (compaction_swapper_running || compaction_swapper_awakened) {
2974 return;
2975 }
2976
2977 if (!compaction_swapper_inited && !compaction_swapper_init_now) {
2978 compaction_swapper_init_now = 1;
2979 need_wakeup = true;
2980 } else if (vm_compressor_needs_to_minor_compact() ||
2981 compressor_needs_to_swap()) {
2982 need_wakeup = true;
2983 }
2984
2985 if (need_wakeup) {
2986 lck_mtx_lock_spin_always(c_list_lock);
2987
2988 fastwake_warmup = FALSE;
2989
2990 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2991 memoryshot(DBG_VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
2992
2993 compaction_swapper_awakened = 1;
2994 thread_wakeup((event_t)&c_compressor_swap_trigger);
2995 }
2996 lck_mtx_unlock_always(c_list_lock);
2997 }
2998 }
2999
3000
3001 #define C_SWAPOUT_LIMIT 4
3002 #define DELAYED_COMPACTIONS_PER_PASS 30
3003
3004 /* process segments that are in the minor compaction queue */
3005 void
3006 vm_compressor_do_delayed_compactions(boolean_t flush_all)
3007 {
3008 c_segment_t c_seg;
3009 int number_compacted = 0;
3010 boolean_t needs_to_swap = FALSE;
3011 uint32_t c_swapout_count = 0;
3012
3013
3014 VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
3015
3016 #if XNU_TARGET_OS_OSX
3017 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
3018 #endif /* XNU_TARGET_OS_OSX */
3019
3020 while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
3021 c_seg = (c_segment_t)queue_first(&c_minor_list_head);
3022
3023 lck_mtx_lock_spin_always(&c_seg->c_lock);
3024
3025 if (c_seg->c_busy) {
3026 lck_mtx_unlock_always(c_list_lock);
3027 c_seg_wait_on_busy(c_seg);
3028 lck_mtx_lock_spin_always(c_list_lock);
3029
3030 continue;
3031 }
3032 C_SEG_BUSY(c_seg);
3033
3034 c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
3035
3036 c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3037 if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
3038 if ((flush_all == TRUE || compressor_needs_to_swap()) && c_swapout_count < C_SWAPOUT_LIMIT) {
3039 needs_to_swap = TRUE;
3040 }
3041
3042 number_compacted = 0;
3043 }
3044 lck_mtx_lock_spin_always(c_list_lock);
3045 }
3046
3047 VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, DBG_VM_COMPRESSOR_DELAYED_COMPACT, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
3048 }
3049
3050 int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
3051
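/*
 * Pull compressed slots from the segments that follow c_seg on its queue
 * (the AGE_Q, or the late swappedin queue for donated segments) into
 * c_seg, freeing donors that end up empty.  Returns false if we bailed
 * early because another thread wants c_seg, true otherwise.  Returns with
 * c_list_lock and the c_seg lock held and c_seg still busy.
 */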
3052 static bool
3053 vm_compressor_major_compact_cseg(c_segment_t c_seg, uint32_t* c_seg_considered, bool* bail_wanted_cseg, uint64_t* total_bytes_freed)
3054 {
3055 /*
3056 * Major compaction
3057 */
3058 bool keep_compacting = true, fully_compacted = true;
3059 queue_head_t *list_head = NULL;
3060 c_segment_t c_seg_next;
3061 uint64_t bytes_to_free = 0, bytes_freed = 0;
3062 uint32_t number_considered = 0;
3063
3064 if (c_seg->c_state == C_ON_AGE_Q) {
3065 assert(!c_seg->c_has_donated_pages);
3066 list_head = &c_age_list_head;
3067 } else if (c_seg->c_state == C_ON_SWAPPEDIN_Q) {
3068 assert(c_seg->c_has_donated_pages);
3069 list_head = &c_late_swappedin_list_head;
3070 }
3071
3072 while (keep_compacting == TRUE) {
3073 assert(c_seg->c_busy);
3074
3075 /* look for another segment to consolidate */
3076
3077 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3078
3079 if (queue_end(list_head, (queue_entry_t)c_seg_next)) {
3080 break;
3081 }
3082
3083 assert(c_seg_next->c_state == c_seg->c_state);
3084
3085 number_considered++;
3086
3087 if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
3088 break;
3089 }
3090
3091 lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3092
3093 if (c_seg_next->c_busy) {
3094 /*
3095 * We are going to block for our neighbor.
3096 * If our c_seg is wanted, we should unbusy
3097 * it because we don't know how long we might
3098 * have to block here.
3099 */
3100 if (c_seg->c_wanted) {
3101 lck_mtx_unlock_always(&c_seg_next->c_lock);
3102 fully_compacted = false;
3103 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3104 *bail_wanted_cseg = true;
3105 break;
3106 }
3107
3108 lck_mtx_unlock_always(c_list_lock);
3109
3110 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3111
3112 c_seg_wait_on_busy(c_seg_next);
3113 lck_mtx_lock_spin_always(c_list_lock);
3114
3115 continue;
3116 }
3117 /* grab that segment */
3118 C_SEG_BUSY(c_seg_next);
3119
3120 bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3121 if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3122 /*
3123 * found an empty c_segment and freed it
3124 * so we can't continue to use c_seg_next
3125 */
3126 bytes_freed += bytes_to_free;
3127 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3128 continue;
3129 }
3130
3131 /* unlock the list ... */
3132 lck_mtx_unlock_always(c_list_lock);
3133
3134 /* do the major compaction */
3135
3136 keep_compacting = c_seg_major_compact(c_seg, c_seg_next);
3137
3138 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3139
3140 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3141
3142 lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3143 /*
3144 * run a minor compaction on the donor segment
3145 * since we pulled at least some of its
3146 * data into our target... if we've emptied
3147 * it, now is a good time to free it which
3148 * c_seg_minor_compaction_and_unlock also takes care of
3149 *
3150 * by passing TRUE, we ask for c_busy to be cleared
3151 * and c_wanted to be taken care of
3152 */
3153 bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3154 if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3155 bytes_freed += bytes_to_free;
3156 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3157 } else {
3158 bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3159 bytes_freed += bytes_to_free;
3160 }
3161
3162 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3163
3164 /* relock the list */
3165 lck_mtx_lock_spin_always(c_list_lock);
3166
3167 if (c_seg->c_wanted) {
3168 /*
3169 * Our c_seg is in demand. Let's
3170 * unbusy it and wakeup the waiters
3171 * instead of continuing the compaction
3172 * because we could be in this loop
3173 * for a while.
3174 */
3175 fully_compacted = false;
3176 *bail_wanted_cseg = true;
3177 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3178 break;
3179 }
3180 } /* major compaction */
3181
3182 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, *bail_wanted_cseg, 0);
3183
3184 *c_seg_considered += number_considered;
3185 *total_bytes_freed += bytes_freed;
3186
3187 lck_mtx_lock_spin_always(&c_seg->c_lock);
3188 return fully_compacted;
3189 }
3190
3191 #define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
3192 MACRO_BEGIN \
3193 if ((int)((rfrac) -= (frac)) < 0) { \
3194 (rfrac) += (unit); \
3195 (rsecs) -= 1; \
3196 } \
3197 (rsecs) -= (secs); \
3198 MACRO_END
3199
3200 clock_nsec_t c_process_major_report_over_ms = 9; /* report if over 9 ms */
3201 int c_process_major_yield_after = 1000; /* yield after moving 1,000 segments */
3202 uint64_t c_process_major_reports = 0;
3203 clock_sec_t c_process_major_max_sec = 0;
3204 clock_nsec_t c_process_major_max_nsec = 0;
3205 uint32_t c_process_major_peak_segcount = 0;
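/*
 * Drain the major-compact queue back onto the AGE_Q (every segment, or
 * only those older than vm_ripe_target_age when ripe_age_only is set),
 * yielding c_list_lock periodically and logging passes that take longer
 * than c_process_major_report_over_ms.
 */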
3206 static void
3207 vm_compressor_process_major_segments(bool ripe_age_only)
3208 {
3209 c_segment_t c_seg = NULL;
3210 int count = 0, total = 0, breaks = 0;
3211 clock_sec_t start_sec, end_sec;
3212 clock_nsec_t start_nsec, end_nsec;
3213 clock_nsec_t report_over_ns;
3214
3215 if (queue_empty(&c_major_list_head)) {
3216 return;
3217 }
3218
3219 // printf("%s: starting to move segments from MAJORQ to AGEQ\n", __FUNCTION__);
3220 if (c_process_major_report_over_ms != 0) {
3221 report_over_ns = c_process_major_report_over_ms * NSEC_PER_MSEC;
3222 } else {
3223 report_over_ns = (clock_nsec_t)-1;
3224 }
3225
3226 if (ripe_age_only) {
3227 if (c_overage_swapped_count >= c_overage_swapped_limit) {
3228 /*
3229 * Return while we wait for the overage segments
3230 * in our queue to get pushed out first.
3231 */
3232 return;
3233 }
3234 }
3235
3236 clock_get_system_nanotime(&start_sec, &start_nsec);
3237 while (!queue_empty(&c_major_list_head)) {
3238 if (!ripe_age_only) {
3239 /*
3240 * Start from the end to preserve aging order. The newer
3241 * segments are at the tail and so need to be inserted in
3242 * the aging queue in this way so we have the older segments
3243 * at the end of the AGE_Q.
3244 */
3245 c_seg = (c_segment_t)queue_last(&c_major_list_head);
3246 } else {
3247 c_seg = (c_segment_t)queue_first(&c_major_list_head);
3248 if ((start_sec - c_seg->c_creation_ts) < vm_ripe_target_age) {
3249 /*
3250 * We have found the first segment in our queue that is not ripe. Segments after it
3251 * will be the same. So let's bail here. Return with c_list_lock held.
3252 */
3253 break;
3254 }
3255 }
3256
3257 lck_mtx_lock_spin_always(&c_seg->c_lock);
3258 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3259 lck_mtx_unlock_always(&c_seg->c_lock);
3260
3261 count++;
3262 if (count == c_process_major_yield_after ||
3263 queue_empty(&c_major_list_head)) {
3264 /* done or time to take a break */
3265 } else {
3266 /* keep going */
3267 continue;
3268 }
3269
3270 total += count;
3271 clock_get_system_nanotime(&end_sec, &end_nsec);
3272 TIME_SUB(end_sec, start_sec, end_nsec, start_nsec, NSEC_PER_SEC);
3273 if (end_sec > c_process_major_max_sec) {
3274 c_process_major_max_sec = end_sec;
3275 c_process_major_max_nsec = end_nsec;
3276 } else if (end_sec == c_process_major_max_sec &&
3277 end_nsec > c_process_major_max_nsec) {
3278 c_process_major_max_nsec = end_nsec;
3279 }
3280 if (total > c_process_major_peak_segcount) {
3281 c_process_major_peak_segcount = total;
3282 }
3283 if (end_sec > 0 ||
3284 end_nsec >= report_over_ns) {
3285 /* we used more than expected */
3286 c_process_major_reports++;
3287 printf("%s: moved %d/%d segments from MAJORQ to AGEQ in %lu.%09u seconds and %d breaks\n",
3288 __FUNCTION__, count, total,
3289 end_sec, end_nsec, breaks);
3290 }
3291 if (queue_empty(&c_major_list_head)) {
3292 /* done */
3293 break;
3294 }
3295 /* take a break to allow someone else to grab the lock */
3296 lck_mtx_unlock_always(c_list_lock);
3297 mutex_pause(0); /* 10 microseconds */
3298 lck_mtx_lock_spin_always(c_list_lock);
3299 /* start again */
3300 clock_get_system_nanotime(&start_sec, &start_nsec);
3301 count = 0;
3302 breaks++;
3303 }
3304 }
3305
3306 /*
3307 * macOS special swappable csegs -> early_swapin queue
3308 * non-macOS special swappable+non-freezer csegs -> late_swapin queue
3309 * Processing special csegs means minor compacting each cseg and then
3310 * major compacting it and putting them on the early or late
3311 * (depending on platform) swapout queue. tag:DONATE
3312 */
3313 static void
3314 vm_compressor_process_special_swapped_in_segments_locked(void)
3315 {
3316 c_segment_t c_seg = NULL;
3317 bool switch_state = true, bail_wanted_cseg = false;
3318 unsigned int number_considered = 0, yield_after_considered_per_pass = 0;
3319 uint64_t bytes_freed = 0;
3320 queue_head_t *special_swappedin_list_head;
3321
3322 #if XNU_TARGET_OS_OSX
3323 special_swappedin_list_head = &c_early_swappedin_list_head;
3324 #else /* XNU_TARGET_OS_OSX */
3325 if (memorystatus_swap_all_apps) {
3326 special_swappedin_list_head = &c_late_swappedin_list_head;
3327 } else {
3328 /* called on unsupported config */
3329 return;
3330 }
3331 #endif /* XNU_TARGET_OS_OSX */
3332
3333 yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3334 while (!queue_empty(special_swappedin_list_head)) {
3335 c_seg = (c_segment_t)queue_first(special_swappedin_list_head);
3336
3337 lck_mtx_lock_spin_always(&c_seg->c_lock);
3338
3339 if (c_seg->c_busy) {
3340 lck_mtx_unlock_always(c_list_lock);
3341 c_seg_wait_on_busy(c_seg);
3342 lck_mtx_lock_spin_always(c_list_lock);
3343 continue;
3344 }
3345
3346 C_SEG_BUSY(c_seg);
3347 lck_mtx_unlock_always(&c_seg->c_lock);
3348 lck_mtx_unlock_always(c_list_lock);
3349
3350 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3351
3352 lck_mtx_lock_spin_always(&c_seg->c_lock);
3353
3354 if (c_seg_minor_compaction_and_unlock(c_seg, FALSE /*clear busy?*/)) {
3355 /*
3356 * found an empty c_segment and freed it
3357 * so go grab the next guy in the queue
3358 */
3359 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3360 lck_mtx_lock_spin_always(c_list_lock);
3361 continue;
3362 }
3363
3364 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3365 lck_mtx_lock_spin_always(c_list_lock);
3366
3367 switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
3368 assert(c_seg->c_busy);
3369 assert(!c_seg->c_on_minorcompact_q);
3370
3371 if (switch_state) {
3372 if (VM_CONFIG_SWAP_IS_ACTIVE || VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3373 /*
3374 * Ordinarily we let swapped in segments age out + get
3375 * major compacted with the rest of the c_segs on the ageQ.
3376 * But the early donated c_segs, if well compacted, should be
3377 * kept ready to be swapped out if needed. These are typically
3378 * describing memory belonging to a leaky app (macOS) or a swap-
3379 * capable app (iPadOS) and for the latter we can keep these
3380 * around longer because we control the triggers in the memorystatus
3381 * subsystem
3382 */
3383 c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3384 }
3385 }
3386
3387 C_SEG_WAKEUP_DONE(c_seg);
3388
3389 lck_mtx_unlock_always(&c_seg->c_lock);
3390
3391 if (number_considered >= yield_after_considered_per_pass) {
3392 if (bail_wanted_cseg) {
3393 /*
3394 * We stopped major compactions on a c_seg
3395 * that is wanted. We don't know the priority
3396 * of the waiter unfortunately but we are at
3397 * a very high priority and so, just in case
3398 * the waiter is a critical system daemon or
3399 * UI thread, let's give up the CPU in case
3400 * the system is running a few CPU intensive
3401 * tasks.
3402 */
3403 bail_wanted_cseg = false;
3404 lck_mtx_unlock_always(c_list_lock);
3405
3406 mutex_pause(2); /* 100us yield */
3407
3408 lck_mtx_lock_spin_always(c_list_lock);
3409 }
3410
3411 number_considered = 0;
3412 }
3413 }
3414 }
3415
3416 void
3417 vm_compressor_process_special_swapped_in_segments(void)
3418 {
3419 lck_mtx_lock_spin_always(c_list_lock);
3420 vm_compressor_process_special_swapped_in_segments_locked();
3421 lck_mtx_unlock_always(c_list_lock);
3422 }
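/*
 * Editor's note -- illustrative sketch only, not part of the build:
 * this file pairs a *_locked() worker with a thin wrapper that takes
 * c_list_lock, as above. A caller that already holds c_list_lock
 * (e.g. vm_compressor_compact_and_swap() below) calls the worker
 * directly:
 *
 *	lck_mtx_lock_spin_always(c_list_lock);
 *	vm_compressor_process_special_swapped_in_segments_locked();
 *	...other work under c_list_lock...
 *	lck_mtx_unlock_always(c_list_lock);
 *
 * while an external caller simply uses
 * vm_compressor_process_special_swapped_in_segments().
 */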
3423
3424 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT 10
3425 /*
3426 * Processing regular csegs means aging them.
3427 */
3428 static void
3429 vm_compressor_process_regular_swapped_in_segments(boolean_t flush_all)
3430 {
3431 c_segment_t c_seg;
3432 clock_sec_t now;
3433 clock_nsec_t nsec;
3434
3435 clock_get_system_nanotime(&now, &nsec);
3436
3437 while (!queue_empty(&c_regular_swappedin_list_head)) {
3438 c_seg = (c_segment_t)queue_first(&c_regular_swappedin_list_head);
3439
3440 if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) {
3441 break;
3442 }
3443
3444 lck_mtx_lock_spin_always(&c_seg->c_lock);
3445
3446 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3447 c_seg->c_agedin_ts = (uint32_t) now;
3448
3449 lck_mtx_unlock_always(&c_seg->c_lock);
3450 }
3451 }
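/*
 * Illustrative arithmetic for the aging check above, assuming only the
 * 10 second C_SEGMENT_SWAPPEDIN_AGE_LIMIT defined in this file: a
 * segment with c_swappedin_ts == 100 stays on the regular swapped-in
 * queue while now < 110; once (now - c_swappedin_ts) >= 10, or when
 * flush_all is TRUE (e.g. during a hibernation flush), it is moved to
 * C_ON_AGE_Q and stamped with c_agedin_ts = now.
 */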
3452
3453
3454 extern int vm_num_swap_files;
3455 extern int vm_num_pinned_swap_files;
3456 extern int vm_swappin_enabled;
3457
3458 extern unsigned int vm_swapfile_total_segs_used;
3459 extern unsigned int vm_swapfile_total_segs_alloced;
3460
3461
3462 void
3463 vm_compressor_flush(void)
3464 {
3465 uint64_t vm_swap_put_failures_at_start;
3466 wait_result_t wait_result = 0;
3467 AbsoluteTime startTime, endTime;
3468 clock_sec_t now_sec;
3469 clock_nsec_t now_nsec;
3470 uint64_t nsec;
3471 c_segment_t c_seg, c_seg_next;
3472
3473 HIBLOG("vm_compressor_flush - starting\n");
3474
3475 clock_get_uptime(&startTime);
3476
3477 lck_mtx_lock_spin_always(c_list_lock);
3478
3479 fastwake_warmup = FALSE;
3480 compaction_swapper_abort = 1;
3481
3482 while (compaction_swapper_running) {
3483 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
3484
3485 lck_mtx_unlock_always(c_list_lock);
3486
3487 thread_block(THREAD_CONTINUE_NULL);
3488
3489 lck_mtx_lock_spin_always(c_list_lock);
3490 }
3491 compaction_swapper_abort = 0;
3492 compaction_swapper_running = 1;
3493
3494 hibernate_flushing = TRUE;
3495 hibernate_no_swapspace = FALSE;
3496 hibernate_flush_timed_out = FALSE;
3497 c_generation_id_flush_barrier = c_generation_id + 1000;
3498
3499 clock_get_system_nanotime(&now_sec, &now_nsec);
3500 hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
3501
3502 vm_swap_put_failures_at_start = vm_swap_put_failures;
3503
3504 /*
3505 * We are about to hibernate and so we want all segments flushed to disk.
3506 * Segments that are on the major compaction queue won't be considered in
3507 * the vm_compressor_compact_and_swap() pass. So we need to bring them to
3508 * the ageQ for consideration.
3509 */
3510 if (!queue_empty(&c_major_list_head)) {
3511 c_seg = (c_segment_t)queue_first(&c_major_list_head);
3512
3513 while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
3514 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3515 lck_mtx_lock_spin_always(&c_seg->c_lock);
3516 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3517 lck_mtx_unlock_always(&c_seg->c_lock);
3518 c_seg = c_seg_next;
3519 }
3520 }
3521 vm_compressor_compact_and_swap(TRUE);
3522 /* need to wait here since the swap thread may also be running in parallel and handling segments */
3523 while (!queue_empty(&c_early_swapout_list_head) || !queue_empty(&c_regular_swapout_list_head) || !queue_empty(&c_late_swapout_list_head)) {
3524 assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
3525
3526 lck_mtx_unlock_always(c_list_lock);
3527
3528 wait_result = thread_block(THREAD_CONTINUE_NULL);
3529
3530 lck_mtx_lock_spin_always(c_list_lock);
3531
3532 if (wait_result == THREAD_TIMED_OUT) {
3533 break;
3534 }
3535 }
3536 hibernate_flushing = FALSE;
3537 compaction_swapper_running = 0;
3538
3539 if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
3540 HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
3541 vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
3542 }
3543
3544 lck_mtx_unlock_always(c_list_lock);
3545
3546 thread_wakeup((event_t)&compaction_swapper_running);
3547
3548 clock_get_uptime(&endTime);
3549 SUB_ABSOLUTETIME(&endTime, &startTime);
3550 absolutetime_to_nanoseconds(endTime, &nsec);
3551
3552 HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
3553 nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
3554 }
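/*
 * Worked example of the flush barrier set above (assumes nothing beyond
 * this file): if c_generation_id is 5000 when vm_compressor_flush()
 * runs, c_generation_id_flush_barrier becomes 6000, and the
 * vm_compressor_compact_and_swap(TRUE) pass stops once it reaches a
 * segment whose c_generation_id exceeds 6000 -- segments created during
 * the flush itself are not chased indefinitely.
 */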
3555
3556
3557 int compaction_swap_trigger_thread_awakened = 0;
3558
3559 static void
3560 vm_compressor_swap_trigger_thread(void)
3561 {
3562 current_thread()->options |= TH_OPT_VMPRIV;
3563
3564 /*
3565 * compaction_swapper_init_now is set when the first call to
3566 * vm_consider_waking_compactor_swapper is made from
3567 * vm_pageout_scan... since this function is called upon
3568 * thread creation, we want to make sure to delay adjusting
3569 * the tuneables until we are awakened via vm_pageout_scan
3570 * so that we are at a point where the vm_swapfile_open will
3571 * be operating on the correct directory (in case the default
3572 * of using the VM volume is overridden by the dynamic_pager)
3573 */
3574 if (compaction_swapper_init_now) {
3575 vm_compaction_swapper_do_init();
3576
3577 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
3578 thread_vm_bind_group_add();
3579 }
3580 #if CONFIG_THREAD_GROUPS
3581 thread_group_vm_add();
3582 #endif
3583 thread_set_thread_name(current_thread(), "VM_cswap_trigger");
3584 compaction_swapper_init_now = 0;
3585 }
3586 lck_mtx_lock_spin_always(c_list_lock);
3587
3588 compaction_swap_trigger_thread_awakened++;
3589 compaction_swapper_awakened = 0;
3590
3591 if (compaction_swapper_running == 0) {
3592 compaction_swapper_running = 1;
3593
3594 vm_compressor_compact_and_swap(FALSE);
3595
3596 compaction_swapper_running = 0;
3597 }
3598 assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
3599
3600 if (compaction_swapper_running == 0) {
3601 thread_wakeup((event_t)&compaction_swapper_running);
3602 }
3603
3604 lck_mtx_unlock_always(c_list_lock);
3605
3606 thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
3607
3608 /* NOTREACHED */
3609 }
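/*
 * Sketch of the continuation pattern used by the trigger thread above
 * (illustrative only): the thread queues itself on an event and then
 * blocks with its own entry point as the continuation, so each wakeup
 * re-enters vm_compressor_swap_trigger_thread() from the top with a
 * fresh stack instead of returning past thread_block().
 *
 *	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
 *	...
 *	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
 */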
3610
3611
3612 void
3613 vm_compressor_record_warmup_start(void)
3614 {
3615 c_segment_t c_seg;
3616
3617 lck_mtx_lock_spin_always(c_list_lock);
3618
3619 if (first_c_segment_to_warm_generation_id == 0) {
3620 if (!queue_empty(&c_age_list_head)) {
3621 c_seg = (c_segment_t)queue_last(&c_age_list_head);
3622
3623 first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3624 } else {
3625 first_c_segment_to_warm_generation_id = 0;
3626 }
3627
3628 fastwake_recording_in_progress = TRUE;
3629 }
3630 lck_mtx_unlock_always(c_list_lock);
3631 }
3632
3633
3634 void
3635 vm_compressor_record_warmup_end(void)
3636 {
3637 c_segment_t c_seg;
3638
3639 lck_mtx_lock_spin_always(c_list_lock);
3640
3641 if (fastwake_recording_in_progress == TRUE) {
3642 if (!queue_empty(&c_age_list_head)) {
3643 c_seg = (c_segment_t)queue_last(&c_age_list_head);
3644
3645 last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
3646 } else {
3647 last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
3648 }
3649
3650 fastwake_recording_in_progress = FALSE;
3651
3652 HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3653 }
3654 lck_mtx_unlock_always(c_list_lock);
3655 }
3656
3657
3658 #define DELAY_TRIM_ON_WAKE_NS (25 * NSEC_PER_SEC)
3659
3660 void
3661 vm_compressor_delay_trim(void)
3662 {
3663 uint64_t now = mach_absolute_time();
3664 uint64_t delay_abstime;
3665 nanoseconds_to_absolutetime(DELAY_TRIM_ON_WAKE_NS, &delay_abstime);
3666 dont_trim_until_ts = now + delay_abstime;
3667 }
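/*
 * Hypothetical consumer of dont_trim_until_ts (sketch only; the actual
 * trim path lives elsewhere in this file): after a wake, swap-file
 * trimming would be skipped until the 25 second window above elapses.
 *
 *	if (mach_absolute_time() < dont_trim_until_ts) {
 *		return;	// too soon after wake to trim swap files
 *	}
 */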
3668
3669
3670 void
3671 vm_compressor_do_warmup(void)
3672 {
3673 lck_mtx_lock_spin_always(c_list_lock);
3674
3675 if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
3676 first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3677
3678 lck_mtx_unlock_always(c_list_lock);
3679 return;
3680 }
3681
3682 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
3683 fastwake_warmup = TRUE;
3684
3685 compaction_swapper_awakened = 1;
3686 thread_wakeup((event_t)&c_compressor_swap_trigger);
3687 }
3688 lck_mtx_unlock_always(c_list_lock);
3689 }
3690
3691 void
3692 do_fastwake_warmup_all(void)
3693 {
3694 lck_mtx_lock_spin_always(c_list_lock);
3695
3696 if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
3697 lck_mtx_unlock_always(c_list_lock);
3698 return;
3699 }
3700
3701 fastwake_warmup = TRUE;
3702
3703 do_fastwake_warmup(&c_swappedout_list_head, TRUE);
3704
3705 do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
3706
3707 fastwake_warmup = FALSE;
3708
3709 lck_mtx_unlock_always(c_list_lock);
3710 }
3711
3712 void
3713 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
3714 {
3715 c_segment_t c_seg = NULL;
3716 AbsoluteTime startTime, endTime;
3717 uint64_t nsec;
3718
3719
3720 HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
3721
3722 clock_get_uptime(&startTime);
3723
3724 lck_mtx_unlock_always(c_list_lock);
3725
3726 proc_set_thread_policy(current_thread(),
3727 TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
3728
3729 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3730
3731 lck_mtx_lock_spin_always(c_list_lock);
3732
3733 while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
3734 c_seg = (c_segment_t) queue_first(c_queue);
3735
3736 if (consider_all_cseg == FALSE) {
3737 if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
3738 c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
3739 break;
3740 }
3741
3742 if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
3743 break;
3744 }
3745 }
3746
3747 lck_mtx_lock_spin_always(&c_seg->c_lock);
3748 lck_mtx_unlock_always(c_list_lock);
3749
3750 if (c_seg->c_busy) {
3751 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3752 c_seg_wait_on_busy(c_seg);
3753 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3754 } else {
3755 if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
3756 lck_mtx_unlock_always(&c_seg->c_lock);
3757 }
3758 c_segment_warmup_count++;
3759
3760 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3761 vm_pageout_io_throttle();
3762 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3763 }
3764 lck_mtx_lock_spin_always(c_list_lock);
3765 }
3766 lck_mtx_unlock_always(c_list_lock);
3767
3768 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3769
3770 proc_set_thread_policy(current_thread(),
3771 TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
3772
3773 clock_get_uptime(&endTime);
3774 SUB_ABSOLUTETIME(&endTime, &startTime);
3775 absolutetime_to_nanoseconds(endTime, &nsec);
3776
3777 HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
3778
3779 lck_mtx_lock_spin_always(c_list_lock);
3780
3781 if (consider_all_cseg == FALSE) {
3782 first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
3783 }
3784 }
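/*
 * Illustrative example of the warmup window above, assuming only what
 * is shown in this file: if warmup recording captured generation ids
 * (first, last) = (4200, 4350), then with consider_all_cseg == FALSE
 * only swapped-out segments whose c_generation_id falls in that range
 * are brought back in, and the loop also bails early if
 * vm_page_free_count drops below AVAILABLE_MEMORY / 4.
 */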
3785
3786 extern bool vm_swapout_thread_running;
3787 extern boolean_t compressor_store_stop_compaction;
3788
3789 void
3790 vm_compressor_compact_and_swap(boolean_t flush_all)
3791 {
3792 c_segment_t c_seg;
3793 bool switch_state, bail_wanted_cseg = false;
3794 clock_sec_t now;
3795 clock_nsec_t nsec;
3796 mach_timespec_t start_ts, end_ts;
3797 unsigned int number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
3798 uint64_t bytes_freed, delta_usec;
3799 uint32_t c_swapout_count = 0;
3800
3801 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3802
3803 if (fastwake_warmup == TRUE) {
3804 uint64_t starting_warmup_count;
3805
3806 starting_warmup_count = c_segment_warmup_count;
3807
3808 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
3809 first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
3810 do_fastwake_warmup(&c_swappedout_list_head, FALSE);
3811 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
3812
3813 fastwake_warmup = FALSE;
3814 }
3815
3816 #if (XNU_TARGET_OS_OSX && __arm64__)
3817 /*
3818 * Re-considering major csegs showed benefits on all platforms by
3819 * significantly reducing fragmentation and getting back memory.
3820 * However, on smaller devices, e.g. the watch, there was increased power
3821 * use for the additional compactions. And the turnover in csegs on
3822 * those smaller platforms is high enough in the decompression/free
3823 * path that we can skip reconsidering them here because we already
3824 * consider them for major compaction in those paths.
3825 */
3826 vm_compressor_process_major_segments(false /*all segments and not just the ripe-aged ones*/);
3827 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
3828
3829 /*
3830 * it's possible for the c_age_list_head to be empty if we
3831 * hit our limits for growing the compressor pool and we subsequently
3832 * hibernated... on the next hibernation we could see the queue as
3833 * empty and not proceed even though we have a bunch of segments on
3834 * the swapped in queue that need to be dealt with.
3835 */
3836 vm_compressor_do_delayed_compactions(flush_all);
3837 vm_compressor_process_special_swapped_in_segments_locked();
3838 vm_compressor_process_regular_swapped_in_segments(flush_all);
3839
3840 /*
3841 * we only need to grab the timestamp once per
3842 * invocation of this function since the
3843 * timescale we're interested in is measured
3844 * in days
3845 */
3846 clock_get_system_nanotime(&now, &nsec);
3847
3848 start_ts.tv_sec = (int) now;
3849 start_ts.tv_nsec = nsec;
3850 delta_usec = 0;
3851 number_considered = 0;
3852 wanted_cseg_found = 0;
3853 number_yields = 0;
3854 bytes_freed = 0;
3855 yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
3856
3857 #if 0
3858 /**
3859 * SW: Need to figure out how to properly rate limit this log because it is currently way too
3860 * noisy. rdar://99379414 (Figure out how to rate limit the fragmentation level logging)
3861 */
3862 os_log(OS_LOG_DEFAULT, "memorystatus: before compaction fragmentation level %u\n", vm_compressor_fragmentation_level());
3863 #endif
3864
3865 while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
3866 if (hibernate_flushing == TRUE) {
3867 clock_sec_t sec;
3868
3869 if (hibernate_should_abort()) {
3870 HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
3871 break;
3872 }
3873 if (hibernate_no_swapspace == TRUE) {
3874 HIBLOG("vm_compressor_flush - out of swap space\n");
3875 break;
3876 }
3877 if (vm_swap_files_pinned() == FALSE) {
3878 HIBLOG("vm_compressor_flush - unpinned swap files\n");
3879 break;
3880 }
3881 if (hibernate_in_progress_with_pinned_swap == TRUE &&
3882 (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
3883 HIBLOG("vm_compressor_flush - out of pinned swap space\n");
3884 break;
3885 }
3886 clock_get_system_nanotime(&sec, &nsec);
3887
3888 if (sec > hibernate_flushing_deadline) {
3889 hibernate_flush_timed_out = TRUE;
3890 HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
3891 break;
3892 }
3893 }
3894
3895 c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3896 if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3897 assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
3898
3899 if (!vm_swapout_thread_running) {
3900 thread_wakeup((event_t)&vm_swapout_thread);
3901 }
3902
3903 lck_mtx_unlock_always(c_list_lock);
3904
3905 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
3906
3907 thread_block(THREAD_CONTINUE_NULL);
3908
3909 lck_mtx_lock_spin_always(c_list_lock);
3910 }
3911 /*
3912 * Minor compactions
3913 */
3914 vm_compressor_do_delayed_compactions(flush_all);
3915
3916 /*
3917 * vm_compressor_process_early_swapped_in_segments()
3918 * might be too aggressive. So OFF for now.
3919 */
3920 vm_compressor_process_regular_swapped_in_segments(flush_all);
3921
3922 /* Recompute because we dropped the c_list_lock above*/
3923 c_swapout_count = c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count;
3924 if (VM_CONFIG_SWAP_IS_ACTIVE && !vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3925 /*
3926 * we timed out on the above thread_block
3927 * let's loop around and try again
3928 * the timeout allows us to continue
3929 * to do minor compactions to make
3930 * more memory available
3931 */
3932 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
3933
3934 continue;
3935 }
3936
3937 /*
3938 * Swap out segments?
3939 */
3940 if (flush_all == FALSE) {
3941 bool needs_to_swap;
3942
3943 lck_mtx_unlock_always(c_list_lock);
3944
3945 needs_to_swap = compressor_needs_to_swap();
3946
3947 lck_mtx_lock_spin_always(c_list_lock);
3948
3949 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
3950
3951 if (!needs_to_swap) {
3952 break;
3953 }
3954 }
3955 if (queue_empty(&c_age_list_head)) {
3956 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
3957 break;
3958 }
3959 c_seg = (c_segment_t) queue_first(&c_age_list_head);
3960
3961 assert(c_seg->c_state == C_ON_AGE_Q);
3962
3963 if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
3964 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
3965 break;
3966 }
3967
3968 lck_mtx_lock_spin_always(&c_seg->c_lock);
3969
3970 if (c_seg->c_busy) {
3971 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
3972
3973 lck_mtx_unlock_always(c_list_lock);
3974 c_seg_wait_on_busy(c_seg);
3975 lck_mtx_lock_spin_always(c_list_lock);
3976
3977 continue;
3978 }
3979 C_SEG_BUSY(c_seg);
3980
3981 if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
3982 /*
3983 * found an empty c_segment and freed it
3984 * so go grab the next guy in the queue
3985 */
3986 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
3987 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3988 continue;
3989 }
3990
3991 switch_state = vm_compressor_major_compact_cseg(c_seg, &number_considered, &bail_wanted_cseg, &bytes_freed);
3992 if (bail_wanted_cseg) {
3993 wanted_cseg_found++;
3994 bail_wanted_cseg = false;
3995 }
3996
3997 assert(c_seg->c_busy);
3998 assert(!c_seg->c_on_minorcompact_q);
3999
4000 if (switch_state) {
4001 if (VM_CONFIG_SWAP_IS_ACTIVE) {
4002 int new_state = C_ON_SWAPOUT_Q;
4003 #if (XNU_TARGET_OS_OSX && __arm64__)
4004 if (flush_all == false && compressor_swapout_conditions_met() == false) {
4005 new_state = C_ON_MAJORCOMPACT_Q;
4006 }
4007 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
4008
4009 if (new_state == C_ON_SWAPOUT_Q) {
4010 /*
4011 * This mode of putting a generic c_seg on the swapout list is
4012 * only supported when we have general swapping enabled
4013 */
4014 clock_sec_t lnow;
4015 clock_nsec_t lnsec;
4016 clock_get_system_nanotime(&lnow, &lnsec);
4017 if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 30) {
4018 vmcs_stats.unripe_under_30s++;
4019 } else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 60) {
4020 vmcs_stats.unripe_under_60s++;
4021 } else if (c_seg->c_agedin_ts && (lnow - c_seg->c_agedin_ts) < 300) {
4022 vmcs_stats.unripe_under_300s++;
4023 }
4024 }
4025
4026 c_seg_switch_state(c_seg, new_state, FALSE);
4027 } else {
4028 if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
4029 assert(VM_CONFIG_SWAP_IS_PRESENT);
4030 /*
4031 * we are running compressor sweeps with swap-behind
4032 * make sure the c_seg has aged enough before swapping it
4033 * out...
4034 */
4035 if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
4036 c_seg->c_overage_swap = TRUE;
4037 c_overage_swapped_count++;
4038 c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
4039 }
4040 }
4041 }
4042 if (c_seg->c_state == C_ON_AGE_Q) {
4043 /*
4044 * this c_seg didn't get moved to the swapout queue
4045 * so we need to move it out of the way...
4046 * we just did a major compaction on it so put it
4047 * on that queue
4048 */
4049 c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
4050 } else {
4051 c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += c_seg_bufsize - c_seg->c_bytes_used;
4052 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
4053 }
4054 }
4055
4056 C_SEG_WAKEUP_DONE(c_seg);
4057
4058 lck_mtx_unlock_always(&c_seg->c_lock);
4059
4060 /*
4061 * On systems _with_ general swap, regardless of jetsam, we wake up the swapout thread here.
4062 * On systems _without_ general swap, it's the responsibility of the memorystatus
4063 * subsystem to wake up the swapper.
4064 * TODO: When we have full jetsam support on a swap enabled system, we will need to revisit
4065 * this policy.
4066 */
4067 if (VM_CONFIG_SWAP_IS_ACTIVE && c_swapout_count) {
4068 /*
4069 * We don't pause/yield here because we will either
4070 * yield below or at the top of the loop with the
4071 * assert_wait_timeout.
4072 */
4073 if (!vm_swapout_thread_running) {
4074 thread_wakeup((event_t)&vm_swapout_thread);
4075 }
4076 }
4077
4078 if (number_considered >= yield_after_considered_per_pass) {
4079 if (wanted_cseg_found) {
4080 /*
4081 * We stopped major compactions on a c_seg
4082 * that is wanted. We don't know the priority
4083 * of the waiter unfortunately but we are at
4084 * a very high priority and so, just in case
4085 * the waiter is a critical system daemon or
4086 * UI thread, let's give up the CPU in case
4087 * the system is running a few CPU intensive
4088 * tasks.
4089 */
4090 lck_mtx_unlock_always(c_list_lock);
4091
4092 mutex_pause(2); /* 100us yield */
4093
4094 number_yields++;
4095
4096 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
4097
4098 lck_mtx_lock_spin_always(c_list_lock);
4099 }
4100
4101 number_considered = 0;
4102 wanted_cseg_found = 0;
4103 }
4104 }
4105 clock_get_system_nanotime(&now, &nsec);
4106
4107 end_ts = major_compact_ts = (mach_timespec_t){.tv_sec = (int)now, .tv_nsec = nsec};
4108
4109 SUB_MACH_TIMESPEC(&end_ts, &start_ts);
4110
4111 delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
4112
4113 delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
4114
4115 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed_rate_us = (bytes_freed / delta_usec);
4116
4117 if ((c_seg_major_compact_stats_now + 1) == C_SEG_MAJOR_COMPACT_STATS_MAX) {
4118 c_seg_major_compact_stats_now = 0;
4119 } else {
4120 c_seg_major_compact_stats_now++;
4121 }
4122
4123 assert(c_seg_major_compact_stats_now < C_SEG_MAJOR_COMPACT_STATS_MAX);
4124
4125 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, DBG_VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
4126 }
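/*
 * Worked example of the rate bookkeeping above: if a pass freed
 * bytes_freed = 8,388,608 bytes in delta_usec = 4096 microseconds
 * (after subtracting 100us per yield), bytes_freed_rate_us records
 * 8388608 / 4096 = 2048 bytes per microsecond for this stats slot;
 * the slot index then advances and wraps at
 * C_SEG_MAJOR_COMPACT_STATS_MAX.
 */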
4127
4128
4129 static c_segment_t
4130 c_seg_allocate(c_segment_t *current_chead)
4131 {
4132 c_segment_t c_seg;
4133 int min_needed;
4134 int size_to_populate;
4135 c_segment_t *donate_queue_head;
4136
4137 #if XNU_TARGET_OS_OSX
4138 if (vm_compressor_low_on_space()) {
4139 vm_compressor_take_paging_space_action();
4140 }
4141 #endif /* XNU_TARGET_OS_OSX */
4142
4143 if ((c_seg = *current_chead) == NULL) {
4144 uint32_t c_segno;
4145
4146 lck_mtx_lock_spin_always(c_list_lock);
4147
4148 while (c_segments_busy == TRUE) {
4149 assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
4150
4151 lck_mtx_unlock_always(c_list_lock);
4152
4153 thread_block(THREAD_CONTINUE_NULL);
4154
4155 lck_mtx_lock_spin_always(c_list_lock);
4156 }
4157 if (c_free_segno_head == (uint32_t)-1) {
4158 uint32_t c_segments_available_new;
4159 uint32_t compressed_pages;
4160
4161 #if CONFIG_FREEZE
4162 if (freezer_incore_cseg_acct) {
4163 compressed_pages = c_segment_pages_compressed_incore;
4164 } else {
4165 compressed_pages = c_segment_pages_compressed;
4166 }
4167 #else
4168 compressed_pages = c_segment_pages_compressed;
4169 #endif /* CONFIG_FREEZE */
4170
4171 if (c_segments_available >= c_segments_limit || compressed_pages >= c_segment_pages_compressed_limit) {
4172 lck_mtx_unlock_always(c_list_lock);
4173
4174 return NULL;
4175 }
4176 c_segments_busy = TRUE;
4177 lck_mtx_unlock_always(c_list_lock);
4178
4179 /* pages for c_segments are never depopulated, c_segments_available never goes down */
4180 kernel_memory_populate((vm_offset_t)c_segments_next_page,
4181 PAGE_SIZE, KMA_NOFAIL | KMA_KOBJECT,
4182 VM_KERN_MEMORY_COMPRESSOR);
4183 c_segments_next_page += PAGE_SIZE;
4184
4185 c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
4186
4187 if (c_segments_available_new > c_segments_limit) {
4188 c_segments_available_new = c_segments_limit;
4189 }
4190
4191 /* add the just-added segments to the top of the free-list */
4192 for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
4193 c_segments[c_segno - 1].c_segno = c_segno; /* next free is the one after you */
4194 }
4195
4196 lck_mtx_lock_spin_always(c_list_lock);
4197
4198 c_segments[c_segno - 1].c_segno = c_free_segno_head; /* link to the rest of the existing freelist */
4199 c_free_segno_head = c_segments_available; /* first one in the page that was just allocated */
4200 c_segments_available = c_segments_available_new;
4201
4202 c_segments_busy = FALSE;
4203 thread_wakeup((event_t) (&c_segments_busy));
4204 }
4205 c_segno = c_free_segno_head;
4206 assert(c_segno >= 0 && c_segno < c_segments_limit);
4207
4208 c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;
4209
4210 /*
4211 * do the rest of the bookkeeping now while we're still behind
4212 * the list lock and grab our generation id now into a local
4213 * so that we can install it once we have the c_seg allocated
4214 */
4215 c_segment_count++;
4216 if (c_segment_count > c_segment_count_max) {
4217 c_segment_count_max = c_segment_count;
4218 }
4219
4220 lck_mtx_unlock_always(c_list_lock);
4221
4222 c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
4223
4224 c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
4225
4226 lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
4227
4228 c_seg->c_state = C_IS_EMPTY;
4229 c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
4230 c_seg->c_mysegno = c_segno;
4231
4232 lck_mtx_lock_spin_always(c_list_lock);
4233 c_empty_count++; /* going to be immediately decremented in the next call */
4234 c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
4235 c_segments[c_segno].c_seg = c_seg;
4236 assert(c_segments[c_segno].c_segno > c_segments_available); /* we just assigned a pointer to it so this is an indication that it is occupied */
4237 lck_mtx_unlock_always(c_list_lock);
4238
4239 for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4240 #if XNU_TARGET_OS_OSX /* tag:DONATE */
4241 donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4242 #else /* XNU_TARGET_OS_OSX */
4243 if (memorystatus_swap_all_apps) {
4244 donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4245 } else {
4246 donate_queue_head = NULL;
4247 }
4248 #endif /* XNU_TARGET_OS_OSX */
4249
4250 if (current_chead == donate_queue_head) {
4251 c_seg->c_has_donated_pages = 1;
4252 break;
4253 }
4254 }
4255
4256 *current_chead = c_seg;
4257
4258 #if DEVELOPMENT || DEBUG
4259 C_SEG_MAKE_WRITEABLE(c_seg);
4260 #endif
4261 }
4262 c_seg_alloc_nextslot(c_seg);
4263
4264 size_to_populate = c_seg_allocsize - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
4265
4266 if (size_to_populate) {
4267 min_needed = PAGE_SIZE + (c_seg_allocsize - c_seg_bufsize);
4268
4269 if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
4270 if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
4271 size_to_populate = C_SEG_MAX_POPULATE_SIZE;
4272 }
4273
4274 OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed);
4275
4276 kernel_memory_populate(
4277 (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
4278 size_to_populate,
4279 KMA_NOFAIL | KMA_COMPRESSOR,
4280 VM_KERN_MEMORY_COMPRESSOR);
4281 } else {
4282 size_to_populate = 0;
4283 }
4284 }
4285 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4286
4287 lck_mtx_lock_spin_always(&c_seg->c_lock);
4288
4289 if (size_to_populate) {
4290 c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
4291 }
4292
4293 return c_seg;
4294 }
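/*
 * Sketch of the segment free list used above (illustrative only):
 * while a c_segments[] slot is free, its c_segno field holds the index
 * of the next free slot, and c_free_segno_head points at the first one.
 * Popping a segment number is therefore:
 *
 *	c_segno = c_free_segno_head;
 *	c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;
 *
 * Once allocated, the same entry stores the c_seg pointer instead,
 * which is why an occupied slot's c_segno field reads as a large value
 * (> c_segments_available) in the assert above.
 */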
4295
4296 #if DEVELOPMENT || DEBUG
4297 #if CONFIG_FREEZE
4298 extern boolean_t memorystatus_freeze_to_memory;
4299 #endif /* CONFIG_FREEZE */
4300 #endif /* DEVELOPMENT || DEBUG */
4301 uint64_t c_seg_total_donated_bytes = 0; /* For testing/debugging only for now. Remove and add new counters for vm_stat.*/
4302
4303 uint64_t c_seg_filled_no_contention = 0;
4304 uint64_t c_seg_filled_contention = 0;
4305 clock_sec_t c_seg_filled_contention_sec_max = 0;
4306 clock_nsec_t c_seg_filled_contention_nsec_max = 0;
4307
4308 static void
4309 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
4310 {
4311 uint32_t unused_bytes;
4312 uint32_t offset_to_depopulate;
4313 int new_state = C_ON_AGE_Q;
4314 clock_sec_t sec;
4315 clock_nsec_t nsec;
4316 bool head_insert = false, wakeup_swapout_thread = false;
4317
4318 unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
4319
4320 if (unused_bytes) {
4321 /* if this is a platform that needs an extra page at the end of the segment when running compression
4322 * then now is the time to depopulate that extra page. It still takes virtual space but doesn't
4323 * actually waste memory */
4324 offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
4325
4326 /* release the extra physical page(s) at the end of the segment */
4327 lck_mtx_unlock_always(&c_seg->c_lock);
4328
4329 kernel_memory_depopulate(
4330 (vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
4331 unused_bytes,
4332 KMA_COMPRESSOR,
4333 VM_KERN_MEMORY_COMPRESSOR);
4334
4335 lck_mtx_lock_spin_always(&c_seg->c_lock);
4336
4337 c_seg->c_populated_offset = offset_to_depopulate;
4338 }
4339 assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= c_seg_bufsize);
4340
4341 #if DEVELOPMENT || DEBUG
4342 {
4343 boolean_t c_seg_was_busy = FALSE;
4344
4345 if (!c_seg->c_busy) {
4346 C_SEG_BUSY(c_seg);
4347 } else {
4348 c_seg_was_busy = TRUE;
4349 }
4350
4351 lck_mtx_unlock_always(&c_seg->c_lock);
4352
4353 C_SEG_WRITE_PROTECT(c_seg);
4354
4355 lck_mtx_lock_spin_always(&c_seg->c_lock);
4356
4357 if (c_seg_was_busy == FALSE) {
4358 C_SEG_WAKEUP_DONE(c_seg);
4359 }
4360 }
4361 #endif
4362
4363 #if CONFIG_FREEZE
4364 if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
4365 VM_CONFIG_SWAP_IS_PRESENT &&
4366 VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
4367 #if DEVELOPMENT || DEBUG
4368 && !memorystatus_freeze_to_memory
4369 #endif /* DEVELOPMENT || DEBUG */
4370 ) {
4371 new_state = C_ON_SWAPOUT_Q;
4372 wakeup_swapout_thread = true;
4373 }
4374 #endif /* CONFIG_FREEZE */
4375
4376 if (vm_darkwake_mode == TRUE) {
4377 new_state = C_ON_SWAPOUT_Q;
4378 head_insert = true;
4379 wakeup_swapout_thread = true;
4380 } else {
4381 c_segment_t *donate_queue_head;
4382 for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4383 #if XNU_TARGET_OS_OSX /* tag:DONATE */
4384 donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_early_swapout_chead);
4385 #else /* XNU_TARGET_OS_OSX */
4386 donate_queue_head = (c_segment_t*) &(pgo_iothread_internal_state[i].current_late_swapout_chead);
4387 #endif /* XNU_TARGET_OS_OSX */
4388 if (current_chead == donate_queue_head) {
4389 /* This is the place where the "donating" task actually does the so-called donation.
4390 * Instead of continuing to take up space in memory in the compressor, the segment goes directly
4391 * to swap-out instead of going to AGE_Q */
4392 assert(c_seg->c_has_donated_pages);
4393 new_state = C_ON_SWAPOUT_Q;
4394 c_seg_total_donated_bytes += c_seg->c_bytes_used;
4395 break;
4396 }
4397 }
4398 }
4399
4400 clock_get_system_nanotime(&sec, &nsec);
4401 c_seg->c_creation_ts = (uint32_t)sec;
4402
4403 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4404 clock_sec_t sec2;
4405 clock_nsec_t nsec2;
4406
4407 lck_mtx_lock_spin_always(c_list_lock);
4408 clock_get_system_nanotime(&sec2, &nsec2);
4409 TIME_SUB(sec2, sec, nsec2, nsec, NSEC_PER_SEC);
4410 /* keep track of how much time we've waited for c_list_lock */
4411 if (sec2 > c_seg_filled_contention_sec_max) {
4412 c_seg_filled_contention_sec_max = sec2;
4413 c_seg_filled_contention_nsec_max = nsec2;
4414 } else if (sec2 == c_seg_filled_contention_sec_max && nsec2 > c_seg_filled_contention_nsec_max) {
4415 c_seg_filled_contention_nsec_max = nsec2;
4416 }
4417 c_seg_filled_contention++;
4418 } else {
4419 c_seg_filled_no_contention++;
4420 }
4421
4422 #if CONFIG_FREEZE
4423 if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead)) {
4424 if (freezer_context_global.freezer_ctx_task->donates_own_pages) {
4425 assert(!c_seg->c_has_donated_pages);
4426 c_seg->c_has_donated_pages = 1;
4427 OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore_late_swapout);
4428 }
4429 c_seg->c_has_freezer_pages = 1;
4430 }
4431 #endif /* CONFIG_FREEZE */
4432
4433 c_seg->c_generation_id = c_generation_id++;
4434 c_seg_switch_state(c_seg, new_state, head_insert);
4435
4436 #if CONFIG_FREEZE
4437 /*
4438 * Donated segments count as frozen to swap if we go through the freezer.
4439 * TODO: What we need is a new ledger and cseg state that can describe
4440 * a frozen cseg from a donated task so we can accurately decrement it on
4441 * swapins.
4442 */
4443 if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) && (c_seg->c_state == C_ON_SWAPOUT_Q)) {
4444 /*
4445 * darkwake and freezer can't co-exist together
4446 * We'll need to fix this accounting as a start.
4447 * And early donation c_segs are separate from frozen c_segs.
4448 */
4449 assert(vm_darkwake_mode == FALSE);
4450 c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
4451 freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
4452 }
4453 #endif /* CONFIG_FREEZE */
4454
4455 if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4456 /* this is possible if we decompressed a page from the segment before it finished filling */
4457 #if CONFIG_FREEZE
4458 assert(c_seg->c_task_owner == NULL);
4459 #endif /* CONFIG_FREEZE */
4460 c_seg_need_delayed_compaction(c_seg, TRUE);
4461 }
4462
4463 lck_mtx_unlock_always(c_list_lock);
4464
4465 if (wakeup_swapout_thread) {
4466 /*
4467 * Darkwake and Freeze configs always
4468 * wake up the swapout thread because
4469 * the compactor thread that normally handles
4470 * it may not be running as much in these
4471 * configs.
4472 */
4473 thread_wakeup((event_t)&vm_swapout_thread);
4474 }
4475
4476 *current_chead = NULL;
4477 }
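/*
 * Illustrative lifecycle summary (not part of the build): a filling
 * segment obtained from c_seg_allocate() is fed by c_compress_page()
 * until it is nearly full, at which point c_current_seg_filled()
 * depopulates the unused tail pages and moves it to C_ON_AGE_Q -- or
 * straight to C_ON_SWAPOUT_Q for darkwake, freezer, or donated
 * segments -- and clears *current_chead so the next compression
 * allocates a fresh segment.
 */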
4478
4479 /*
4480 * returns with c_seg locked
4481 */
4482 void
4483 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
4484 {
4485 clock_sec_t sec;
4486 clock_nsec_t nsec;
4487
4488 clock_get_system_nanotime(&sec, &nsec);
4489
4490 lck_mtx_lock_spin_always(c_list_lock);
4491 lck_mtx_lock_spin_always(&c_seg->c_lock);
4492
4493 assert(c_seg->c_busy_swapping);
4494 assert(c_seg->c_busy);
4495
4496 c_seg->c_busy_swapping = 0;
4497
4498 if (c_seg->c_overage_swap == TRUE) {
4499 c_overage_swapped_count--;
4500 c_seg->c_overage_swap = FALSE;
4501 }
4502 if (has_data == TRUE) {
4503 if (age_on_swapin_q == TRUE || c_seg->c_has_donated_pages) {
4504 #if CONFIG_FREEZE
4505 /*
4506 * If a segment has both identities, frozen and donated bits set, the donated
4507 * bit wins on the swapin path. This is because the segment is being swapped back
4508 * in and so is in demand and should be given more time to spend in memory before
4509 * being swapped back out under pressure.
4510 */
4511 if (c_seg->c_has_donated_pages) {
4512 c_seg->c_has_freezer_pages = 0;
4513 }
4514 #endif /* CONFIG_FREEZE */
4515 c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
4516 } else {
4517 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
4518 }
4519
4520 if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4521 c_seg_need_delayed_compaction(c_seg, TRUE);
4522 }
4523 } else {
4524 c_seg->c_store.c_buffer = (int32_t*) NULL;
4525 c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4526
4527 c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
4528 }
4529 c_seg->c_swappedin_ts = (uint32_t)sec;
4530 c_seg->c_swappedin = true;
4531 #if TRACK_C_SEGMENT_UTILIZATION
4532 c_seg->c_decompressions_since_swapin = 0;
4533 #endif /* TRACK_C_SEGMENT_UTILIZATION */
4534
4535 lck_mtx_unlock_always(c_list_lock);
4536 }
4537
4538
4539
4540 /*
4541 * c_seg has to be locked and is returned locked if the c_seg isn't freed
4542 * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
4543 * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
4544 */
4545
4546 int
4547 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
4548 {
4549 vm_offset_t addr = 0;
4550 uint32_t io_size = 0;
4551 uint64_t f_offset;
4552 thread_pri_floor_t token;
4553
4554 assert(C_SEG_IS_ONDISK(c_seg));
4555
4556 #if !CHECKSUM_THE_SWAP
4557 c_seg_trim_tail(c_seg);
4558 #endif
4559 io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
4560 f_offset = c_seg->c_store.c_swap_handle;
4561
4562 C_SEG_BUSY(c_seg);
4563 c_seg->c_busy_swapping = 1;
4564
4565 /*
4566 * This thread is likely going to block for I/O.
4567 * Make sure it is ready to run when the I/O completes because
4568 * it needs to clear the busy bit on the c_seg so that other
4569 * waiting threads can make progress too.
4570 */
4571 token = thread_priority_floor_start();
4572 lck_mtx_unlock_always(&c_seg->c_lock);
4573
4574 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4575
4576 addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
4577 c_seg->c_store.c_buffer = (int32_t*) addr;
4578
4579 kernel_memory_populate(addr, io_size, KMA_NOFAIL | KMA_COMPRESSOR,
4580 VM_KERN_MEMORY_COMPRESSOR);
4581
4582 if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
4583 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4584
4585 kernel_memory_depopulate(addr, io_size, KMA_COMPRESSOR,
4586 VM_KERN_MEMORY_COMPRESSOR);
4587
4588 c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
4589 } else {
4590 #if ENCRYPTED_SWAP
4591 vm_swap_decrypt(c_seg);
4592 #endif /* ENCRYPTED_SWAP */
4593
4594 #if CHECKSUM_THE_SWAP
4595 if (c_seg->cseg_swap_size != io_size) {
4596 panic("swapin size doesn't match swapout size");
4597 }
4598
4599 if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
4600 panic("c_seg_swapin - Swap hash mismatch");
4601 }
4602 #endif /* CHECKSUM_THE_SWAP */
4603
4604 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4605
4606 c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
4607
4608 #if CONFIG_FREEZE
4609 /*
4610 * c_seg_swapin_requeue() returns with the c_seg lock held.
4611 */
4612 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
4613 assert(c_seg->c_busy);
4614
4615 lck_mtx_unlock_always(&c_seg->c_lock);
4616 lck_mtx_lock_spin_always(c_list_lock);
4617 lck_mtx_lock_spin_always(&c_seg->c_lock);
4618 }
4619
4620 if (c_seg->c_task_owner) {
4621 c_seg_update_task_owner(c_seg, NULL);
4622 }
4623
4624 lck_mtx_unlock_always(c_list_lock);
4625
4626 OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore);
4627 if (c_seg->c_has_donated_pages) {
4628 OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore_late_swapout);
4629 }
4630 #endif /* CONFIG_FREEZE */
4631
4632 OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
4633
4634 if (force_minor_compaction == TRUE) {
4635 if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
4636 /*
4637 * c_seg was completely empty so it was freed,
4638 * so be careful not to reference it again
4639 *
4640 * Drop the boost so that the thread priority
4641 * is returned back to where it is supposed to be.
4642 */
4643 thread_priority_floor_end(&token);
4644 return 1;
4645 }
4646
4647 lck_mtx_lock_spin_always(&c_seg->c_lock);
4648 }
4649 }
4650 C_SEG_WAKEUP_DONE(c_seg);
4651
4652 /*
4653 * Drop the boost so that the thread priority
4654 * is returned back to where it is supposed to be.
4655 */
4656 thread_priority_floor_end(&token);
4657
4658 return 0;
4659 }
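/*
 * Usage sketch for c_seg_swapin(), mirroring the fastwake warmup path
 * earlier in this file (illustrative only): the caller holds the c_seg
 * lock with page replacement disallowed, and must not touch the
 * segment again if the swapin freed it.
 *
 *	if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
 *		// still alive: returned with c_seg->c_lock held
 *		lck_mtx_unlock_always(&c_seg->c_lock);
 *	}
 */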
4660
4661
4662 static void
4663 c_segment_sv_hash_drop_ref(int hash_indx)
4664 {
4665 struct c_sv_hash_entry o_sv_he, n_sv_he;
4666
4667 while (1) {
4668 o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
4669
4670 n_sv_he.he_ref = o_sv_he.he_ref - 1;
4671 n_sv_he.he_data = o_sv_he.he_data;
4672
4673 if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
4674 if (n_sv_he.he_ref == 0) {
4675 OSAddAtomic(-1, &c_segment_svp_in_hash);
4676 }
4677 break;
4678 }
4679 }
4680 }
4681
4682
4683 static int
4684 c_segment_sv_hash_insert(uint32_t data)
4685 {
4686 int hash_sindx;
4687 int misses;
4688 struct c_sv_hash_entry o_sv_he, n_sv_he;
4689 boolean_t got_ref = FALSE;
4690
4691 if (data == 0) {
4692 OSAddAtomic(1, &c_segment_svp_zero_compressions);
4693 } else {
4694 OSAddAtomic(1, &c_segment_svp_nonzero_compressions);
4695 }
4696
4697 hash_sindx = data & C_SV_HASH_MASK;
4698
4699 for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
4700 o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4701
4702 while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
4703 n_sv_he.he_ref = o_sv_he.he_ref + 1;
4704 n_sv_he.he_data = data;
4705
4706 if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
4707 if (n_sv_he.he_ref == 1) {
4708 OSAddAtomic(1, &c_segment_svp_in_hash);
4709 }
4710 got_ref = TRUE;
4711 break;
4712 }
4713 o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
4714 }
4715 if (got_ref == TRUE) {
4716 break;
4717 }
4718 hash_sindx++;
4719
4720 if (hash_sindx == C_SV_HASH_SIZE) {
4721 hash_sindx = 0;
4722 }
4723 }
4724 if (got_ref == FALSE) {
4725 return -1;
4726 }
4727
4728 return hash_sindx;
4729 }
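/*
 * Worked example of the single-value hash above (assumes only what is
 * shown here): for a page filled with the 32-bit pattern 0x00000007,
 * the starting bucket is 0x00000007 & C_SV_HASH_MASK. The CAS loop
 * either bumps the refcount of an existing entry whose he_data matches
 * or claims a bucket whose he_ref is 0; after C_SV_HASH_MAX_MISS
 * occupied, non-matching buckets (probing linearly with wraparound at
 * C_SV_HASH_SIZE) it gives up and returns -1, in which case the caller
 * stores the 4 byte value in the segment instead.
 */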
4730
4731
4732 #if RECORD_THE_COMPRESSED_DATA
4733
4734 static void
4735 c_compressed_record_data(char *src, int c_size)
4736 {
4737 if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
4738 panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
4739 }
4740
4741 *(int *)((void *)c_compressed_record_cptr) = c_size;
4742
4743 c_compressed_record_cptr += 4;
4744
4745 memcpy(c_compressed_record_cptr, src, c_size);
4746 c_compressed_record_cptr += c_size;
4747 }
4748 #endif
4749
4750
4751 /**
4752 * Do the actual compression of the given page
4753 * @param src [IN] address in the physical aperture of the page to compress.
4754 * @param slot_ptr [OUT] fill the slot-mapping of the c_seg+slot where the page ends up being stored
4755 * @param current_chead [IN-OUT] current filling c_seg; the pointer comes from the current compression thread state.
4756 * On the very first call this points to NULL and this function will fill it with a new
4757 * filling c_seg. If the current filling c_seg doesn't have enough space, it will be replaced in this location
4758 * with a new filling c_seg.
4759 * @param scratch_buf [IN] pointer from the current thread state, used by the compression codec
4760 * @return 0 on success, 1 on memory allocation error
4761 */
4762 static int
4763 c_compress_page(
4764 char *src,
4765 c_slot_mapping_t slot_ptr,
4766 c_segment_t *current_chead,
4767 char *scratch_buf,
4768 __unused vm_compressor_options_t flags)
4769 {
4770 int c_size = -1;
4771 int c_rounded_size = 0;
4772 int max_csize;
4773 c_slot_t cs;
4774 c_segment_t c_seg;
4775
4776 KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
4777 retry: /* may need to retry if the currently filling c_seg will not have enough space */
4778 if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
4779 return 1;
4780 }
4781 /*
4782 * returns with c_seg lock held
4783 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
4784 * c_nextslot has been allocated and
4785 * c_store.c_buffer populated
4786 */
4787 assert(c_seg->c_state == C_IS_FILLING);
4788
4789 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
4790
4791 C_SLOT_ASSERT_PACKABLE(slot_ptr);
4792 cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
4793
4794 cs->c_offset = c_seg->c_nextoffset;
4795
4796 unsigned int avail_space = c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
4797
4798
4799 max_csize = avail_space;
4800 if (max_csize > PAGE_SIZE) {
4801 max_csize = PAGE_SIZE;
4802 }
4803
4804 #if CHECKSUM_THE_DATA
4805 cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
4806 #endif
4807 boolean_t incomp_copy = FALSE; /* codec indicates it already copied an incompressible page */
4808 int max_csize_adj = (max_csize - 4); /* how much space we have left in this c_seg to fill */
4809
4810 if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4811 #if defined(__arm64__)
4812 uint16_t ccodec = CINVALID;
4813 uint32_t inline_popcount;
4814 if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
4815 vm_memtag_disable_checking();
4816 c_size = metacompressor((const uint8_t *) src,
4817 (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4818 max_csize_adj, &ccodec,
4819 scratch_buf, &incomp_copy, &inline_popcount);
4820 vm_memtag_enable_checking();
4821 assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4822
4823 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
4824 if (c_size > max_csize_adj) {
4825 c_size = -1;
4826 }
4827 #endif
4828 } else {
4829 c_size = -1;
4830 }
4831 assert(ccodec == CCWK || ccodec == CCLZ4);
4832 cs->c_codec = ccodec;
4833 #endif
4834 } else {
4835 #if defined(__arm64__)
4836 vm_memtag_disable_checking();
4837 cs->c_codec = CCWK;
4838 __unreachable_ok_push
4839 if (PAGE_SIZE == 4096) {
4840 c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4841 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4842 } else {
4843 c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4844 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4845 }
4846 __unreachable_ok_pop
4847 vm_memtag_enable_checking();
4848 #else
4849 vm_memtag_disable_checking();
4850 c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4851 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
4852 vm_memtag_enable_checking();
4853 #endif
4854 }
4855 /* c_size is the size written by the codec, 0 if the page is a uniform 32 bit value, or -1 if there was not enough
4856 * space or the page was incompressible */
4857 assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
4858 "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
4859
4860 if (c_size == -1) {
4861 if (max_csize < PAGE_SIZE) {
4862 c_current_seg_filled(c_seg, current_chead);
4863 assert(*current_chead == NULL);
4864
4865 lck_mtx_unlock_always(&c_seg->c_lock);
4866 /* TODO: it may be worth requiring codecs to distinguish
4867 * between incompressible inputs and failures due to budget exhaustion.
4868 * Right now this assumes that if the space we had was a full PAGE_SIZE, the codec failed due to incompressible input */
4869
4870 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4871 goto retry; /* previous c_seg didn't have enough space, we finalized it and can try again with a fresh c_seg */
4872 }
4873 c_size = PAGE_SIZE;
4874
4875 if (incomp_copy == FALSE) { /* codec did not copy the incompressible input */
4876 vm_memtag_disable_checking();
4877 memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4878 vm_memtag_enable_checking();
4879 }
4880
4881 OSAddAtomic(1, &c_segment_noncompressible_pages);
4882 } else if (c_size == 0) {
4883 /*
4884 * Special case - this is a page completely full of a single 32 bit value.
4885 * We store some values directly in the c_slot_mapping, if not there, the
4886 * 4 byte value goes in the compressor segment.
4887 */
4888 int hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);
4889
4890 if (hash_index != -1
4891 ) {
4892 slot_ptr->s_cindx = hash_index;
4893 slot_ptr->s_cseg = C_SV_CSEG_ID;
4894 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4895 slot_ptr->s_uncompressed = 0;
4896 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4897
4898 OSAddAtomic(1, &c_segment_svp_hash_succeeded);
4899 #if RECORD_THE_COMPRESSED_DATA
4900 c_compressed_record_data(src, 4);
4901 #endif
4902 /* we didn't write anything to c_buffer and didn't end up using the slot in the c_seg at all, so skip all
4903 * the book-keeping we would do if we had */
4904 goto sv_compression;
4905 }
4906 OSAddAtomic(1, &c_segment_svp_hash_failed);
4907
4908 c_size = 4;
4909 vm_memtag_disable_checking();
4910 memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
4911 vm_memtag_enable_checking();
4912 }
4913
4914 #if RECORD_THE_COMPRESSED_DATA
4915 c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4916 #endif
4917 #if CHECKSUM_THE_COMPRESSED_DATA
4918 cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
4919 #endif
4920 #if POPCOUNT_THE_COMPRESSED_DATA
4921 cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
4922 #endif
4923
4924 PACK_C_SIZE(cs, c_size);
4925
4926 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4927
4928 c_seg->c_bytes_used += c_rounded_size;
4929 c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
4930 c_seg->c_slots_used++;
4931
4932 #if CONFIG_FREEZE
4933 /* TODO: should c_segment_pages_compressed be up here too? See 88598046 for details */
4934 OSAddAtomic(1, &c_segment_pages_compressed_incore);
4935 if (c_seg->c_has_donated_pages) {
4936 OSAddAtomic(1, &c_segment_pages_compressed_incore_late_swapout);
4937 }
4938 #endif /* CONFIG_FREEZE */
4939
4940 slot_ptr->s_cindx = c_seg->c_nextslot++;
4941 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1, see other usages of s_cseg where it's decremented */
4942 slot_ptr->s_cseg = c_seg->c_mysegno + 1;
4943
4944 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4945 slot_ptr->s_uncompressed = 0;
4946 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4947
4948 sv_compression:
4949 /* can we say this c_seg is full? */
4950 if (c_seg->c_nextoffset >= c_seg_off_limit || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
4951 /* condition 1: segment buffer is almost full, don't bother trying to fill it further.
4952 * condition 2: we can't have any more slots in this c_segment even if we had buffer space */
4953 c_current_seg_filled(c_seg, current_chead);
4954 assert(*current_chead == NULL);
4955 }
4956
4957 lck_mtx_unlock_always(&c_seg->c_lock);
4958
4959 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4960
4961 #if RECORD_THE_COMPRESSED_DATA
4962 if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= c_seg_allocsize) {
4963 c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
4964 c_compressed_record_cptr = c_compressed_record_sbuf;
4965 }
4966 #endif
4967 if (c_size) {
4968 OSAddAtomic64(c_size, &c_segment_compressed_bytes);
4969 OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
4970 }
4971 OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);
4972
4973 OSAddAtomic(1, &c_segment_pages_compressed);
4974 #if DEVELOPMENT || DEBUG
4975 if (!compressor_running_perf_test) {
4976 /*
4977 * The perf_compressor benchmark should not be able to trigger
4978 * compressor thrashing jetsams.
4979 */
4980 OSAddAtomic(1, &sample_period_compression_count);
4981 }
4982 #else /* DEVELOPMENT || DEBUG */
4983 OSAddAtomic(1, &sample_period_compression_count);
4984 #endif /* DEVELOPMENT || DEBUG */
4985
4986 KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
4987
4988 return 0;
4989 }
4990
4991 static inline void
4992 sv_decompress(int32_t *ddst, int32_t pattern)
4993 {
4994 // assert(__builtin_constant_p(PAGE_SIZE) != 0);
4995 #if defined(__x86_64__)
4996 memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
4997 #elif defined(__arm64__)
4998 assert((PAGE_SIZE % 128) == 0);
4999 if (pattern == 0) {
5000 fill32_dczva((addr64_t)ddst, PAGE_SIZE);
5001 } else {
5002 fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
5003 }
5004 #else
5005 size_t i;
5006
5007 /* Unroll the pattern fill loop 4x to encourage the
5008 * compiler to emit NEON stores, cf.
5009 * <rdar://problem/25839866> Loop autovectorization
5010 * anomalies.
5011 */
5012 /* We use separate loops for each PAGE_SIZE
5013 * to allow the autovectorizer to engage, as PAGE_SIZE
5014 * may not be a constant.
5015 */
5016
5017 __unreachable_ok_push
5018 if (PAGE_SIZE == 4096) {
5019 for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
5020 *ddst++ = pattern;
5021 *ddst++ = pattern;
5022 *ddst++ = pattern;
5023 *ddst++ = pattern;
5024 }
5025 } else {
5026 assert(PAGE_SIZE == 16384);
5027 for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
5028 *ddst++ = pattern;
5029 *ddst++ = pattern;
5030 *ddst++ = pattern;
5031 *ddst++ = pattern;
5032 }
5033 }
5034 __unreachable_ok_pop
5035 #endif
5036 }
5037
5038 static int
5039 c_decompress_page(
5040 char *dst,
5041 volatile c_slot_mapping_t slot_ptr, /* why volatile? perhaps due to changes across hibernation */
5042 vm_compressor_options_t flags,
5043 int *zeroslot)
5044 {
5045 c_slot_t cs;
5046 c_segment_t c_seg;
5047 uint32_t c_segno;
5048 uint16_t c_indx;
5049 int c_rounded_size;
5050 uint32_t c_size;
5051 int retval = 0;
5052 boolean_t need_unlock = TRUE;
5053 boolean_t consider_defragmenting = FALSE;
5054 boolean_t kdp_mode = FALSE;
5055
5056 if (__improbable(flags & C_KDP)) {
5057 if (not_in_kdp) {
5058 panic("C_KDP passed to decompress page from outside of debugger context");
5059 }
5060
5061 assert((flags & C_KEEP) == C_KEEP);
5062 assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
5063
5064 if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
5065 return -2;
5066 }
5067
5068 kdp_mode = TRUE;
5069 *zeroslot = 0;
5070 }
5071
5072 ReTry:
5073 if (__probable(!kdp_mode)) {
5074 PAGE_REPLACEMENT_DISALLOWED(TRUE);
5075 } else {
5076 if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
5077 return -2;
5078 }
5079 }
5080
5081 #if HIBERNATION
5082 /*
5083 * if hibernation is enabled, it indicates (via a call
5084 * to 'vm_decompressor_lock') that no further
5085 * decompressions are allowed once it reaches
5086 * the point of flushing all of the currently dirty
5087 * anonymous memory through the compressor and out
5088 * to disk... in this state we allow freeing of compressed
5089 * pages and must honor the C_DONT_BLOCK case
5090 */
5091 if (__improbable(dst && decompressions_blocked == TRUE)) {
5092 if (flags & C_DONT_BLOCK) {
5093 if (__probable(!kdp_mode)) {
5094 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5095 }
5096
5097 *zeroslot = 0;
5098 return -2;
5099 }
5100 /*
5101 * it's safe to atomically assert and block behind the
5102 * lock held in shared mode because "decompressions_blocked" is
5103 * only set and cleared, and the corresponding thread_wakeup issued,
5104 * while the lock is held exclusively
5105 */
5106 assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
5107
5108 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5109
5110 thread_block(THREAD_CONTINUE_NULL);
5111
5112 goto ReTry;
5113 }
5114 #endif
5115 /* s_cseg is actually "segno+1" */
5116 c_segno = slot_ptr->s_cseg - 1;
5117
5118 if (__improbable(c_segno >= c_segments_available)) {
5119 panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
5120 c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
5121 }
5122
5123 if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) {
5124 panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
5125 c_segno, slot_ptr, *(int *)((void *)slot_ptr));
5126 }
5127
5128 c_seg = c_segments[c_segno].c_seg;
5129
5130 if (__probable(!kdp_mode)) {
5131 lck_mtx_lock_spin_always(&c_seg->c_lock);
5132 } else {
5133 if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
5134 return -2;
5135 }
5136 }
5137
5138 assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
5139
5140 if (dst == NULL && c_seg->c_busy_swapping) {
5141 assert(c_seg->c_busy);
5142
5143 goto bypass_busy_check;
5144 }
5145 if (flags & C_DONT_BLOCK) {
5146 if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
5147 *zeroslot = 0;
5148
5149 retval = -2;
5150 goto done;
5151 }
5152 }
5153 if (c_seg->c_busy) {
5154 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5155
5156 c_seg_wait_on_busy(c_seg);
5157
5158 goto ReTry;
5159 }
5160 bypass_busy_check:
5161
5162 c_indx = slot_ptr->s_cindx;
5163
5164 if (__improbable(c_indx >= c_seg->c_nextslot)) {
5165 panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5166 c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5167 }
5168
5169 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5170
5171 c_size = UNPACK_C_SIZE(cs);
5172
5173 if (__improbable(c_size == 0)) { /* sanity check it's not an empty slot */
5174 panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
5175 c_seg, slot_ptr, *(int *)((void *)slot_ptr));
5176 }
5177
5178 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
5179
5180 if (dst) { /* would be NULL if we don't want the page content, from free */
5181 uint32_t age_of_cseg;
5182 clock_sec_t cur_ts_sec;
5183 clock_nsec_t cur_ts_nsec;
5184
5185 if (C_SEG_IS_ONDISK(c_seg)) {
5186 #if CONFIG_FREEZE
5187 if (freezer_incore_cseg_acct) {
5188 if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
5189 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5190 lck_mtx_unlock_always(&c_seg->c_lock);
5191
5192 memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5193
5194 goto ReTry;
5195 }
5196
5197 uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
5198 if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
5199 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5200 lck_mtx_unlock_always(&c_seg->c_lock);
5201
5202 memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
5203
5204 goto ReTry;
5205 }
5206 }
5207 #endif /* CONFIG_FREEZE */
5208 assert(kdp_mode == FALSE);
5209 retval = c_seg_swapin(c_seg, FALSE, TRUE);
5210 assert(retval == 0);
5211
5212 retval = 1;
5213 }
5214 if (c_seg->c_state == C_ON_BAD_Q) {
5215 assert(c_seg->c_store.c_buffer == NULL);
5216 *zeroslot = 0;
5217
5218 retval = -1;
5219 goto done;
5220 }
5221
5222 #if POPCOUNT_THE_COMPRESSED_DATA
5223 unsigned csvpop;
5224 uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
5225 if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
5226 panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
5227 }
5228 #endif
5229
5230 #if CHECKSUM_THE_COMPRESSED_DATA
5231 unsigned csvhash;
5232 if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
5233 panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
5234 }
5235 #endif
5236 if (c_rounded_size == PAGE_SIZE) {
5237 /* page wasn't compressible... just copy it out */
5238 vm_memtag_disable_checking();
5239 memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
5240 vm_memtag_enable_checking();
5241 } else if (c_size == 4) {
5242 int32_t data;
5243 int32_t *dptr;
5244
5245 /*
5246 * page was populated with a single value
5247 * that didn't fit into our fast hash
5248 * so we packed it in as a single non-compressed value
5249 * that we need to populate the page with
5250 */
5251 dptr = (int32_t *)(uintptr_t)dst;
5252 data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
5253 vm_memtag_disable_checking();
5254 sv_decompress(dptr, data);
5255 vm_memtag_enable_checking();
5256 } else { /* normal segment decompress */
5257 uint32_t my_cpu_no;
5258 char *scratch_buf;
5259
5260 my_cpu_no = cpu_number();
5261
5262 assert(my_cpu_no < compressor_cpus);
5263
5264 if (__probable(!kdp_mode)) {
5265 /*
5266 * we're behind the c_seg lock held in spin mode
5267 * which means pre-emption is disabled... therefore
5268 * the following sequence is atomic and safe
5269 */
5270 scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5271 } else if (flags & C_KDP_MULTICPU) {
5272 assert(vm_compressor_kdp_state.kc_scratch_bufs != NULL);
5273 scratch_buf = &vm_compressor_kdp_state.kc_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
5274 } else {
5275 scratch_buf = vm_compressor_kdp_state.kc_panic_scratch_buf;
5276 }
5277
5278 if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
5279 #if defined(__arm64__)
5280 uint16_t c_codec = cs->c_codec;
5281 uint32_t inline_popcount;
5282 vm_memtag_disable_checking();
5283 if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
5284 (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
5285 vm_memtag_enable_checking();
5286 retval = -1;
5287 } else {
5288 vm_memtag_enable_checking();
5289 assert(inline_popcount == C_SLOT_NO_POPCOUNT);
5290 }
5291 #endif
5292 } else { /* algorithm == VM_COMPRESSOR_DEFAULT_CODEC */
5293 vm_memtag_disable_checking();
5294 #if defined(__arm64__)
5295 __unreachable_ok_push
5296 if (PAGE_SIZE == 4096) {
5297 WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5298 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5299 } else {
5300 WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5301 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5302 }
5303 __unreachable_ok_pop
5304 #else
5305 WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
5306 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
5307 #endif
5308 vm_memtag_enable_checking();
5309 }
5310 } /* normal segment decompress */
5311
5312 #if CHECKSUM_THE_DATA
5313 if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
5314 #if defined(__arm64__)
5315 int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
5316 panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
5317 #else
5318 panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
5319 #endif
5320 }
5321 #endif
5322 if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
5323 clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
5324
5325 age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
5326 if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
5327 OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
5328 } else {
5329 OSAddAtomic(1, &overage_decompressions_during_sample_period);
5330 }
5331
5332 OSAddAtomic(1, &sample_period_decompression_count);
5333 }
5334
5335
5336 #if TRACK_C_SEGMENT_UTILIZATION
5337 if (c_seg->c_swappedin) {
5338 c_seg->c_decompressions_since_swapin++;
5339 }
5340 #endif /* TRACK_C_SEGMENT_UTILIZATION */
5341 } /* dst */
5342 #if CONFIG_FREEZE
5343 else {
5344 /*
5345 * We are freeing this compressed page without decompressing it, so balance the ledgers.
5346 */
5347 if (C_SEG_IS_ONDISK(c_seg)) {
5348 /*
5349 * The compression sweep feature will push out anonymous pages to disk
5350 * without going through the freezer path and so those c_segs, while
5351 * swapped out, won't have an owner.
5352 */
5353 if (c_seg->c_task_owner) {
5354 task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
5355 }
5356
5357 /*
5358 * We are freeing a page in swap without swapping it in. We bump the in-core
5359 * count here to simulate a swapin of a page so that we can accurately
5360 * decrement it below.
5361 */
5362 OSAddAtomic(1, &c_segment_pages_compressed_incore);
5363 if (c_seg->c_has_donated_pages) {
5364 OSAddAtomic(1, &c_segment_pages_compressed_incore_late_swapout);
5365 }
5366 } else if (c_seg->c_state == C_ON_BAD_Q) {
5367 assert(c_seg->c_store.c_buffer == NULL);
5368 *zeroslot = 0;
5369
5370 retval = -1;
5371 goto done;
5372 }
5373 }
5374 #endif /* CONFIG_FREEZE */
5375
5376 if (flags & C_KEEP) {
5377 *zeroslot = 0;
5378 goto done;
5379 }
5380 /* now perform needed bookkeeping for the removal of the slot from the segment */
5381 assert(kdp_mode == FALSE);
5382
5383 c_seg->c_bytes_unused += c_rounded_size;
5384 c_seg->c_bytes_used -= c_rounded_size;
5385
5386 assert(c_seg->c_slots_used);
5387 c_seg->c_slots_used--;
5388 if (dst && c_seg->c_swappedin) {
5389 task_t task = current_task();
5390 if (task) {
5391 ledger_credit(task->ledger, task_ledgers.swapins, PAGE_SIZE);
5392 }
5393 }
5394
5395 PACK_C_SIZE(cs, 0); /* mark slot as empty */
5396
5397 if (c_indx < c_seg->c_firstemptyslot) {
5398 c_seg->c_firstemptyslot = c_indx;
5399 }
5400
5401 OSAddAtomic(-1, &c_segment_pages_compressed);
5402 #if CONFIG_FREEZE
5403 OSAddAtomic(-1, &c_segment_pages_compressed_incore);
5404 assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
5405 if (c_seg->c_has_donated_pages) {
5406 OSAddAtomic(-1, &c_segment_pages_compressed_incore_late_swapout);
5407 assertf(c_segment_pages_compressed_incore_late_swapout >= 0, "-ve lateswapout count %p 0x%x", c_seg, c_segment_pages_compressed_incore_late_swapout);
5408 }
5409 #endif /* CONFIG_FREEZE */
5410
5411 if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
5412 /*
5413 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
5414 * free of a compressed page (i.e. dst == NULL)
5415 */
5416 OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
5417 }
5418 if (c_seg->c_busy_swapping) {
5419 /*
5420 * bypass case for c_busy_swapping...
5421 * let the swapin/swapout paths deal with putting
5422 * the c_seg on the minor compaction queue if needed
5423 */
5424 assert(c_seg->c_busy);
5425 goto done;
5426 }
5427 assert(!c_seg->c_busy);
5428
5429 if (c_seg->c_state != C_IS_FILLING) {
5430 /* did we just remove the last slot from the segment? */
5431 if (c_seg->c_bytes_used == 0) {
5432 if (!(C_SEG_IS_ONDISK(c_seg))) {
5433 /* it was compressed resident in memory */
5434 int pages_populated;
5435
5436 pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
5437 c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
5438
5439 if (pages_populated) {
5440 assert(c_seg->c_state != C_ON_BAD_Q);
5441 assert(c_seg->c_store.c_buffer != NULL);
5442
5443 C_SEG_BUSY(c_seg);
5444 lck_mtx_unlock_always(&c_seg->c_lock);
5445
5446 kernel_memory_depopulate(
5447 (vm_offset_t) c_seg->c_store.c_buffer,
5448 ptoa(pages_populated),
5449 KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
5450
5451 lck_mtx_lock_spin_always(&c_seg->c_lock);
5452 C_SEG_WAKEUP_DONE(c_seg);
5453 }
5454 /* minor compaction will free it */
5455 if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPIO_Q) {
5456 if (c_seg->c_state == C_ON_SWAPOUT_Q) {
5457 /* If we're on the swapout queue, there's no longer any reason to swap this segment out,
5458 * so move it to the AGE queue until it can be minor compacted */
5459 bool clear_busy = false;
5460 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
5461 C_SEG_BUSY(c_seg);
5462
5463 lck_mtx_unlock_always(&c_seg->c_lock);
5464 lck_mtx_lock_spin_always(c_list_lock);
5465 lck_mtx_lock_spin_always(&c_seg->c_lock);
5466 clear_busy = true;
5467 }
5468 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
5469 if (clear_busy) {
5470 C_SEG_WAKEUP_DONE(c_seg);
5471 clear_busy = false;
5472 }
5473 lck_mtx_unlock_always(c_list_lock);
5474 }
5475 c_seg_need_delayed_compaction(c_seg, FALSE);
5476 }
5477 } else { /* C_SEG_IS_ONDISK(c_seg) */
5478 /* it's empty and on-disk, make sure it's marked as sparse */
5479 if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
5480 c_seg_move_to_sparse_list(c_seg);
5481 consider_defragmenting = TRUE;
5482 }
5483 }
5484 } else if (c_seg->c_on_minorcompact_q) {
5485 assert(c_seg->c_state != C_ON_BAD_Q);
5486 assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
5487
5488 if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
5489 c_seg_try_minor_compaction_and_unlock(c_seg);
5490 need_unlock = FALSE;
5491 }
5492 } else if (!(C_SEG_IS_ONDISK(c_seg))) {
5493 if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
5494 C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
5495 c_seg_need_delayed_compaction(c_seg, FALSE);
5496 }
5497 } else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
5498 c_seg_move_to_sparse_list(c_seg);
5499 consider_defragmenting = TRUE;
5500 }
5501 } /* c_state != C_IS_FILLING */
5502 done:
5503 if (__improbable(kdp_mode)) {
5504 return retval;
5505 }
5506
5507 if (need_unlock == TRUE) {
5508 lck_mtx_unlock_always(&c_seg->c_lock);
5509 }
5510
5511 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5512
5513 if (consider_defragmenting == TRUE) {
5514 vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
5515 }
5516
5517 #if !XNU_TARGET_OS_OSX
5518 /*
5519 * Decompressions will generate fragmentation in the compressor pool
5520 * over time. Consider waking the compactor thread if any of the
5521 * fragmentation thresholds have been crossed as a result of this
5522 * decompression.
5523 */
5524 vm_consider_waking_compactor_swapper();
5525 #endif /* !XNU_TARGET_OS_OSX */
5526
5527 return retval;
5528 }
5529
5530
5531 inline bool
5532 vm_compressor_is_slot_compressed(int *slot)
5533 {
5534 #if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5535 #pragma unused(slot)
5536 return true;
5537 #else /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5538 c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
5539 return !slot_ptr->s_uncompressed;
5540 #endif /* !CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
5541 }
5542
5543 int
5544 vm_compressor_get(ppnum_t pn, int *slot, vm_compressor_options_t flags)
5545 {
5546 c_slot_mapping_t slot_ptr;
5547 char *dst;
5548 int zeroslot = 1;
5549 int retval;
5550
5551 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5552 if (flags & C_PAGE_UNMODIFIED) {
5553 retval = vm_uncompressed_get(pn, slot, flags | C_KEEP);
5554 if (retval == 0) {
5555 os_atomic_inc(&compressor_ro_uncompressed_get, relaxed);
5556 }
5557
5558 return retval;
5559 }
5560 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5561
5562 /* get the address of this page in the physical aperture so we can fill it in */
5563 dst = pmap_map_compressor_page(pn);
5564 slot_ptr = (c_slot_mapping_t)slot;
5565
5566 assert(dst != NULL);
5567
5568 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5569 int32_t data;
5570 int32_t *dptr;
5571
5572 /*
5573 * page was populated with a single value
5574 * that found a home in our hash table
5575 * grab that value from the hash table
5576 * and populate the page with it
5577 */
5578 dptr = (int32_t *)(uintptr_t)dst;
5579 data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
5580 sv_decompress(dptr, data);
5581
5582 if (!(flags & C_KEEP)) {
5583 c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5584
5585 OSAddAtomic(-1, &c_segment_pages_compressed);
5586 *slot = 0;
5587 }
5588 if (data) {
5589 OSAddAtomic(1, &c_segment_svp_nonzero_decompressions);
5590 } else {
5591 OSAddAtomic(1, &c_segment_svp_zero_decompressions);
5592 }
5593
5594 pmap_unmap_compressor_page(pn, dst);
5595 return 0;
5596 }
5597 retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
5598
5599 /*
5600 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
5601 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
5602 */
5603 if (zeroslot) {
5604 *slot = 0;
5605 }
5606
5607 pmap_unmap_compressor_page(pn, dst);
5608
5609 /*
5610 * returns 0 if we successfully decompressed a page from a segment already in memory
5611 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
5612 * returns -1 if we encountered an error swapping in the segment - decompression failed
5613 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
5614 */
5615 return retval;
5616 }
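/*
 * Illustrative sketch of how a hypothetical caller might act on the return
 * codes documented above; retry_later() and handle_decompression_failure()
 * are placeholders, not functions that exist in this file:
 *
 *     int rc = vm_compressor_get(pn, &slot, flags);
 *     switch (rc) {
 *     case 0:      // decompressed from a segment already in memory
 *     case 1:      // decompressed, but the segment had to be swapped in first
 *         break;
 *     case -2:     // C_DONT_BLOCK and the segment was busy or on disk
 *         retry_later();
 *         break;
 *     case -1:     // error swapping in the segment - decompression failed
 *     default:
 *         handle_decompression_failure();
 *         break;
 *     }
 */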
5617
5618 int
5619 vm_compressor_free(int *slot, vm_compressor_options_t flags)
5620 {
5621 bool slot_is_compressed = vm_compressor_is_slot_compressed(slot);
5622
5623 if (slot_is_compressed) {
5624 c_slot_mapping_t slot_ptr;
5625 int zeroslot = 1;
5626 int retval = 0;
5627
5628 assert(flags == 0 || flags == C_DONT_BLOCK);
5629
5630 slot_ptr = (c_slot_mapping_t)slot;
5631
5632 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
5633 c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
5634 OSAddAtomic(-1, &c_segment_pages_compressed);
5635
5636 *slot = 0;
5637 return 0;
5638 }
5639
5640 retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
5641 /*
5642 * returns 0 if we successfully freed the specified compressed page
5643 * returns -1 if we encountered an error swapping in the segment - decompression failed
5644 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
5645 */
5646
5647 if (retval == 0) {
5648 *slot = 0;
5649 }
5650
5651 return retval;
5652 }
5653 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5654 else {
5655 if ((flags & C_PAGE_UNMODIFIED) == 0) {
5656 /* moving from the uncompressed state to the compressed state, so free the uncompressed copy. */
5657 vm_uncompressed_free(slot, 0);
5658 assert(*slot == 0);
5659 }
5660 }
5661 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5662 return KERN_SUCCESS;
5663 }
5664
5665 int
5666 vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf, vm_compressor_options_t flags)
5667 {
5668 char *src;
5669 int retval = 0;
5670
5671 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5672 if (flags & C_PAGE_UNMODIFIED) {
5673 if (*slot) {
5674 os_atomic_inc(&compressor_ro_uncompressed_skip_returned, relaxed);
5675 return retval;
5676 } else {
5677 retval = vm_uncompressed_put(pn, slot);
5678 if (retval == KERN_SUCCESS) {
5679 os_atomic_inc(&compressor_ro_uncompressed_put, relaxed);
5680 return retval;
5681 }
5682 }
5683 }
5684 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5685
5686 /* get the address of the page in the physical aperture of the kernel's virtual address space */
5687 src = pmap_map_compressor_page(pn); /* XXX HERE JOE this needs to map with MTE */
5688 assert(src != NULL);
5689
5690 retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf,
5691 flags);
5692 pmap_unmap_compressor_page(pn, src);
5693
5694 return retval;
5695 }
5696
5697 void
5698 vm_compressor_transfer(
5699 int *dst_slot_p,
5700 int *src_slot_p)
5701 {
5702 c_slot_mapping_t dst_slot, src_slot;
5703 c_segment_t c_seg;
5704 uint16_t c_indx;
5705 c_slot_t cs;
5706
5707 src_slot = (c_slot_mapping_t) src_slot_p;
5708
5709 if (src_slot->s_cseg == C_SV_CSEG_ID || !vm_compressor_is_slot_compressed(src_slot_p)) {
5710 *dst_slot_p = *src_slot_p;
5711 *src_slot_p = 0;
5712 return;
5713 }
5714 dst_slot = (c_slot_mapping_t) dst_slot_p;
5715 Retry:
5716 PAGE_REPLACEMENT_DISALLOWED(TRUE);
5717 /* get segment for src_slot */
5718 c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
5719 /* lock segment */
5720 lck_mtx_lock_spin_always(&c_seg->c_lock);
5721 /* wait if it's busy */
5722 if (c_seg->c_busy && !c_seg->c_busy_swapping) {
5723 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5724 c_seg_wait_on_busy(c_seg);
5725 goto Retry;
5726 }
5727 /* find the c_slot */
5728 c_indx = src_slot->s_cindx;
5729 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
5730 /* point the c_slot back to dst_slot instead of src_slot */
5731 C_SLOT_ASSERT_PACKABLE(dst_slot);
5732 cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
5733 /* transfer */
5734 *dst_slot_p = *src_slot_p;
5735 *src_slot_p = 0;
5736 lck_mtx_unlock_always(&c_seg->c_lock);
5737 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5738 }
5739
5740 #if defined(__arm64__)
5741 extern uint64_t vm_swapfile_last_failed_to_create_ts;
5742 __attribute__((noreturn))
5743 void
5744 vm_panic_hibernate_write_image_failed(int err)
5745 {
5746 panic("hibernate_write_image encountered error 0x%x - %u, %u, %d, %d, %d, %d, %d, %d, %d, %d, %llu, %d, %d, %d\n",
5747 err,
5748 VM_PAGE_COMPRESSOR_COUNT, vm_page_wire_count,
5749 c_age_count, c_major_count, c_minor_count, (c_early_swapout_count + c_regular_swapout_count + c_late_swapout_count), c_swappedout_sparse_count,
5750 vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled, vm_swap_put_failures,
5751 (vm_swapfile_last_failed_to_create_ts ? 1:0), hibernate_no_swapspace, hibernate_flush_timed_out);
5752 }
5753 #endif /*(__arm64__)*/
5754
5755 #if CONFIG_FREEZE
5756
5757 int freezer_finished_filling = 0;
5758
5759 void
5760 vm_compressor_finished_filling(
5761 void **current_chead)
5762 {
5763 c_segment_t c_seg;
5764
5765 if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
5766 return;
5767 }
5768
5769 assert(c_seg->c_state == C_IS_FILLING);
5770
5771 lck_mtx_lock_spin_always(&c_seg->c_lock);
5772
5773 c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
5774
5775 lck_mtx_unlock_always(&c_seg->c_lock);
5776
5777 freezer_finished_filling++;
5778 }
5779
5780
5781 /*
5782 * This routine is used to transfer the compressed chunks from
5783 * the c_seg/cindx pointed to by slot_p into a new c_seg headed
5784 * by the current_chead and a new cindx within that c_seg.
5785 *
5786 * Currently, this routine is only used by the "freezer backed by
5787 * compressor with swap" mode to create a series of c_segs that
5788 * only contain compressed data belonging to one task. So, we
5789 * move a task's previously compressed data into a set of new
5790 * c_segs which will also hold the task's yet to be compressed data.
5791 */
5792
5793 kern_return_t
5794 vm_compressor_relocate(
5795 void **current_chead,
5796 int *slot_p)
5797 {
5798 c_slot_mapping_t slot_ptr;
5799 c_slot_mapping_t src_slot;
5800 uint32_t c_rounded_size;
5801 uint32_t c_size;
5802 uint16_t dst_slot;
5803 c_slot_t c_dst;
5804 c_slot_t c_src;
5805 uint16_t c_indx;
5806 c_segment_t c_seg_dst = NULL;
5807 c_segment_t c_seg_src = NULL;
5808 kern_return_t kr = KERN_SUCCESS;
5809
5810
5811 src_slot = (c_slot_mapping_t) slot_p;
5812
5813 if (src_slot->s_cseg == C_SV_CSEG_ID) {
5814 /*
5815 * no need to relocate... this is a page full of a single
5816 * value which is hashed to a single entry not contained
5817 * in a c_segment_t
5818 */
5819 return kr;
5820 }
5821
5822 if (vm_compressor_is_slot_compressed((int *)src_slot) == false) {
5823 /*
5824 * Unmodified anonymous pages are sitting uncompressed on disk.
5825 * So don't pull them back in again.
5826 */
5827 return kr;
5828 }
5829
5830 Relookup_dst:
5831 c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
5832 /*
5833 * returns with c_seg lock held
5834 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
5835 * c_nextslot has been allocated and
5836 * c_store.c_buffer populated
5837 */
5838 if (c_seg_dst == NULL) {
5839 /*
5840 * Out of compression segments?
5841 */
5842 kr = KERN_RESOURCE_SHORTAGE;
5843 goto out;
5844 }
5845
5846 assert(c_seg_dst->c_busy == 0);
5847
5848 C_SEG_BUSY(c_seg_dst);
5849
5850 dst_slot = c_seg_dst->c_nextslot;
5851
5852 lck_mtx_unlock_always(&c_seg_dst->c_lock);
5853
5854 Relookup_src:
5855 c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;
5856
5857 assert(c_seg_dst != c_seg_src);
5858
5859 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5860
5861 if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
5862 c_seg_src->c_state == C_IS_FILLING) {
5863 /*
5864 * Skip this page if:
5865 * a) the src c_seg is already on-disk (or on its way there)
5866 * A "thaw" can mark a process as eligible for
5867 * another freeze cycle without bringing any of
5868 * its swapped out c_segs back from disk (because
5869 * that is done on-demand).
5870 * Or, this page may be mapped elsewhere in the task's map,
5871 * and we may have marked it for swap already.
5872 *
5873 * b) Or, the src c_seg is being filled by the compressor
5874 * thread. We don't want the added latency of waiting for
5875 * this c_seg in the freeze path and so we skip it.
5876 */
5877
5878 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5879
5880 lck_mtx_unlock_always(&c_seg_src->c_lock);
5881
5882 c_seg_src = NULL;
5883
5884 goto out;
5885 }
5886
5887 if (c_seg_src->c_busy) {
5888 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5889 c_seg_wait_on_busy(c_seg_src);
5890
5891 c_seg_src = NULL;
5892
5893 PAGE_REPLACEMENT_DISALLOWED(TRUE);
5894
5895 goto Relookup_src;
5896 }
5897
5898 C_SEG_BUSY(c_seg_src);
5899
5900 lck_mtx_unlock_always(&c_seg_src->c_lock);
5901
5902 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5903
5904 /* find the c_slot */
5905 c_indx = src_slot->s_cindx;
5906
5907 c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
5908
5909 c_size = UNPACK_C_SIZE(c_src);
5910
5911 assert(c_size);
5912
5913 int combined_size;
5914 combined_size = c_size;
5915
5916 if (combined_size > (uint32_t)(c_seg_bufsize - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
5917 /*
5918 * This segment is full. We need a new one.
5919 */
5920
5921 PAGE_REPLACEMENT_DISALLOWED(TRUE);
5922
5923 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
5924 C_SEG_WAKEUP_DONE(c_seg_src);
5925 lck_mtx_unlock_always(&c_seg_src->c_lock);
5926
5927 c_seg_src = NULL;
5928
5929 lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
5930
5931 assert(c_seg_dst->c_busy);
5932 assert(c_seg_dst->c_state == C_IS_FILLING);
5933 assert(!c_seg_dst->c_on_minorcompact_q);
5934
5935 c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
5936 assert(*current_chead == NULL);
5937
5938 C_SEG_WAKEUP_DONE(c_seg_dst);
5939
5940 lck_mtx_unlock_always(&c_seg_dst->c_lock);
5941
5942 c_seg_dst = NULL;
5943
5944 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5945
5946 goto Relookup_dst;
5947 }
5948
5949 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
5950
5951 memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], combined_size);
5952 /*
5953 * Is platform alignment actually necessary since wkdm aligns its output?
5954 */
5955 c_rounded_size = (combined_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
5956
5957 cslot_copy(c_dst, c_src);
5958 c_dst->c_offset = c_seg_dst->c_nextoffset;
5959
5960 if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
5961 c_seg_dst->c_firstemptyslot++;
5962 }
5963
5964 c_seg_dst->c_slots_used++;
5965 c_seg_dst->c_nextslot++;
5966 c_seg_dst->c_bytes_used += c_rounded_size;
5967 c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
5968
5969
5970 PACK_C_SIZE(c_src, 0);
5971
5972 c_seg_src->c_bytes_used -= c_rounded_size;
5973 c_seg_src->c_bytes_unused += c_rounded_size;
5974
5975 assert(c_seg_src->c_slots_used);
5976 c_seg_src->c_slots_used--;
5977
5978 if (!c_seg_src->c_swappedin) {
5979 /* Pessimistically lose swappedin status when non-swappedin pages are added. */
5980 c_seg_dst->c_swappedin = false;
5981 }
5982
5983 if (c_indx < c_seg_src->c_firstemptyslot) {
5984 c_seg_src->c_firstemptyslot = c_indx;
5985 }
5986
5987 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
5988
5989 PAGE_REPLACEMENT_ALLOWED(TRUE);
5990 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
5991 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
5992 slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
5993 slot_ptr->s_cindx = dst_slot;
5994
5995 PAGE_REPLACEMENT_ALLOWED(FALSE);
5996
5997 out:
5998 if (c_seg_src) {
5999 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
6000
6001 C_SEG_WAKEUP_DONE(c_seg_src);
6002
6003 if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
6004 if (!c_seg_src->c_on_minorcompact_q) {
6005 c_seg_need_delayed_compaction(c_seg_src, FALSE);
6006 }
6007 }
6008
6009 lck_mtx_unlock_always(&c_seg_src->c_lock);
6010 }
6011
6012 if (c_seg_dst) {
6013 PAGE_REPLACEMENT_DISALLOWED(TRUE);
6014
6015 lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
6016
6017 if (c_seg_dst->c_nextoffset >= c_seg_off_limit || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
6018 /*
6019 * Nearing or exceeded maximum slot and offset capacity.
6020 */
6021 assert(c_seg_dst->c_busy);
6022 assert(c_seg_dst->c_state == C_IS_FILLING);
6023 assert(!c_seg_dst->c_on_minorcompact_q);
6024
6025 c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
6026 assert(*current_chead == NULL);
6027 }
6028
6029 C_SEG_WAKEUP_DONE(c_seg_dst);
6030
6031 lck_mtx_unlock_always(&c_seg_dst->c_lock);
6032
6033 c_seg_dst = NULL;
6034
6035 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6036 }
6037
6038 return kr;
6039 }
6040 #endif /* CONFIG_FREEZE */
6041
6042 #if DEVELOPMENT || DEBUG
6043
6044 void
6045 vm_compressor_inject_error(int *slot)
6046 {
6047 c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
6048
6049 /* No error detection for single-value compression. */
6050 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
6051 printf("%s(): cannot inject errors in SV-compressed pages\n", __func__ );
6052 return;
6053 }
6054
6055 /* s_cseg is actually "segno+1" */
6056 const uint32_t c_segno = slot_ptr->s_cseg - 1;
6057
6058 assert(c_segno < c_segments_available);
6059 assert(c_segments[c_segno].c_segno >= c_segments_available);
6060
6061 const c_segment_t c_seg = c_segments[c_segno].c_seg;
6062
6063 PAGE_REPLACEMENT_DISALLOWED(TRUE);
6064
6065 lck_mtx_lock_spin_always(&c_seg->c_lock);
6066 assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
6067
6068 const uint16_t c_indx = slot_ptr->s_cindx;
6069 assert(c_indx < c_seg->c_nextslot);
6070
6071 /*
6072 * To safely make this segment temporarily writable, we need to mark
6073 * the segment busy, which allows us to release the segment lock.
6074 */
6075 while (c_seg->c_busy) {
6076 c_seg_wait_on_busy(c_seg);
6077 lck_mtx_lock_spin_always(&c_seg->c_lock);
6078 }
6079 C_SEG_BUSY(c_seg);
6080
6081 bool already_writable = (c_seg->c_state == C_IS_FILLING);
6082 if (!already_writable) {
6083 /*
6084 * Protection update must be performed preemptibly, so temporarily drop
6085 * the lock. Having set c_busy will prevent most other concurrent
6086 * operations.
6087 */
6088 lck_mtx_unlock_always(&c_seg->c_lock);
6089 C_SEG_MAKE_WRITEABLE(c_seg);
6090 lck_mtx_lock_spin_always(&c_seg->c_lock);
6091 }
6092
6093 /*
6094 * Once we've released the lock following our c_state == C_IS_FILLING check,
6095 * c_current_seg_filled() can (re-)write-protect the segment. However, it
6096 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
6097 * can detect this by re-checking after we've reobtained the lock.
6098 */
6099 if (already_writable && c_seg->c_state != C_IS_FILLING) {
6100 lck_mtx_unlock_always(&c_seg->c_lock);
6101 C_SEG_MAKE_WRITEABLE(c_seg);
6102 lck_mtx_lock_spin_always(&c_seg->c_lock);
6103 already_writable = false;
6104 /* Segment can't be freed while c_busy is set. */
6105 assert(c_seg->c_state != C_IS_FILLING);
6106 }
6107
6108 /*
6109 * Skip if the segment is on disk. This check can only be performed after
6110 * the final acquisition of the segment lock before we attempt to write to
6111 * the segment.
6112 */
6113 if (!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)) {
6114 c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
6115 int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
6116 /* assume that the compressed data holds at least one int32_t */
6117 assert(UNPACK_C_SIZE(cs) > sizeof(*data));
6118 /*
6119 * This bit is known to be in the payload of a MISS packet resulting from
6120 * the pattern used in the test pattern from decompression_failure.c.
6121 * Flipping it should result in many corrupted bits in the test page.
6122 */
6123 data[0] ^= 0x00000100;
6124 }
6125
6126 if (!already_writable) {
6127 lck_mtx_unlock_always(&c_seg->c_lock);
6128 C_SEG_WRITE_PROTECT(c_seg);
6129 lck_mtx_lock_spin_always(&c_seg->c_lock);
6130 }
6131
6132 C_SEG_WAKEUP_DONE(c_seg);
6133 lck_mtx_unlock_always(&c_seg->c_lock);
6134
6135 PAGE_REPLACEMENT_DISALLOWED(FALSE);
6136 }
6137
6138 /*
6139 * Serialize information about a specific segment
6140 * returns KERN_SUCCESS if the segment was written or there is nothing to write for this segno,
6141 * KERN_NO_SPACE if the supplied buffer is too small
6142 * 'size' is in/out: on input the size of the buffer, on output the number of bytes written (0 if nothing was written)
6143 */
6144 kern_return_t
6145 vm_compressor_serialize_segment_debug_info(int segno, char *buf, size_t *size)
6146 {
6147 size_t insize = *size;
6148 size_t offset = 0;
6149 *size = 0;
6150 if (c_segments[segno].c_segno < c_segments_available) {
6151 /* A value below c_segments_available means no c_seg pointer is stored here, so this entry must be
6152 * an index in the free list. If this were an active c_segment, .c_seg would hold a pointer, which,
6153 * interpreted as an integer, would be larger than c_segments_available. See also the assert to this
6154 * effect right after c_seg is assigned in c_seg_allocate().
6155 */
6156 return KERN_SUCCESS;
6157 }
6158 if (c_segments[segno].c_segno == (uint32_t)-1) {
6159 /* c_segno of the end of the free-list */
6160 return KERN_SUCCESS;
6161 }
6162
6163 const struct c_segment* c_seg = c_segments[segno].c_seg;
6164 if (c_seg->c_state == C_IS_FREE) {
6165 return KERN_SUCCESS; /* nothing needs to be done */
6166 }
6167
6168 int nslots = c_seg->c_nextslot;
6169 /* do we have enough space? */
6170 if (sizeof(struct c_segment_info) + (nslots * sizeof(struct c_slot_info)) > insize) {
6171 return KERN_NO_SPACE; /* not enough space, please call me again */
6172 }
6173
6174 struct c_segment_info* csi = (struct c_segment_info*)buf;
6175 offset += sizeof(struct c_segment_info);
6176
6177 csi->csi_mysegno = c_seg->c_mysegno;
6178 csi->csi_creation_ts = c_seg->c_creation_ts;
6179 csi->csi_swappedin_ts = c_seg->c_swappedin_ts;
6180 csi->csi_bytes_unused = c_seg->c_bytes_unused;
6181 csi->csi_bytes_used = c_seg->c_bytes_used;
6182 csi->csi_populated_offset = c_seg->c_populated_offset;
6183 csi->csi_state = c_seg->c_state;
6184 csi->csi_swappedin = c_seg->c_swappedin;
6185 csi->csi_on_minor_compact_q = c_seg->c_on_minorcompact_q;
6186 csi->csi_has_donated_pages = c_seg->c_has_donated_pages;
6187 csi->csi_slots_used = (uint16_t)c_seg->c_slots_used;
6188 csi->csi_slot_var_array_len = c_seg->c_slot_var_array_len;
6189 csi->csi_slots_len = (uint16_t)nslots;
6190 #if TRACK_C_SEGMENT_UTILIZATION
6191 csi->csi_decompressions_since_swapin = c_seg->c_decompressions_since_swapin;
6192 #else
6193 csi->csi_decompressions_since_swapin = 0;
6194 #endif /* TRACK_C_SEGMENT_UTILIZATION */
6195
6196 for (int si = 0; si < nslots; ++si) {
6197 /* see also c_seg_validate() for some of the details */
6198 const struct c_slot* cs = C_SEG_SLOT_FROM_INDEX(c_seg, si);
6199 struct c_slot_info* ssi = (struct c_slot_info*)(buf + offset);
6200 ssi->csi_size = UNPACK_C_SIZE(cs);
6201 offset += sizeof(struct c_slot_info);
6202 }
6203 *size = offset;
6204 return KERN_SUCCESS;
6205 }
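/*
 * Illustrative sketch of walking the serialized layout produced above: a
 * c_segment_info header followed immediately by csi_slots_len c_slot_info
 * records. The buffer size chosen here is only an example:
 *
 *     char buf[4096];
 *     size_t size = sizeof(buf);
 *     if (vm_compressor_serialize_segment_debug_info(segno, buf, &size) == KERN_SUCCESS && size != 0) {
 *         const struct c_segment_info *csi = (const struct c_segment_info *)buf;
 *         const struct c_slot_info *slots = (const struct c_slot_info *)(buf + sizeof(*csi));
 *         for (uint16_t i = 0; i < csi->csi_slots_len; i++) {
 *             // slots[i].csi_size == 0 means that slot is empty
 *         }
 *     }
 *     // KERN_NO_SPACE means the buffer is too small: grow it and call again for the same segno.
 */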
6206
6207 #endif /* DEVELOPMENT || DEBUG */
6208
6209
6210 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
6211
6212 struct vnode;
6213 extern void vm_swapfile_open(const char *path, struct vnode **vp);
6214 extern int vm_swapfile_preallocate(struct vnode *vp, uint64_t *size, boolean_t *pin);
6215
6216 struct vnode *uncompressed_vp0 = NULL;
6217 struct vnode *uncompressed_vp1 = NULL;
6218 uint32_t uncompressed_file0_free_pages = 0, uncompressed_file1_free_pages = 0;
6219 uint64_t uncompressed_file0_free_offset = 0, uncompressed_file1_free_offset = 0;
6220
6221 uint64_t compressor_ro_uncompressed = 0;
6222 uint64_t compressor_ro_uncompressed_total_returned = 0;
6223 uint64_t compressor_ro_uncompressed_skip_returned = 0;
6224 uint64_t compressor_ro_uncompressed_get = 0;
6225 uint64_t compressor_ro_uncompressed_put = 0;
6226 uint64_t compressor_ro_uncompressed_swap_usage = 0;
6227
6228 extern void vnode_put(struct vnode* vp);
6229 extern int vnode_getwithref(struct vnode* vp);
6230 extern int vm_swapfile_io(struct vnode *vp, uint64_t offset, uint64_t start, int npages, int flags, void *upl_ctx);
6231
6232 #define MAX_OFFSET_PAGES (255)
6233 uint64_t uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6234 uint64_t uncompressed_file1_space_bitmap[MAX_OFFSET_PAGES];
6235
6236 #define UNCOMPRESSED_FILEIDX_OFFSET_MASK (((uint32_t)1<<31ull) - 1)
6237 #define UNCOMPRESSED_FILEIDX_SHIFT (29)
6238 #define UNCOMPRESSED_FILEIDX_MASK (3)
6239 #define UNCOMPRESSED_OFFSET_SHIFT (29)
6240 #define UNCOMPRESSED_OFFSET_MASK (7)
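/*
 * Illustrative note on the slot layout implied by the macros above, assuming the
 * packing used in vm_uncompressed_reserve_space_in_swap(): bits 0-28 hold the byte
 * offset into the file, bits 29-30 hold the file index (1 for uncompressedswap0,
 * 2 for uncompressedswap1), and bit 31 is left for the s_uncompressed flag:
 *
 *     int slot = (1 << UNCOMPRESSED_FILEIDX_SHIFT) + (5 * PAGE_SIZE);  // page 5 of file 0
 *     vm_uncompressed_extract_swap_file(slot);     // -> 1
 *     vm_uncompressed_extract_swap_offset(slot);   // -> 5 * PAGE_SIZE
 */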
6241
6242 static uint32_t
6243 vm_uncompressed_extract_swap_file(int slot)
6244 {
6245 uint32_t fileidx = (((uint32_t)slot & UNCOMPRESSED_FILEIDX_OFFSET_MASK) >> UNCOMPRESSED_FILEIDX_SHIFT) & UNCOMPRESSED_FILEIDX_MASK;
6246 return fileidx;
6247 }
6248
6249 static uint32_t
6250 vm_uncompressed_extract_swap_offset(int slot)
6251 {
6252 return slot & (uint32_t)(~(UNCOMPRESSED_OFFSET_MASK << UNCOMPRESSED_OFFSET_SHIFT));
6253 }
6254
6255 static void
6256 vm_uncompressed_return_space_to_swap(int slot)
6257 {
6258 PAGE_REPLACEMENT_ALLOWED(TRUE);
6259 uint32_t fileidx = vm_uncompressed_extract_swap_file(slot);
6260 if (fileidx == 1) {
6261 uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6262 uint64_t pgidx = free_offset / PAGE_SIZE_64;
6263 uint64_t chunkidx = pgidx / 64;
6264 uint64_t chunkoffset = pgidx % 64;
6265 #if DEVELOPMENT || DEBUG
6266 uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6267 uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6268 assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6269 #endif /*DEVELOPMENT || DEBUG*/
6270 assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6271 "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6272 uncompressed_file0_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6273 assertf(!(uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6274 "0x%x %llu %llu", slot, chunkidx, chunkoffset);
6275
6276 uncompressed_file0_free_pages++;
6277 } else {
6278 uint32_t free_offset = vm_uncompressed_extract_swap_offset(slot);
6279 uint64_t pgidx = free_offset / PAGE_SIZE_64;
6280 uint64_t chunkidx = pgidx / 64;
6281 uint64_t chunkoffset = pgidx % 64;
6282 assertf((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6283 "%llu %llu", chunkidx, chunkoffset);
6284 uncompressed_file1_space_bitmap[chunkidx] &= ~((uint64_t)1 << chunkoffset);
6285
6286 uncompressed_file1_free_pages++;
6287 }
6288 compressor_ro_uncompressed_swap_usage--;
6289 PAGE_REPLACEMENT_ALLOWED(FALSE);
6290 }
6291
6292 static int
6293 vm_uncompressed_reserve_space_in_swap()
6294 {
6295 int slot = 0;
6296 if (uncompressed_file0_free_pages == 0 && uncompressed_file1_free_pages == 0) {
6297 return -1;
6298 }
6299
6300 PAGE_REPLACEMENT_ALLOWED(TRUE);
6301 if (uncompressed_file0_free_pages) {
6302 uint64_t chunkidx = 0;
6303 uint64_t chunkoffset = 0;
6304 while (uncompressed_file0_space_bitmap[chunkidx] == 0xffffffffffffffff) {
6305 chunkidx++;
6306 }
6307 while (uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6308 chunkoffset++;
6309 }
6310
6311 assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0,
6312 "%llu %llu", chunkidx, chunkoffset);
6313 #if DEVELOPMENT || DEBUG
6314 uint64_t vaddr = (uint64_t)&uncompressed_file0_space_bitmap[chunkidx];
6315 uint64_t maxvaddr = (uint64_t)&uncompressed_file0_space_bitmap[MAX_OFFSET_PAGES];
6316 assertf(vaddr < maxvaddr, "0x%llx 0x%llx", vaddr, maxvaddr);
6317 #endif /*DEVELOPMENT || DEBUG*/
6318 uncompressed_file0_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6319 uncompressed_file0_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6320 assertf((uncompressed_file0_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)),
6321 "%llu %llu", chunkidx, chunkoffset);
6322
6323 assert(uncompressed_file0_free_offset <= (1 << UNCOMPRESSED_OFFSET_SHIFT));
6324 slot = (int)((1 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file0_free_offset);
6325 uncompressed_file0_free_pages--;
6326 } else {
6327 uint64_t chunkidx = 0;
6328 uint64_t chunkoffset = 0;
6329 while (uncompressed_file1_space_bitmap[chunkidx] == 0xFFFFFFFFFFFFFFFF) {
6330 chunkidx++;
6331 }
6332 while (uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) {
6333 chunkoffset++;
6334 }
6335 assert((uncompressed_file1_space_bitmap[chunkidx] & ((uint64_t)1 << chunkoffset)) == 0);
6336 uncompressed_file1_space_bitmap[chunkidx] |= ((uint64_t)1 << chunkoffset);
6337 uncompressed_file1_free_offset = ((chunkidx * 64) + chunkoffset) * PAGE_SIZE_64;
6338 slot = (int)((2 << UNCOMPRESSED_FILEIDX_SHIFT) + uncompressed_file1_free_offset);
6339 uncompressed_file1_free_pages--;
6340 }
6341 compressor_ro_uncompressed_swap_usage++;
6342 PAGE_REPLACEMENT_ALLOWED(FALSE);
6343 return slot;
6344 }
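/*
 * Illustrative note on the bitmap arithmetic above: the free-space bitmaps track
 * one bit per page, 64 pages per uint64_t word. A worked example, assuming a
 * 16KB page size:
 *
 *     offset      = 0x1A4000;               // byte offset within the swap file
 *     pgidx       = offset / PAGE_SIZE;     // 0x1A4000 / 0x4000 == 105
 *     chunkidx    = pgidx / 64;             // word 1 of the bitmap
 *     chunkoffset = pgidx % 64;             // bit 41 within that word
 *
 * Reserving a page sets that bit and decrements the free-page count;
 * vm_uncompressed_return_space_to_swap() clears it and increments the count.
 */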
6345
6346 #define MAX_IO_REQ (16)
6347 struct _uncompressor_io_req {
6348 uint64_t addr;
6349 bool inuse;
6350 } uncompressor_io_req[MAX_IO_REQ];
6351
6352 int
6353 vm_uncompressed_put(ppnum_t pn, int *slot)
6354 {
6355 int retval = 0;
6356 struct vnode *uncompressed_vp = NULL;
6357 uint64_t uncompress_offset = 0;
6358
6359 again:
6360 if (uncompressed_vp0 == NULL) {
6361 PAGE_REPLACEMENT_ALLOWED(TRUE);
6362 if (uncompressed_vp0 == NULL) {
6363 uint64_t size = (MAX_OFFSET_PAGES * 1024 * 1024ULL);
6364 vm_swapfile_open("/private/var/vm/uncompressedswap0", &uncompressed_vp0);
6365 if (uncompressed_vp0 == NULL) {
6366 PAGE_REPLACEMENT_ALLOWED(FALSE);
6367 return KERN_NO_ACCESS;
6368 }
6369 vm_swapfile_preallocate(uncompressed_vp0, &size, NULL);
6370 uncompressed_file0_free_pages = (uint32_t)atop(size);
6371 bzero(uncompressed_file0_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
6372
6373 int i = 0;
6374 for (; i < MAX_IO_REQ; i++) {
6375 kmem_alloc(kernel_map, (vm_offset_t*)&uncompressor_io_req[i].addr, PAGE_SIZE_64, KMA_NOFAIL | KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
6376 uncompressor_io_req[i].inuse = false;
6377 }
6378
6379 vm_swapfile_open("/private/var/vm/uncompressedswap1", &uncompressed_vp1);
6380 assert(uncompressed_vp1);
6381 vm_swapfile_preallocate(uncompressed_vp1, &size, NULL);
6382 uncompressed_file1_free_pages = (uint32_t)atop(size);
6383 bzero(uncompressed_file1_space_bitmap, sizeof(uint64_t) * MAX_OFFSET_PAGES);
6384 PAGE_REPLACEMENT_ALLOWED(FALSE);
6385 } else {
6386 PAGE_REPLACEMENT_ALLOWED(FALSE);
6387 delay(100);
6388 goto again;
6389 }
6390 }
6391
6392 int swapinfo = vm_uncompressed_reserve_space_in_swap();
6393 if (swapinfo == -1) {
6394 *slot = 0;
6395 return KERN_RESOURCE_SHORTAGE;
6396 }
6397
6398 if (vm_uncompressed_extract_swap_file(swapinfo) == 1) {
6399 uncompressed_vp = uncompressed_vp0;
6400 } else {
6401 uncompressed_vp = uncompressed_vp1;
6402 }
6403 uncompress_offset = vm_uncompressed_extract_swap_offset(swapinfo);
6404 if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
6405 os_log_error_with_startup_serial(OS_LOG_DEFAULT, "vm_uncompressed_put: vnode_getwithref on swapfile failed with %d\n", retval);
6406 } else {
6407 int i = 0;
6408 retry:
6409 PAGE_REPLACEMENT_ALLOWED(TRUE);
6410 for (i = 0; i < MAX_IO_REQ; i++) {
6411 if (uncompressor_io_req[i].inuse == false) {
6412 uncompressor_io_req[i].inuse = true;
6413 break;
6414 }
6415 }
6416 if (i == MAX_IO_REQ) {
6417 assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
6418 PAGE_REPLACEMENT_ALLOWED(FALSE);
6419 thread_block(THREAD_CONTINUE_NULL);
6420 goto retry;
6421 }
6422 PAGE_REPLACEMENT_ALLOWED(FALSE);
6423 void *addr = pmap_map_compressor_page(pn);
6424 memcpy((void*)uncompressor_io_req[i].addr, addr, PAGE_SIZE_64);
6425 pmap_unmap_compressor_page(pn, addr);
6426
6427 retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_WRITE, NULL);
6428 if (retval) {
6429 *slot = 0;
6430 } else {
6431 *slot = (int)swapinfo;
6432 ((c_slot_mapping_t)(slot))->s_uncompressed = 1;
6433 }
6434 vnode_put(uncompressed_vp);
6435 PAGE_REPLACEMENT_ALLOWED(TRUE);
6436 uncompressor_io_req[i].inuse = false;
6437 thread_wakeup((event_t)&uncompressor_io_req);
6438 PAGE_REPLACEMENT_ALLOWED(FALSE);
6439 }
6440 return retval;
6441 }
6442
6443 int
6444 vm_uncompressed_get(ppnum_t pn, int *slot, __unused vm_compressor_options_t flags)
6445 {
6446 int retval = 0;
6447 struct vnode *uncompressed_vp = NULL;
6448 uint32_t fileidx = vm_uncompressed_extract_swap_file(*slot);
6449 uint64_t uncompress_offset = vm_uncompressed_extract_swap_offset(*slot);
6450
6451 if (__improbable(flags & C_KDP)) {
6452 return -2;
6453 }
6454
6455 if (fileidx == 1) {
6456 uncompressed_vp = uncompressed_vp0;
6457 } else {
6458 uncompressed_vp = uncompressed_vp1;
6459 }
6460
6461 if ((retval = vnode_getwithref(uncompressed_vp)) != 0) {
6462 os_log_error_with_startup_serial(OS_LOG_DEFAULT, "vm_uncompressed_get: vnode_getwithref on swapfile failed with %d\n", retval);
6463 } else {
6464 int i = 0;
6465 retry:
6466 PAGE_REPLACEMENT_ALLOWED(TRUE);
6467 for (i = 0; i < MAX_IO_REQ; i++) {
6468 if (uncompressor_io_req[i].inuse == false) {
6469 uncompressor_io_req[i].inuse = true;
6470 break;
6471 }
6472 }
6473 if (i == MAX_IO_REQ) {
6474 assert_wait((event_t)&uncompressor_io_req, THREAD_UNINT);
6475 PAGE_REPLACEMENT_ALLOWED(FALSE);
6476 thread_block(THREAD_CONTINUE_NULL);
6477 goto retry;
6478 }
6479 PAGE_REPLACEMENT_ALLOWED(FALSE);
6480 retval = vm_swapfile_io(uncompressed_vp, uncompress_offset, (uint64_t)uncompressor_io_req[i].addr, 1, SWAP_READ, NULL);
6481 vnode_put(uncompressed_vp);
6482 void *addr = pmap_map_compressor_page(pn);
6483 memcpy(addr, (void*)uncompressor_io_req[i].addr, PAGE_SIZE_64);
6484 pmap_unmap_compressor_page(pn, addr);
6485 PAGE_REPLACEMENT_ALLOWED(TRUE);
6486 uncompressor_io_req[i].inuse = false;
6487 thread_wakeup((event_t)&uncompressor_io_req);
6488 PAGE_REPLACEMENT_ALLOWED(FALSE);
6489 }
6490 return retval;
6491 }
6492
6493 int
6494 vm_uncompressed_free(int *slot, __unused vm_compressor_options_t flags)
6495 {
6496 vm_uncompressed_return_space_to_swap(*slot);
6497 *slot = 0;
6498 return 0;
6499 }
6500
6501 #endif /*CONFIG_TRACK_UNMODIFIED_ANON_PAGES*/
6502