/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Telemetry from the VM is usually collected at a daily cadence.
 * All of those events live in this file, along with the single thread
 * call that reports them.
 *
 * NB: The freezer subsystem has its own telemetry based on its budget interval,
 * so it's not included here.
 */
#include <kern/thread_call.h>
#include <libkern/coreanalytics/coreanalytics.h>
#include <os/log.h>
#include <vm/vm_page.h>
#include <vm/vm_compressor_internal.h>
#if CONFIG_EXCLAVES
#include <kern/exclaves_memory.h>
#endif /* CONFIG_EXCLAVES */

#include "vm_compressor_backing_store_internal.h"

void vm_analytics_tick(void *arg0, void *arg1);

#define ANALYTICS_PERIOD_HOURS (24ULL)

static thread_call_t vm_analytics_thread_call;

CA_EVENT(vm_swapusage,
    CA_INT, max_alloced,
    CA_INT, max_used,
    CA_INT, trial_deployment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_treatment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_experiment_id);

CA_EVENT(mlock_failures,
    CA_INT, over_global_limit,
    CA_INT, over_user_limit,
    CA_INT, trial_deployment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_treatment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_experiment_id);

/*
 * NB: It's a good practice to include these trial
 * identifiers in all of our events so that we can
 * measure the impact of any A/B tests on these metrics.
 */
extern uuid_string_t trial_treatment_id;
extern uuid_string_t trial_experiment_id;
extern int trial_deployment_id;

static void
add_trial_uuids(char *treatment_id, char *experiment_id)
{
	strlcpy(treatment_id, trial_treatment_id, CA_UUID_LEN);
	strlcpy(experiment_id, trial_experiment_id, CA_UUID_LEN);
}

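/*
 * Illustration only (not compiled): a hypothetical daily event, here called
 * "my_metric", would carry the trial fields and populate them the same way
 * the reporters below do:
 *
 *	CA_EVENT(my_metric,
 *	    CA_INT, value,
 *	    CA_INT, trial_deployment_id,
 *	    CA_STATIC_STRING(CA_UUID_LEN), trial_treatment_id,
 *	    CA_STATIC_STRING(CA_UUID_LEN), trial_experiment_id);
 *
 *	static void
 *	report_my_metric(void)
 *	{
 *		ca_event_t event = CA_EVENT_ALLOCATE(my_metric);
 *		CA_EVENT_TYPE(my_metric) * e = event->data;
 *
 *		e->value = 0; // hypothetical measurement
 *		add_trial_uuids(e->trial_treatment_id, e->trial_experiment_id);
 *		e->trial_deployment_id = trial_deployment_id;
 *		CA_EVENT_SEND(event);
 *	}
 */

/**
 * Report the peak number of swap segments allocated and in use since the
 * last report, then reset the swap-usage tracking counters.
 */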
static void
report_vm_swapusage()
{
	uint64_t max_alloced, max_used;
	ca_event_t event = CA_EVENT_ALLOCATE(vm_swapusage);
	CA_EVENT_TYPE(vm_swapusage) * e = event->data;

	vm_swap_reset_max_segs_tracking(&max_alloced, &max_used);
	e->max_alloced = max_alloced;
	e->max_used = max_used;
	add_trial_uuids(e->trial_treatment_id, e->trial_experiment_id);
	e->trial_deployment_id = trial_deployment_id;
	CA_EVENT_SEND(event);
}

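/**
 * Report how many wire/mlock attempts failed because they would have exceeded
 * the global or the per-user wired-memory limit, then reset both counters.
 */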
static void
report_mlock_failures()
{
	ca_event_t event = CA_EVENT_ALLOCATE(mlock_failures);
	CA_EVENT_TYPE(mlock_failures) * e = event->data;

	e->over_global_limit = os_atomic_load_wide(&vm_add_wire_count_over_global_limit, relaxed);
	e->over_user_limit = os_atomic_load_wide(&vm_add_wire_count_over_user_limit, relaxed);

	os_atomic_store_wide(&vm_add_wire_count_over_global_limit, 0, relaxed);
	os_atomic_store_wide(&vm_add_wire_count_over_user_limit, 0, relaxed);

	add_trial_uuids(e->trial_treatment_id, e->trial_experiment_id);
	e->trial_deployment_id = trial_deployment_id;
	CA_EVENT_SEND(event);
}

#if XNU_TARGET_OS_WATCH
CA_EVENT(compressor_age,
    CA_INT, hour1,
    CA_INT, hour6,
    CA_INT, hour12,
    CA_INT, hour24,
    CA_INT, hour36,
    CA_INT, hour48,
    CA_INT, hourMax,
    CA_INT, trial_deployment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_treatment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_experiment_id);

/**
 * Compressor age bucket descriptor.
 */
typedef struct {
	/* Number of segments in this bucket. */
	uint64_t count;
	/* The bucket's lower bound (inclusive) */
	uint64_t lower;
	/* The bucket's upper bound (exclusive) */
	uint64_t upper;
} c_reporting_bucket_t;
#define C_REPORTING_BUCKETS_MAX (UINT64_MAX)
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif
#define HR_TO_S(x) ((x) * 60 * 60)
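/* For example, HR_TO_S(6) == 21600 seconds and HR_TO_S(24) == 86400 seconds. */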

/**
 * Report the age of segments in the compressor.
 */
static void
report_compressor_age()
{
	/* If the compressor is not configured, do nothing and return early. */
	if (vm_compressor_mode == VM_PAGER_NOT_CONFIGURED) {
		os_log(OS_LOG_DEFAULT, "%s: vm_compressor_mode == VM_PAGER_NOT_CONFIGURED, returning early", __func__);
		return;
	}

	const queue_head_t *c_queues[] = {&c_age_list_head, &c_major_list_head};
	c_reporting_bucket_t c_buckets[] = {
		{.count = 0, .lower = HR_TO_S(0), .upper = HR_TO_S(1)},   /* [0, 1) hours */
		{.count = 0, .lower = HR_TO_S(1), .upper = HR_TO_S(6)},   /* [1, 6) hours */
		{.count = 0, .lower = HR_TO_S(6), .upper = HR_TO_S(12)},  /* [6, 12) hours */
		{.count = 0, .lower = HR_TO_S(12), .upper = HR_TO_S(24)}, /* [12, 24) hours */
		{.count = 0, .lower = HR_TO_S(24), .upper = HR_TO_S(36)}, /* [24, 36) hours */
		{.count = 0, .lower = HR_TO_S(36), .upper = HR_TO_S(48)}, /* [36, 48) hours */
		{.count = 0, .lower = HR_TO_S(48), .upper = C_REPORTING_BUCKETS_MAX}, /* [48, MAX) hours */
	};
	clock_sec_t now;
	clock_nsec_t nsec;

	/* Collect the segments and update the bucket counts. */
	lck_mtx_lock_spin_always(c_list_lock);
	for (unsigned q = 0; q < ARRAY_SIZE(c_queues); q++) {
		c_segment_t c_seg = (c_segment_t) queue_first(c_queues[q]);
		while (!queue_end(c_queues[q], (queue_entry_t) c_seg)) {
			for (unsigned b = 0; b < ARRAY_SIZE(c_buckets); b++) {
				uint32_t creation_ts = c_seg->c_creation_ts;
				clock_get_system_nanotime(&now, &nsec);
				clock_sec_t age = now - creation_ts;
				if ((age >= c_buckets[b].lower) &&
				    (age < c_buckets[b].upper)) {
					c_buckets[b].count++;
					break;
				}
			}
			c_seg = (c_segment_t) queue_next(&c_seg->c_age_list);
		}
	}
	lck_mtx_unlock_always(c_list_lock);

	/* Send the ages to CoreAnalytics. */
	ca_event_t event = CA_EVENT_ALLOCATE(compressor_age);
	CA_EVENT_TYPE(compressor_age) * e = event->data;
	e->hour1 = c_buckets[0].count;
	e->hour6 = c_buckets[1].count;
	e->hour12 = c_buckets[2].count;
	e->hour24 = c_buckets[3].count;
	e->hour36 = c_buckets[4].count;
	e->hour48 = c_buckets[5].count;
	e->hourMax = c_buckets[6].count;
	add_trial_uuids(e->trial_treatment_id, e->trial_experiment_id);
	e->trial_deployment_id = trial_deployment_id;
	CA_EVENT_SEND(event);
}
#endif /* XNU_TARGET_OS_WATCH */


extern uint64_t max_mem;
CA_EVENT(accounting_health, CA_INT, percentage);
/**
 * Report health of resident vm page accounting.
 */
static void
report_accounting_health()
{
	/**
	 * @note If a new accounting bucket is added, it must also be added in
	 * MemoryMaintenance sysstatuscheck, which panics when accounting reaches
	 * unhealthy levels.
	 */
	int64_t pages = (vm_page_wire_count
	    + vm_page_free_count
	    + vm_page_inactive_count
	    + vm_page_active_count
	    + VM_PAGE_COMPRESSOR_COUNT
	    + vm_page_speculative_count
#if CONFIG_SECLUDED_MEMORY
	    + vm_page_secluded_count
#endif /* CONFIG_SECLUDED_MEMORY */
	    );
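	/* Express the tracked pages as a percentage of all physical pages. */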
	int64_t percentage = (pages * 100) / (max_mem >> PAGE_SHIFT);

	/* Send the percentage health to CoreAnalytics. */
	ca_event_t event = CA_EVENT_ALLOCATE(accounting_health);
	CA_EVENT_TYPE(accounting_health) * e = event->data;
	e->percentage = percentage;
	CA_EVENT_SEND(event);
}

static void
schedule_analytics_thread_call()
{
	static const uint64_t analytics_period_ns = ANALYTICS_PERIOD_HOURS * 60 * 60 * NSEC_PER_SEC;
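	/* With ANALYTICS_PERIOD_HOURS == 24 this is 86,400 * NSEC_PER_SEC ns, i.e. one report per day. */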
	uint64_t analytics_period_absolutetime;
	nanoseconds_to_absolutetime(analytics_period_ns, &analytics_period_absolutetime);

	thread_call_enter_delayed(vm_analytics_thread_call, analytics_period_absolutetime + mach_absolute_time());
}

/*
 * This is the main entry point for reporting periodic analytics.
 * It's called once every ANALYTICS_PERIOD_HOURS hours.
 */
void
vm_analytics_tick(void *arg0, void *arg1)
{
#pragma unused(arg0, arg1)
	report_vm_swapusage();
	report_mlock_failures();
#if XNU_TARGET_OS_WATCH
	report_compressor_age();
#endif /* XNU_TARGET_OS_WATCH */
	report_accounting_health();
#if CONFIG_EXCLAVES
	exclaves_memory_report_accounting();
#endif /* CONFIG_EXCLAVES */
	schedule_analytics_thread_call();
}

static void
vm_analytics_init()
{
	vm_analytics_thread_call = thread_call_allocate_with_options(vm_analytics_tick, NULL, THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	schedule_analytics_thread_call();
}

STARTUP(THREAD_CALL, STARTUP_RANK_MIDDLE, vm_analytics_init);