1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 */
4 #include <sys/param.h>
5 #include <sys/kernel.h>
6 #include <sys/sysctl.h>
7
8 #include <machine/machine_routines.h>
9
10 #include <mach/host_info.h>
11 #include <mach/mach_host.h>
12 #include <arm/cpuid.h>
13 #include <kern/hvg_hypercall.h>
14 #include <vm/pmap.h>
15 #include <kern/zalloc.h>
16 #include <libkern/libkern.h>
17 #include <pexpert/device_tree.h>
18 #include <kern/task.h>
19
20 #if HYPERVISOR
21 #include <kern/hv_support.h>
22 #include <kern/bits.h>
23 #endif
24
25 extern uint64_t wake_abstime;
26
#if DEVELOPMENT || DEBUG
/* Various tuneables to modulate selection of WFE in the idle path */
extern int wfe_rec_max;
extern int wfe_allowed;

extern int wfe_rec_none;
extern uint32_t idle_proximate_timer_wfe;
extern uint32_t idle_proximate_io_wfe_masked;
extern uint32_t idle_proximate_io_wfe_unmasked;

/* machdep.wfe_rec_max: upper bound applied to WFE recommendations. */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_max,
CTLFLAG_RW, &wfe_rec_max, 0,
"");

/* machdep.wfe_allowed: gates use of WFE in the idle path. */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_allowed,
CTLFLAG_RW, &wfe_allowed, 0,
"");

/* machdep.idle_timer_wfe: WFE selection when a timer deadline is proximate. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_timer_wfe,
CTLFLAG_RW, &idle_proximate_timer_wfe, 0,
"");

/* machdep.idle_io_wfe_masked: WFE for proximate I/O, interrupts masked. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_masked,
CTLFLAG_RW, &idle_proximate_io_wfe_masked, 0,
"");
/* machdep.idle_io_wfe_unmasked: WFE for proximate I/O, interrupts unmasked. */
static
SYSCTL_INT(_machdep, OID_AUTO, idle_io_wfe_unmasked,
CTLFLAG_RW, &idle_proximate_io_wfe_unmasked, 0,
"");

/*
 * machdep.wfe_rec_none: NOTE(review): exact semantics not visible in this
 * file — confirm against the idle-path consumer of wfe_rec_none.
 */
static
SYSCTL_INT(_machdep, OID_AUTO, wfe_rec_none,
CTLFLAG_RW, &wfe_rec_none, 0,
"");

/*
 * machdep.wfe_rec_override_mat: override value for the WFE recommendation;
 * the "_mat" suffix suggests mach absolute time units — TODO confirm.
 */
extern uint64_t wfe_rec_override_mat;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_override_mat,
CTLFLAG_RW, &wfe_rec_override_mat,
"");

/* machdep.wfe_rec_clamp: clamp applied to WFE recommendations. */
extern uint64_t wfe_rec_clamp;
SYSCTL_QUAD(_machdep, OID_AUTO, wfe_rec_clamp,
CTLFLAG_RW, &wfe_rec_clamp,
"");

#endif
77
/* machdep.wake_abstime: mach absolute time captured at the last wakeup. */
static
SYSCTL_QUAD(_machdep, OID_AUTO, wake_abstime,
CTLFLAG_RD, &wake_abstime,
"Absolute Time at the last wakeup");
82
83 static int
84 sysctl_time_since_reset SYSCTL_HANDLER_ARGS
85 {
86 #pragma unused(arg1, arg2, oidp)
87 uint64_t return_value = ml_get_time_since_reset();
88 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
89 }
90
91 SYSCTL_PROC(_machdep, OID_AUTO, time_since_reset,
92 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
93 0, 0, sysctl_time_since_reset, "I",
94 "Continuous time since last SOC boot/wake started");
95
96 static int
97 sysctl_wake_conttime SYSCTL_HANDLER_ARGS
98 {
99 #pragma unused(arg1, arg2, oidp)
100 uint64_t return_value = ml_get_conttime_wake_time();
101 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
102 }
103
104 SYSCTL_PROC(_machdep, OID_AUTO, wake_conttime,
105 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
106 0, 0, sysctl_wake_conttime, "I",
107 "Continuous Time at the last wakeup");
108
109 #if defined(HAS_IPI)
110 static int
cpu_signal_deferred_timer(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)111 cpu_signal_deferred_timer(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
112 {
113 int new_value = 0;
114 int changed = 0;
115
116 int old_value = (int)ml_cpu_signal_deferred_get_timer();
117
118 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
119
120 if (error == 0 && changed) {
121 ml_cpu_signal_deferred_adjust_timer((uint64_t)new_value);
122 }
123
124 return error;
125 }
126
/* machdep.deferred_ipi_timeout: tunable deferred-IPI timeout, in nanoseconds. */
SYSCTL_PROC(_machdep, OID_AUTO, deferred_ipi_timeout,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
0, 0,
cpu_signal_deferred_timer, "I", "Deferred IPI timeout (nanoseconds)");
131
132 #endif /* defined(HAS_IPI) */
133
134 /*
135 * For source compatibility, here's some machdep.cpu mibs that
136 * use host_info() to simulate reasonable answers.
137 */
138
139 SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
140 "CPU info");
141
142 static int
143 arm_host_info SYSCTL_HANDLER_ARGS
144 {
145 __unused struct sysctl_oid *unused_oidp = oidp;
146
147 host_basic_info_data_t hinfo;
148 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
149 #define BSD_HOST 1
150 kern_return_t kret = host_info((host_t)BSD_HOST,
151 HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
152 if (KERN_SUCCESS != kret) {
153 return EINVAL;
154 }
155
156 if (sizeof(uint32_t) != arg2) {
157 panic("size mismatch");
158 }
159
160 uintptr_t woffset = (uintptr_t)arg1 / sizeof(uint32_t);
161 uint32_t datum = *(uint32_t *)(((uint32_t *)&hinfo) + woffset);
162 return SYSCTL_OUT(req, &datum, sizeof(datum));
163 }
164
165 /*
166 * machdep.cpu.cores_per_package
167 *
168 * x86: derived from CPUID data.
169 * ARM: how many physical cores we have in the AP; aka hw.physicalcpu_max
170 */
171 static
172 SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package,
173 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
174 (void *)offsetof(host_basic_info_data_t, physical_cpu_max),
175 sizeof(integer_t),
176 arm_host_info, "I", "CPU cores per package");
177
178 /*
179 * machdep.cpu.core_count
180 *
181 * x86: derived from CPUID data.
182 * ARM: # active physical cores in the AP; aka hw.physicalcpu
183 */
184 static
185 SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count,
186 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
187 (void *)offsetof(host_basic_info_data_t, physical_cpu),
188 sizeof(integer_t),
189 arm_host_info, "I", "Number of enabled cores per package");
190
191 /*
192 * machdep.cpu.logical_per_package
193 *
194 * x86: derived from CPUID data. Returns ENOENT if HTT bit not set, but
195 * most x64 CPUs have that, so assume it's available.
196 * ARM: total # logical cores in the AP; aka hw.logicalcpu_max
197 */
198 static
199 SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package,
200 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
201 (void *)offsetof(host_basic_info_data_t, logical_cpu_max),
202 sizeof(integer_t),
203 arm_host_info, "I", "CPU logical cpus per package");
204
205 /*
206 * machdep.cpu.thread_count
207 *
208 * x86: derived from CPUID data.
209 * ARM: # active logical cores in the AP; aka hw.logicalcpu
210 */
211 static
212 SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count,
213 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
214 (void *)offsetof(host_basic_info_data_t, logical_cpu),
215 sizeof(integer_t),
216 arm_host_info, "I", "Number of enabled threads per package");
217
/* Device-tree product/SoC name; written once at startup, read-only after. */
static SECURITY_READ_ONLY_LATE(char*) brand_string = NULL;
static SECURITY_READ_ONLY_LATE(size_t) brand_string_len = 0;

/*
 * SecureDTLookupEntry() is only guaranteed to work before PE_init_iokit(),
 * so we load the brand string (if available) in a startup handler.
 *
 * On any failure (no /product node, no product-soc-name property, empty
 * property) brand_string stays NULL and make_brand_string() falls back to
 * a CPUID-derived string.
 */
__startup_func
static void
sysctl_load_brand_string(void)
{
DTEntry node;
void const *value = NULL;
unsigned int size = 0;

if (kSuccess != SecureDTLookupEntry(0, "/product", &node)) {
return;
}

if (kSuccess != SecureDTGetProperty(node, "product-soc-name", (void const **) &value, &size)) {
return;
}

if (size == 0) {
return;
}

/* NOTE(review): zalloc_permanent is not expected to fail; check is defensive. */
brand_string = zalloc_permanent(size, ZALIGN_NONE);
if (brand_string == NULL) {
return;
}

memcpy(brand_string, value, size);
brand_string_len = size;
}
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_brand_string);
254
255 /*
256 * machdep.cpu.brand_string
257 *
258 * x86: derived from CPUID data.
259 * ARM: Grab the product string from the device tree, if it exists.
260 * Otherwise, cons something up from the CPUID register.
261 * the value is already exported via the commpage. So keep it simple.
262 */
263 static int
264 make_brand_string SYSCTL_HANDLER_ARGS
265 {
266 __unused struct sysctl_oid *unused_oidp = oidp;
267 __unused void *unused_arg1 = arg1;
268 __unused int unused_arg2 = arg2;
269
270 if (brand_string != NULL) {
271 return SYSCTL_OUT(req, brand_string, brand_string_len);
272 }
273
274 const char *impl;
275
276 switch (cpuid_info()->arm_info.arm_implementor) {
277 case CPU_VID_APPLE:
278 impl = "Apple";
279 break;
280 case CPU_VID_ARM:
281 impl = "ARM";
282 break;
283 default:
284 impl = "ARM architecture";
285 break;
286 }
287
288 char buf[80];
289 snprintf(buf, sizeof(buf), "%s processor", impl);
290 return SYSCTL_OUT(req, buf, strlen(buf) + 1);
291 }
292
/* machdep.cpu.brand_string: product/SoC name, or a CPUID-derived fallback. */
SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string,
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
0, 0, make_brand_string, "A", "CPU brand string");
296
297
298 static int
299 virtual_address_size SYSCTL_HANDLER_ARGS
300 {
301 #pragma unused(arg1, arg2, oidp)
302 int return_value = 64 - T0SZ_BOOT;
303 return SYSCTL_OUT(req, &return_value, sizeof(return_value));
304 }
305
/* machdep.virtual_address_size: userspace VA width in bits (read-only). */
static
SYSCTL_PROC(_machdep, OID_AUTO, virtual_address_size,
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
0, 0, virtual_address_size, "I",
"Number of addressable bits in userspace virtual addresses");
311
312
313 #if DEVELOPMENT || DEBUG
/* machdep.tlto: ticket spinlock timeout, in mach absolute time units. */
extern uint64_t TLockTimeOut;
SYSCTL_QUAD(_machdep, OID_AUTO, tlto,
CTLFLAG_RW | CTLFLAG_LOCKED, &TLockTimeOut,
"Ticket spinlock timeout (MATUs): use with care");

/* machdep.timebase_validation: enables monotonicity checking of the timebase. */
extern uint32_t timebase_validation;
SYSCTL_UINT(_machdep, OID_AUTO, timebase_validation,
CTLFLAG_RW | CTLFLAG_LOCKED, &timebase_validation, 0,
"Monotonicity validation of kernel mach_absolute_time()");
323
#if __WKDM_ISA_2P_WORKAROUND__
/* Counters and controls for the WKDM 2P ISA erratum workaround. */
extern uint64_t wkdmdretries, wkdmdretriespb;
extern uint32_t simulate_wkdm2p_error, wkdm_isa_2p_war_required;
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretries,
CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretries,
"Number of WKDM errata retries");
SYSCTL_QUAD(_machdep, OID_AUTO, wkdmdretriespb,
CTLFLAG_RW | CTLFLAG_LOCKED, &wkdmdretriespb,
"Number of retries where payload was on page boundary");
/* Test hook: forces a WKDM 2P error to exercise the retry path. */
SYSCTL_UINT(_machdep, OID_AUTO, simulate_wkdm2p_error,
CTLFLAG_RW | CTLFLAG_LOCKED,
&simulate_wkdm2p_error, 0, "");
/* Whether this SoC requires the WKDM 2P workaround. */
SYSCTL_UINT(_machdep, OID_AUTO, wkdm_isa_2p_war_required,
CTLFLAG_RW | CTLFLAG_LOCKED,
&wkdm_isa_2p_war_required, 0, "");
#endif /* __WKDM_ISA_2P_WORKAROUND__ */
340
341
342 /*
343 * macro to generate a sysctl machdep.cpu.sysreg_* for a given system register
344 * using __builtin_arm_rsr64.
345 */
346 #define SYSCTL_PROC_MACHDEP_CPU_SYSREG(name) \
347 static int \
348 sysctl_sysreg_##name SYSCTL_HANDLER_ARGS \
349 { \
350 _Pragma("unused(arg1, arg2, oidp)") \
351 uint64_t return_value = __builtin_arm_rsr64(#name); \
352 return SYSCTL_OUT(req, &return_value, sizeof(return_value)); \
353 } \
354 SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_##name, \
355 CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, \
356 0, 0, sysctl_sysreg_##name, "Q", \
357 #name " register on the current CPU");
358
359
360 // CPU system registers
361 // ARM64: AArch64 Vector Base Address Register
362 SYSCTL_PROC_MACHDEP_CPU_SYSREG(VBAR_EL1);
363 // ARM64: AArch64 Memory Attribute Indirection Register
364 SYSCTL_PROC_MACHDEP_CPU_SYSREG(MAIR_EL1);
365 // ARM64: AArch64 Translation table base register 1
366 SYSCTL_PROC_MACHDEP_CPU_SYSREG(TTBR1_EL1);
367 // ARM64: AArch64 System Control Register
368 SYSCTL_PROC_MACHDEP_CPU_SYSREG(SCTLR_EL1);
369 // ARM64: AArch64 Translation Control Register
370 SYSCTL_PROC_MACHDEP_CPU_SYSREG(TCR_EL1);
371 // ARM64: AArch64 Memory Model Feature Register 0
372 SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64MMFR0_EL1);
373 // ARM64: AArch64 Instruction Set Attribute Register 1
374 SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64ISAR1_EL1);
375 #if APPLE_ARM64_ARCH_FAMILY
376 // Apple ID Register
377 SYSCTL_PROC_MACHDEP_CPU_SYSREG(AIDR_EL1);
378 #endif /* APPLE_ARM64_ARCH_FAMILY */
379
380 #endif /* DEVELOPMENT || DEBUG */
381
382
#ifdef ML_IO_TIMEOUTS_ENABLED
/*
 * Timeouts for ml_{io|phys}_{read|write}...
 * Writable on DEVELOPMENT/DEBUG kernels, read-only otherwise.
 * (Original comment said "RO on DEVELOPMENT/DEBUG", which contradicts the
 * flag selection below.)
 */

#if DEVELOPMENT || DEBUG
#define MMIO_TIMEOUT_FLAGS (CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED)
#else
#define MMIO_TIMEOUT_FLAGS (CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED)
#endif

SYSCTL_QUAD(_machdep, OID_AUTO, report_phy_read_delay, MMIO_TIMEOUT_FLAGS,
&report_phy_read_delay_to, "Maximum time before io/phys read gets reported or panics");
SYSCTL_QUAD(_machdep, OID_AUTO, report_phy_write_delay, MMIO_TIMEOUT_FLAGS,
&report_phy_write_delay_to, "Maximum time before io/phys write gets reported or panics");
SYSCTL_QUAD(_machdep, OID_AUTO, trace_phy_read_delay, MMIO_TIMEOUT_FLAGS,
&trace_phy_read_delay_to, "Maximum time before io/phys read gets ktraced");
SYSCTL_QUAD(_machdep, OID_AUTO, trace_phy_write_delay, MMIO_TIMEOUT_FLAGS,
&trace_phy_write_delay_to, "Maximum time before io/phys write gets ktraced");

/* Whether exceeding the report thresholds above panics (vs. just reporting). */
SYSCTL_INT(_machdep, OID_AUTO, phy_read_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&phy_read_panic, 0, "if set, report-phy-read-delay timeout panics");
SYSCTL_INT(_machdep, OID_AUTO, phy_write_delay_panic, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&phy_write_panic, 0, "if set, report-phy-write-delay timeout panics");

#if ML_IO_SIMULATE_STRETCHED_ENABLED
/* Test hook: artificially stretches I/O to exercise the timeout paths. */
SYSCTL_QUAD(_machdep, OID_AUTO, sim_stretched_io_ns, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&simulate_stretched_io, "simulate stretched io in ml_read_io, ml_write_io");
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */

#endif /* ML_IO_TIMEOUTS_ENABLED */
415
/*
 * kern.opensource_kernel: presumably distinguishes open-source xnu builds
 * from internal ones — the overriding mechanism is not visible here.
 */
int opensource_kernel = 1;
SYSCTL_INT(_kern, OID_AUTO, opensource_kernel, CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
&opensource_kernel, 0, "Opensource Kernel");
419
420 static int
421 machdep_ptrauth_enabled SYSCTL_HANDLER_ARGS
422 {
423 #pragma unused(arg1, arg2, oidp)
424
425 #if __has_feature(ptrauth_calls)
426 task_t task = current_task();
427 int ret = !ml_task_get_disable_user_jop(task);
428 #else
429 const int ret = 0;
430 #endif
431
432 return SYSCTL_OUT(req, &ret, sizeof(ret));
433 }
434
435 SYSCTL_PROC(_machdep, OID_AUTO, ptrauth_enabled,
436 CTLTYPE_INT | CTLFLAG_KERN | CTLFLAG_RD,
437 0, 0,
438 machdep_ptrauth_enabled, "I", "");
439
440