xref: /xnu-11215/osfmk/arm/commpage/commpage.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4  *
5  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6  *
7  * This file contains Original Code and/or Modifications of Original Code
8  * as defined in and that are subject to the Apple Public Source License
9  * Version 2.0 (the 'License'). You may not use this file except in
10  * compliance with the License. The rights granted to you under the License
11  * may not be used to create, or enable the creation or redistribution of,
12  * unlawful or unlicensed copies of an Apple operating system, or to
13  * circumvent, violate, or enable the circumvention or violation of, any
14  * terms of an Apple operating system software license agreement.
15  *
16  * Please obtain a copy of the License at
17  * http://www.opensource.apple.com/apsl/ and read it before using this file.
18  *
19  * The Original Code and all software distributed under the License are
20  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24  * Please see the License for the specific language governing rights and
25  * limitations under the License.
26  *
27  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28  */
29 /*
30  * @OSF_COPYRIGHT@
31  */
32 /*
33  * @APPLE_FREE_COPYRIGHT@
34  */
35 /*
36  *	File:		arm/commpage/commpage.c
37  *	Purpose:	Set up and export a RO/RW page
38  */
39 #include <libkern/section_keywords.h>
40 #include <mach/mach_types.h>
41 #include <mach/machine.h>
42 #include <mach/vm_map.h>
43 #include <machine/cpu_capabilities.h>
44 #include <machine/commpage.h>
45 #include <machine/config.h>
46 #include <machine/pmap.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_protos.h>
50 #include <ipc/ipc_port.h>
51 #include <arm/cpuid.h>          /* for cpuid_info() & cache_info() */
52 #include <arm/cpu_capabilities_public.h>
53 #include <arm/misc_protos.h>
54 #include <arm/rtclock.h>
55 #include <libkern/OSAtomic.h>
56 #include <stdatomic.h>
57 #include <kern/remote_time.h>
58 #include <kern/smr.h>
59 #include <machine/atomic.h>
60 #include <machine/machine_remote_time.h>
61 #include <machine/machine_routines.h>
62 
63 #include <sys/kdebug.h>
64 #include <sys/random.h>
65 
66 #if CONFIG_ATM
67 #include <atm/atm_internal.h>
68 #endif
69 
/* Number of CPUs to advertise, clamped to fit in the commpage's single byte. */
static int commpage_cpus( void );


/* Computes _cpu_capabilities and publishes it to the commpage. */
static void commpage_init_cpu_capabilities( void );

/* Kernel-visible addresses of the commpage mappings; written once during boot. */
SECURITY_READ_ONLY_LATE(vm_address_t)   commPagePtr = 0;
SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_rw_addr = 0;
SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_kernel_ro_addr = 0;
SECURITY_READ_ONLY_LATE(uint64_t)       _cpu_capabilities = 0;
SECURITY_READ_ONLY_LATE(vm_address_t)   commpage_rw_text_addr = 0;

/* User-space locations of the commpage text, defined per-ABI elsewhere. */
extern user64_addr_t commpage_text64_location;
extern user32_addr_t commpage_text32_location;

/* For sysctl access from BSD side */
extern int gARMv8Crc32;
extern int gARMv8Gpi;
extern int gARM_FEAT_FlagM;
extern int gARM_FEAT_FlagM2;
extern int gARM_FEAT_FHM;
extern int gARM_FEAT_DotProd;
extern int gARM_FEAT_SHA3;
extern int gARM_FEAT_RDM;
extern int gARM_FEAT_LSE;
extern int gARM_FEAT_SHA256;
extern int gARM_FEAT_SHA512;
extern int gARM_FEAT_SHA1;
extern int gARM_FEAT_AES;
extern int gARM_FEAT_PMULL;
extern int gARM_FEAT_SPECRES;
extern int gARM_FEAT_SB;
extern int gARM_FEAT_FRINTTS;
extern int gARM_FEAT_LRCPC;
extern int gARM_FEAT_LRCPC2;
extern int gARM_FEAT_FCMA;
extern int gARM_FEAT_JSCVT;
extern int gARM_FEAT_PAuth;
extern int gARM_FEAT_PAuth2;
extern int gARM_FEAT_FPAC;
extern int gARM_FEAT_FPACCOMBINE;
extern int gARM_FEAT_DPB;
extern int gARM_FEAT_DPB2;
extern int gARM_FEAT_BF16;
extern int gARM_FEAT_I8MM;
extern int gARM_FEAT_WFxT;
extern int gARM_FEAT_RPRES;
extern int gARM_FEAT_ECV;
extern int gARM_FEAT_LSE2;
extern int gARM_FEAT_CSV2;
extern int gARM_FEAT_CSV3;
extern int gARM_FEAT_DIT;
extern int gARM_AdvSIMD;
extern int gARM_AdvSIMD_HPFPCvt;
extern int gARM_FEAT_FP16;
extern int gARM_FEAT_SSBS;
extern int gARM_FEAT_BTI;
extern int gARM_FP_SyncExceptions;
extern int gARM_FEAT_SME;
extern int gARM_FEAT_SME2;
extern int gARM_SME_F32F32;
extern int gARM_SME_BI32I32;
extern int gARM_SME_B16F32;
extern int gARM_SME_F16F32;
extern int gARM_SME_I8I32;
extern int gARM_SME_I16I32;
extern int gARM_FEAT_SME_F64F64;
extern int gARM_FEAT_SME_I16I64;
extern int gARM_FEAT_AFP;

extern int      gUCNormalMem;
140 
/*
 * One-time boot setup of the RW, RO and text comm pages: creates the
 * pmap-backed pages, publishes static system properties (CPU counts, cache
 * line size, page shifts, memory size, ...), seeds the time-related fields,
 * and writes randomized Apple Security Bounty target values.
 */
void
commpage_populate(void)
{
	uint16_t        c2;
	int cpufamily;

	// Create the data and the text commpage
	vm_map_address_t kernel_data_addr, kernel_text_addr, kernel_ro_data_addr, user_text_addr;
	pmap_create_commpages(&kernel_data_addr, &kernel_text_addr, &kernel_ro_data_addr, &user_text_addr);

	/* Cache the kernel-visible mappings for the _get_commpage_* accessors. */
	commpage_rw_addr = kernel_data_addr;
	commpage_rw_text_addr = kernel_text_addr;
	commpage_kernel_ro_addr = kernel_ro_data_addr;
	commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS;

#if __arm64__
	commpage_text64_location = user_text_addr;
	bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET),
	    MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING)));
#endif

	*((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION;

	commpage_init_cpu_capabilities();
	/* All-zero timestamp marks the time-of-day record as not yet valid. */
	commpage_set_timestamp(0, 0, 0, 0, 0);

	/* Translate the kCache* capability flag back into a byte count. */
	if (_cpu_capabilities & kCache32) {
		c2 = 32;
	} else if (_cpu_capabilities & kCache64) {
		c2 = 64;
	} else if (_cpu_capabilities & kCache128) {
		c2 = 128;
	} else {
		c2 = 0;
	}

	*((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2;

	/* Static topology and platform facts visible to userspace. */
	commpage_update_active_cpus();
	cpufamily = cpuid_get_cpufamily();
	*((uint8_t*)(_COMM_PAGE_CPU_CLUSTERS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) ml_get_cluster_count();
	*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max;
	*((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max;
	*((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem;
	*((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily;
	/* Debugger availability is mirrored in both the legacy RW slot and the RO slot. */
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RO_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL);
	*((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type();

	// Populate logical CPU -> logical cluster table
	ml_map_cpus_to_clusters((uint8_t*)(_COMM_PAGE_CPU_TO_CLUSTER + _COMM_PAGE_RW_OFFSET));

	*((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed();
	/* Page shifts are mirrored in legacy RW and newer RO slots. */
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift;
	*((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift;

#if __arm64__
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) page_shift_user32;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64_LEGACY + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
	*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RO_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT;
#endif /* __arm64__ */

	commpage_update_timebase();
	commpage_update_mach_continuous_time(0);

	clock_sec_t secs;
	clock_usec_t microsecs;
	clock_get_boottime_microtime(&secs, &microsecs);
	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);

	/*
	 * set commpage approximate time to zero for initialization.
	 * scheduler shall populate correct value before running user thread
	 */
	*((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0;
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1;
#else
	*((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0;
#endif

	commpage_update_kdebug_state();

#if CONFIG_ATM
	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif


	/* Sentinel marks the remote-time parameters as not yet initialized. */
	*((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS;

#if CONFIG_QUIESCE_COUNTER
	cpu_quiescent_set_storage((_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER +
	    _COMM_PAGE_RW_OFFSET));
#endif /* CONFIG_QUIESCE_COUNTER */

	/*
	 * Set random values for targets in Apple Security Bounty
	 * addr should be unmapped for userland processes
	 * kaddr should be unmapped for kernel
	 */
	uint64_t asb_value, asb_addr, asb_kvalue, asb_kaddr;
	uint64_t asb_rand_vals[] = {
		0x93e78adcded4d3d5, 0xd16c5b76ad99bccf, 0x67dfbbd12c4a594e, 0x7365636e6f6f544f,
		0x239a974c9811e04b, 0xbf60e7fa45741446, 0x8acf5210b466b05, 0x67dfbbd12c4a594e
	};
	const int nrandval = sizeof(asb_rand_vals) / sizeof(asb_rand_vals[0]);
	uint8_t randidx;

	/* Start at a random table index; each target consumes the next entry. */
	read_random(&randidx, sizeof(uint8_t));
	asb_value = asb_rand_vals[randidx++ % nrandval];
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_VALUE + _COMM_PAGE_RW_OFFSET)) = asb_value;

	// userspace faulting address should be > MACH_VM_MAX_ADDRESS
	asb_addr = asb_rand_vals[randidx++ % nrandval];
	uint64_t user_min = MACH_VM_MAX_ADDRESS;
	uint64_t user_max = UINT64_MAX;
	asb_addr %= (user_max - user_min);
	asb_addr += user_min;
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_ADDRESS + _COMM_PAGE_RW_OFFSET)) = asb_addr;

	asb_kvalue = asb_rand_vals[randidx++ % nrandval];
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_KERN_VALUE + _COMM_PAGE_RW_OFFSET)) = asb_kvalue;

	// kernel faulting address should be < VM_MIN_KERNEL_ADDRESS
	asb_kaddr = asb_rand_vals[randidx++ % nrandval];
	uint64_t kernel_min = 0x0LL;
	uint64_t kernel_max = VM_MIN_KERNEL_ADDRESS;
	asb_kaddr %= (kernel_max - kernel_min);
	asb_kaddr += kernel_min;
	*((uint64_t*)(_COMM_PAGE_ASB_TARGET_KERN_ADDRESS + _COMM_PAGE_RW_OFFSET)) = asb_kaddr;
}
273 
#define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC"
#define COMMPAGE_TEXT_SECTION "__commpage_text"

/* Get a pointer to the start of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the start of the section */
extern char commpage_text_start[]
__SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* Get a pointer to the end of the ARM PFZ code section. This macro tells the
 * linker that the storage for the variable here is at the end of the section */
extern char commpage_text_end[]
__SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);

/* This is defined in the commpage text section as a symbol at the start of the preemptible
 * functions */
extern char commpage_text_preemptible_functions;

#if CONFIG_ARM_PFZ
/* Byte size of the preemption-free zone (the non-preemptible prefix of the
 * commpage text section); computed in commpage_text_populate(). */
static size_t size_of_pfz = 0;
#endif

/* This is the opcode for brk #666
 * (A64 BRK is 0xD4200000 with the 16-bit immediate in bits [20:5]; 666 == 0x29A). */
#define BRK_666_OPCODE 0xD4205340
297 
/*
 * Copies the kernel's commpage text section into the user-visible text page
 * and pads the remainder of the page with trap instructions.
 */
void
commpage_text_populate(void)
{
#if CONFIG_ARM_PFZ
	size_t size_of_commpage_text = commpage_text_end - commpage_text_start;
	if (size_of_commpage_text == 0) {
		panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION);
	}
	assert(size_of_commpage_text <= PAGE_SIZE);
	assert(size_of_commpage_text > 0);

	/* Get the size of the PFZ half of the comm page text section. */
	size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start;

	// Copy the code segment of comm page text section into the PFZ
	memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text);

	/*
	 * Fill the rest of the page with "brk #666" so undefined code doesn't
	 * get run.
	 *
	 * NOTE: memset() cannot be used here — it truncates its fill value to
	 * a single byte, which would fill the tail with 0x40404040 words
	 * rather than BRK instructions.  Store the full 32-bit opcode word by
	 * word instead.  The text consists of 4-byte A64 instructions, so the
	 * remainder is word-aligned and word-sized.
	 */
	assert((size_of_commpage_text % sizeof(uint32_t)) == 0);
	uint32_t *fill = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text);
	uint32_t * const fill_end = (uint32_t *)((char *) _COMM_PAGE64_TEXT_START_ADDRESS + PAGE_SIZE);
	while (fill < fill_end) {
		*fill++ = BRK_666_OPCODE;
	}
#endif
}
321 
322 uint32_t
commpage_is_in_pfz64(addr64_t addr64)323 commpage_is_in_pfz64(addr64_t addr64)
324 {
325 #if CONFIG_ARM_PFZ
326 	if ((addr64 >= commpage_text64_location) &&
327 	    (addr64 < (commpage_text64_location + size_of_pfz))) {
328 		return 1;
329 	} else {
330 		return 0;
331 	}
332 #else
333 #pragma unused (addr64)
334 	return 0;
335 #endif
336 }
337 
338 
/*
 * Publish a new time-of-day snapshot to the comm page.
 *
 * Seqlock-like protocol: TimeStamp_tick doubles as the validity flag.  It is
 * zeroed first, the payload fields are written, then the real tick value is
 * stored last; the DMBs order the three phases so a concurrent userspace
 * reader never observes a torn record as valid.
 */
void
commpage_set_timestamp(
	uint64_t        tbr,
	uint64_t        secs,
	uint64_t        frac,
	uint64_t        scale,
	uint64_t        tick_per_sec)
{
	new_commpage_timeofday_data_t *commpage_timeofday_datap;

	if (commPagePtr == 0) {
		return;
	}

	commpage_timeofday_datap =  (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET);

	/* Invalidate the record while it is being updated. */
	commpage_timeofday_datap->TimeStamp_tick = 0x0ULL;

	__builtin_arm_dmb(DMB_ISH);

	commpage_timeofday_datap->TimeStamp_sec = secs;
	commpage_timeofday_datap->TimeStamp_frac = frac;
	commpage_timeofday_datap->Ticks_scale = scale;
	commpage_timeofday_datap->Ticks_per_sec = tick_per_sec;

	__builtin_arm_dmb(DMB_ISH);

	/* A non-zero tick re-validates the record for readers. */
	commpage_timeofday_datap->TimeStamp_tick = tbr;
}
369 
370 /*
371  * Update _COMM_PAGE_MEMORY_PRESSURE.  Called periodically from vm's compute_memory_pressure()
372  */
373 
374 void
commpage_set_memory_pressure(unsigned int pressure)375 commpage_set_memory_pressure(
376 	unsigned int    pressure )
377 {
378 	if (commPagePtr == 0) {
379 		return;
380 	}
381 	*((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure;
382 }
383 
384 /*
385  * Determine number of CPUs on this system.
386  */
387 static int
commpage_cpus(void)388 commpage_cpus( void )
389 {
390 	int cpus;
391 
392 	cpus = machine_info.max_cpus;
393 
394 	if (cpus == 0) {
395 		panic("commpage cpus==0");
396 	}
397 	if (cpus > 0xFF) {
398 		cpus = 0xFF;
399 	}
400 
401 	return cpus;
402 }
403 
404 uint64_t
_get_cpu_capabilities(void)405 _get_cpu_capabilities(void)
406 {
407 	return _cpu_capabilities;
408 }
409 
410 vm_address_t
_get_commpage_priv_address(void)411 _get_commpage_priv_address(void)
412 {
413 	return commpage_rw_addr;
414 }
415 
416 vm_address_t
_get_commpage_ro_address(void)417 _get_commpage_ro_address(void)
418 {
419 	return commpage_kernel_ro_addr;
420 }
421 
422 vm_address_t
_get_commpage_text_priv_address(void)423 _get_commpage_text_priv_address(void)
424 {
425 	return commpage_rw_text_addr;
426 }
427 
428 #if defined(__arm64__)
429 /**
430  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR0_EL1
431  */
static void
commpage_init_arm_optional_features_isar0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar0 = __builtin_arm_rsr64("ID_AA64ISAR0_EL1");

	/*
	 * Each field is compared against the minimum value denoting the
	 * feature; larger field values indicate supersets, hence the >= tests.
	 * Matching sysctl globals and commpage capability bits are set together.
	 */
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM_EN) {
		gARM_FEAT_FlagM = 1;
		bits |= kHasFEATFlagM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_TS_MASK) >= ID_AA64ISAR0_EL1_TS_FLAGM2_EN) {
		gARM_FEAT_FlagM2 = 1;
		bits |= kHasFEATFlagM2;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_FHM_MASK) >= ID_AA64ISAR0_EL1_FHM_8_2) {
		gARM_FEAT_FHM = 1;
		bits |= kHasFeatFHM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_DP_MASK) >= ID_AA64ISAR0_EL1_DP_EN) {
		gARM_FEAT_DotProd = 1;
		bits |= kHasFeatDotProd;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) {
		gARM_FEAT_SHA3 = 1;
		bits |= kHasFeatSHA3;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_RDM_MASK) >= ID_AA64ISAR0_EL1_RDM_EN) {
		gARM_FEAT_RDM = 1;
		bits |= kHasFeatRDM;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) >= ID_AA64ISAR0_EL1_ATOMIC_8_1) {
		gARM_FEAT_LSE = 1;
		bits |= kHasFeatLSE;
	}
	if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) >= ID_AA64ISAR0_EL1_SHA2_512_EN) {
		gARM_FEAT_SHA512 = 1;
		bits |= kHasFeatSHA512;
	}
	/* NOTE(review): CRC32 uses an exact match, unlike the >= tests above — confirm intentional. */
	if ((isar0 & ID_AA64ISAR0_EL1_CRC32_MASK) == ID_AA64ISAR0_EL1_CRC32_EN) {
		gARMv8Crc32 = 1;
		bits |= kHasARMv8Crc32;
	}

#if __ARM_V8_CRYPTO_EXTENSIONS__
	/**
	 * T7000 has a bug in the ISAR0 register that reports that PMULL is not
	 * supported when it actually is. To work around this, for all of the crypto
	 * extensions, just check if they're supported using the board_config.h
	 * values.
	 */
	gARM_FEAT_PMULL = 1;
	gARM_FEAT_SHA1 = 1;
	gARM_FEAT_AES = 1;
	gARM_FEAT_SHA256 = 1;
	bits |= kHasARMv8Crypto;
#endif /* __ARM_V8_CRYPTO_EXTENSIONS__ */

	*commpage_bits |= bits;
}
491 
492 /**
493  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR1_EL1
494  */
static void
commpage_init_arm_optional_features_isar1(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t isar1 = __builtin_arm_rsr64("ID_AA64ISAR1_EL1");
	uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");

	/* SPECRES is only advertised when SCTLR_EL1.EnRCTX makes it usable from EL0. */
	if ((isar1 & ID_AA64ISAR1_EL1_SPECRES_MASK) >= ID_AA64ISAR1_EL1_SPECRES_EN &&
	    sctlr & SCTLR_EnRCTX) {
		gARM_FEAT_SPECRES = 1;
		bits |= kHasFeatSPECRES;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_SB_MASK) >= ID_AA64ISAR1_EL1_SB_EN) {
		gARM_FEAT_SB = 1;
		bits |= kHasFeatSB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FRINTTS_MASK) >= ID_AA64ISAR1_EL1_FRINTTS_EN) {
		gARM_FEAT_FRINTTS = 1;
		bits |= kHasFeatFRINTTS;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_GPI_MASK) >= ID_AA64ISAR1_EL1_GPI_EN) {
		gARMv8Gpi = 1;
		bits |= kHasArmv8GPI;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCPC_EN) {
		gARM_FEAT_LRCPC = 1;
		bits |= kHasFeatLRCPC;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_LRCPC_MASK) >= ID_AA64ISAR1_EL1_LRCP2C_EN) {
		gARM_FEAT_LRCPC2 = 1;
		bits |= kHasFeatLRCPC2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_FCMA_MASK) >= ID_AA64ISAR1_EL1_FCMA_EN) {
		gARM_FEAT_FCMA = 1;
		bits |= kHasFeatFCMA;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_JSCVT_MASK) >= ID_AA64ISAR1_EL1_JSCVT_EN) {
		gARM_FEAT_JSCVT = 1;
		bits |= kHasFeatJSCVT;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth_EN) {
		gARM_FEAT_PAuth = 1;
		bits |= kHasFeatPAuth;
	}
	/* PAuth2/FPAC/FPACCOMBINE, BF16 and I8MM are exported via sysctl only;
	 * no commpage capability bit is assigned to them here. */
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_PAuth2_EN) {
		gARM_FEAT_PAuth2 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPAC_EN) {
		gARM_FEAT_FPAC = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_API_MASK) >= ID_AA64ISAR1_EL1_API_FPACCOMBINE) {
		gARM_FEAT_FPACCOMBINE = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB_EN) {
		gARM_FEAT_DPB = 1;
		bits |= kHasFeatDPB;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_DPB_MASK) >= ID_AA64ISAR1_EL1_DPB2_EN) {
		gARM_FEAT_DPB2 = 1;
		bits |= kHasFeatDPB2;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_BF16_MASK) >= ID_AA64ISAR1_EL1_BF16_EN) {
		gARM_FEAT_BF16 = 1;
	}
	if ((isar1 & ID_AA64ISAR1_EL1_I8MM_MASK) >= ID_AA64ISAR1_EL1_I8MM_EN) {
		gARM_FEAT_I8MM = 1;
	}

	*commpage_bits |= bits;
}
565 
566 /**
567  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64ISAR2_EL1
568  */
569 static void
commpage_init_arm_optional_features_isar2(void)570 commpage_init_arm_optional_features_isar2(void)
571 {
572 	uint64_t isar2 = __builtin_arm_rsr64("ID_AA64ISAR2_EL1");
573 
574 	if ((isar2 & ID_AA64ISAR2_EL1_WFxT_MASK) >= ID_AA64ISAR2_EL1_WFxT_EN) {
575 		gARM_FEAT_WFxT = 1;
576 	}
577 	if ((isar2 & ID_AA64ISAR2_EL1_RPRES_MASK) >= ID_AA64ISAR2_EL1_RPRES_EN) {
578 		gARM_FEAT_RPRES = 1;
579 	}
580 }
581 
582 /**
583  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR0_EL1
584  */
585 static void
commpage_init_arm_optional_features_mmfr0(uint64_t * commpage_bits)586 commpage_init_arm_optional_features_mmfr0(uint64_t *commpage_bits)
587 {
588 	uint64_t bits = 0;
589 	uint64_t mmfr0 = __builtin_arm_rsr64("ID_AA64MMFR0_EL1");
590 
591 	if ((mmfr0 & ID_AA64MMFR0_EL1_ECV_MASK) >= ID_AA64MMFR0_EL1_ECV_EN) {
592 		gARM_FEAT_ECV = 1;
593 	}
594 
595 	*commpage_bits |= bits;
596 }
597 
598 /**
599  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64MMFR2_EL1
600  */
601 static void
commpage_init_arm_optional_features_mmfr2(uint64_t * commpage_bits)602 commpage_init_arm_optional_features_mmfr2(uint64_t *commpage_bits)
603 {
604 	uint64_t bits = 0;
605 	uint64_t mmfr2 = __builtin_arm_rsr64("ID_AA64MMFR2_EL1");
606 
607 	if ((mmfr2 & ID_AA64MMFR2_EL1_AT_MASK) >= ID_AA64MMFR2_EL1_AT_LSE2_EN) {
608 		gARM_FEAT_LSE2 = 1;
609 		bits |= kHasFeatLSE2;
610 	}
611 
612 	*commpage_bits |= bits;
613 }
614 
615 /**
616  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR0_EL1
617  */
static void
commpage_init_arm_optional_features_pfr0(uint64_t *commpage_bits)
{
	uint64_t bits = 0;
	uint64_t pfr0 = __builtin_arm_rsr64("ID_AA64PFR0_EL1");

	if ((pfr0 & ID_AA64PFR0_EL1_CSV3_MASK) >= ID_AA64PFR0_EL1_CSV3_EN) {
		gARM_FEAT_CSV3 = 1;
		bits |= kHasFeatCSV3;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_CSV2_MASK) >= ID_AA64PFR0_EL1_CSV2_EN) {
		gARM_FEAT_CSV2 = 1;
		bits |= kHasFeatCSV2;
	}
	if ((pfr0 & ID_AA64PFR0_EL1_DIT_MASK) >= ID_AA64PFR0_EL1_DIT_EN) {
		gARM_FEAT_DIT = 1;
		bits |= kHasFeatDIT;
	}
	/* AdvSIMD uses a "not disabled" test; HPFPCvt and FP16 are graded
	 * refinements of the same field and only checked when SIMD exists. */
	if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) != ID_AA64PFR0_EL1_AdvSIMD_DIS) {
		gARM_AdvSIMD = 1;
		bits |= kHasAdvSIMD;
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_HPFPCVT) {
			gARM_AdvSIMD_HPFPCvt = 1;
			bits |= kHasAdvSIMD_HPFPCvt;
		}
		if ((pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK) >= ID_AA64PFR0_EL1_AdvSIMD_FP16) {
			gARM_FEAT_FP16 = 1;
			bits |= kHasFeatFP16;
		}
	}

	*commpage_bits |= bits;
}
651 
652 /**
653  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64PFR1_EL1
654  */
static void
commpage_init_arm_optional_features_pfr1(uint64_t *commpage_bits)
{
	uint64_t pfr1 = __builtin_arm_rsr64("ID_AA64PFR1_EL1");

	/* SSBS and BTI are exported via sysctl only; no commpage bits. */
	if ((pfr1 & ID_AA64PFR1_EL1_SSBS_MASK) >= ID_AA64PFR1_EL1_SSBS_EN) {
		gARM_FEAT_SSBS = 1;
	}

	if ((pfr1 & ID_AA64PFR1_EL1_BT_MASK) >= ID_AA64PFR1_EL1_BT_EN) {
		gARM_FEAT_BTI = 1;
	}

	/* SME support is derived from the arm_sme_version() helper rather
	 * than from PFR1 directly (it may be defeatured by boot-arg). */
	unsigned int sme_version = arm_sme_version();
	if (sme_version >= 1) {
		gARM_FEAT_SME = 1;
		*commpage_bits |= kHasFeatSME;
	}
	if (sme_version >= 2) {
		gARM_FEAT_SME2 = 1;
		*commpage_bits |= kHasFeatSME2;
	}
}
679 
680 /**
681  * Initializes all commpage entries and sysctls for EL0 visible features in ID_AA64SMFR0_EL1
682  */
/* Needs the "sme" target attribute so the compiler permits reading the SME feature register. */
__attribute__((target("sme")))
static void
commpage_init_arm_optional_features_smfr0(void)
{
	if (arm_sme_version() == 0) {
		/*
		 * We can safely read ID_AA64SMFR0_EL1 on SME-less devices.  But
		 * arm_sme_version() == 0 could also mean that the user
		 * defeatured SME with a boot-arg.
		 */
		return;
	}

	uint64_t smfr0 = __builtin_arm_rsr64("ID_AA64SMFR0_EL1");

	/*
	 * ID_AA64SMFR0_EL1 has to be parsed differently from other feature ID
	 * registers.  See "Alternative ID scheme used for ID_AA64SMFR0_EL1" in
	 * the ARM ARM.
	 */

	/* 1-bit fields */
	if (smfr0 & ID_AA64SMFR0_EL1_F32F32_EN) {
		gARM_SME_F32F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_BI32I32_EN) {
		gARM_SME_BI32I32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_B16F32_EN) {
		gARM_SME_B16F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F16F32_EN) {
		gARM_SME_F16F32 = 1;
	}
	if (smfr0 & ID_AA64SMFR0_EL1_F64F64_EN) {
		gARM_FEAT_SME_F64F64 = 1;
	}

	/* 4-bit fields (0 bits are ignored) */
	if ((smfr0 & ID_AA64SMFR0_EL1_I8I32_EN) == ID_AA64SMFR0_EL1_I8I32_EN) {
		gARM_SME_I8I32 = 1;
	}
	if ((smfr0 & ID_AA64SMFR0_EL1_I16I32_EN) == ID_AA64SMFR0_EL1_I16I32_EN) {
		gARM_SME_I16I32 = 1;
	}
	if ((smfr0 & ID_AA64SMFR0_EL1_I16I64_EN) == ID_AA64SMFR0_EL1_I16I64_EN) {
		gARM_FEAT_SME_I16I64 = 1;
	}
}
732 
733 static void
commpage_init_arm_optional_features_mmfr1(uint64_t * commpage_bits)734 commpage_init_arm_optional_features_mmfr1(uint64_t *commpage_bits)
735 {
736 	uint64_t bits = 0;
737 	const uint64_t mmfr1 = __builtin_arm_rsr64("ID_AA64MMFR1_EL1");
738 
739 	if ((mmfr1 & ID_AA64MMFR1_EL1_AFP_MASK) == ID_AA64MMFR1_EL1_AFP_EN) {
740 		gARM_FEAT_AFP = 1;
741 		bits |= kHasFeatAFP;
742 	}
743 
744 	*commpage_bits |= bits;
745 }
746 
/**
 * Read the system register @name, attempt to set the bits of @mask if not
 * already set, test whether the bits actually stuck, restore the register
 * to its previous value if required, and 'return' @mask with only the bits
 * that were successfully set (or already set) in the system register. */
#define _test_sys_bits(name, mask) ({ \
	const uint64_t src = __builtin_arm_rsr64(#name); \
    uint64_t test = src | mask; \
    if (test != src) { \
	__builtin_arm_wsr64(#name, test); \
	test = __builtin_arm_rsr64(#name); \
	if (test != src) { \
	    __builtin_arm_wsr64(#name, src); \
	}\
    } \
    mask & test; \
})
764 
/**
 * Reports whether FPU exceptions are supported.
 * Possible FPU exceptions are:
 * - input denormal;
 * - inexact;
 * - underflow;
 * - overflow;
 * - divide by 0;
 * - invalid operation.
 *
 * Any of those can be supported or not, but for now we treat it as
 * all-or-nothing: the FPU-exceptions support flag is set <=> all 6
 * exception-enable bits can be set in FPCR.
 */
static void
commpage_init_arm_optional_features_fpcr(uint64_t *commpage_bits)
{
	uint64_t support_mask = FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE |
	    FPCR_DZE | FPCR_IOE;
	/* Probe FPCR: only enable bits that stick are reported back. */
	uint64_t FPCR_bits = _test_sys_bits(FPCR, support_mask);
	if (FPCR_bits == support_mask) {
		gARM_FP_SyncExceptions = 1;
		*commpage_bits |= kHasFP_SyncExceptions;
	}
}
790 
/**
 * Initializes all commpage entries and sysctls for ARM64 optional features
 * accessible from EL0.  Each helper ORs its capability bits into
 * *commpage_bits; isar2 and smfr0 set sysctl globals only.
 */
static void
commpage_init_arm_optional_features(uint64_t *commpage_bits)
{
	commpage_init_arm_optional_features_isar0(commpage_bits);
	commpage_init_arm_optional_features_isar1(commpage_bits);
	commpage_init_arm_optional_features_isar2();
	commpage_init_arm_optional_features_mmfr0(commpage_bits);
	commpage_init_arm_optional_features_mmfr1(commpage_bits);
	commpage_init_arm_optional_features_mmfr2(commpage_bits);
	commpage_init_arm_optional_features_pfr0(commpage_bits);
	commpage_init_arm_optional_features_pfr1(commpage_bits);
	commpage_init_arm_optional_features_smfr0();
	commpage_init_arm_optional_features_fpcr(commpage_bits);
}
808 #endif /* __arm64__ */
809 
810 /*
811  * Initialize _cpu_capabilities vector
812  */
813 static void
commpage_init_cpu_capabilities(void)814 commpage_init_cpu_capabilities( void )
815 {
816 	uint64_t bits;
817 	int cpus;
818 	ml_cpu_info_t cpu_info;
819 
820 	bits = 0;
821 	ml_cpu_get_info(&cpu_info);
822 
823 	switch (cpu_info.cache_line_size) {
824 	case 128:
825 		bits |= kCache128;
826 		break;
827 	case 64:
828 		bits |= kCache64;
829 		break;
830 	case 32:
831 		bits |= kCache32;
832 		break;
833 	default:
834 		break;
835 	}
836 	cpus = commpage_cpus();
837 
838 	if (cpus == 1) {
839 		bits |= kUP;
840 	}
841 
842 	bits |= (cpus << kNumCPUsShift);
843 
844 	bits |= kFastThreadLocalStorage;        // TPIDRURO for TLS
845 
846 	bits |= kHasVfp;
847 
848 #if defined(__arm64__)
849 	bits |= kHasFMA;
850 #endif
851 	bits |= kHasEvent;
852 #ifdef __arm64__
853 	commpage_init_arm_optional_features(&bits);
854 #endif
855 
856 
857 
858 #if HAS_UCNORMAL_MEM
859 	gUCNormalMem = 1;
860 	bits |= kHasUCNormalMemory;
861 #endif
862 
863 	_cpu_capabilities = bits;
864 
865 	*((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities;
866 	*((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities;
867 
868 }
869 
870 /*
871  * Updated every time a logical CPU goes offline/online
872  */
873 void
commpage_update_active_cpus(void)874 commpage_update_active_cpus(void)
875 {
876 	if (!commPagePtr) {
877 		return;
878 	}
879 	*((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count;
880 
881 }
882 
883 /*
884  * Update the commpage bits for mach_absolute_time and mach_continuous_time (for userspace)
885  */
886 void
commpage_update_timebase(void)887 commpage_update_timebase(void)
888 {
889 	if (commPagePtr) {
890 		*((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime;
891 	}
892 }
893 
894 /*
895  * Update the commpage with current kdebug state: whether tracing is enabled, a
896  * typefilter is present, and continuous time should be used for timestamps.
897  *
898  * Disregards configuration and set to 0 if tracing is disabled.
899  */
900 void
commpage_update_kdebug_state(void)901 commpage_update_kdebug_state(void)
902 {
903 	if (commPagePtr) {
904 		uint32_t state = kdebug_commpage_state();
905 		*((volatile uint32_t *)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = state;
906 	}
907 }
908 
909 /* Ditto for atm_diagnostic_config */
910 void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)911 commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
912 {
913 	if (commPagePtr) {
914 		*((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config;
915 	}
916 }
917 
918 /*
919  * Update the commpage data with the state of multiuser mode for
920  * this device. Allowing various services in userspace to avoid
921  * IPC in the (more common) non-multiuser environment.
922  */
923 void
commpage_update_multiuser_config(uint32_t multiuser_config)924 commpage_update_multiuser_config(uint32_t multiuser_config)
925 {
926 	if (commPagePtr) {
927 		*((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config;
928 	}
929 }
930 
931 /*
932  * update the commpage data for
933  * last known value of mach_absolute_time()
934  */
935 
void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
	if (!commPagePtr) {
		return;
	}

	uint64_t *approx_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET);

	/* Only move the published value forward: load, compare, CAS. */
	uint64_t saved_data = os_atomic_load_wide(approx_time_base, relaxed);
	if (saved_data < abstime) {
		/*
		 * ignore the success/fail return value assuming that
		 * if the value has been updated since we last read it,
		 * someone else has written a timestamp that is new enough.
		 */
		__unused bool ret = os_atomic_cmpxchg(approx_time_base,
		    saved_data, abstime, relaxed);
	}


#else /* CONFIG_MACH_APPROXIMATE_TIME */
#pragma unused (abstime)
#endif
}
962 
963 /*
964  * update the commpage data's total system sleep time for
965  * userspace call to mach_continuous_time()
966  */
967 void
commpage_update_mach_continuous_time(uint64_t sleeptime)968 commpage_update_mach_continuous_time(uint64_t sleeptime)
969 {
970 	if (!commPagePtr) {
971 		return;
972 	}
973 
974 	uint64_t *cont_time_base = (uint64_t *)(uintptr_t)(_COMM_PAGE_CONT_TIMEBASE + _COMM_PAGE_RW_OFFSET);
975 
976 	os_atomic_store_wide(cont_time_base, sleeptime, relaxed);
977 
978 }
979 
980 void
commpage_update_mach_continuous_time_hw_offset(uint64_t offset)981 commpage_update_mach_continuous_time_hw_offset(uint64_t offset)
982 {
983 	*((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset;
984 }
985 
986 /*
987  * update the commpage's value for the boot time
988  */
989 void
commpage_update_boottime(uint64_t value)990 commpage_update_boottime(uint64_t value)
991 {
992 	if (!commPagePtr) {
993 		return;
994 	}
995 
996 	uint64_t *boottime_usec = (uint64_t *)(uintptr_t)(_COMM_PAGE_BOOTTIME_USEC + _COMM_PAGE_RW_OFFSET);
997 
998 	os_atomic_store_wide(boottime_usec, value, relaxed);
999 
1000 }
1001 
1002 /*
1003  * set the commpage's remote time params for
1004  * userspace call to mach_bridge_remote_time()
1005  */
void
commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
{
	if (commPagePtr) {
#ifdef __arm64__
		struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
		/*
		 * Seqlock-style update: zero base_local_ts to invalidate the
		 * record, write the payload, then store the real value last.
		 * The DMBs keep userspace readers from seeing a torn update.
		 */
		paramsp->base_local_ts = 0;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->rate = rate;
		paramsp->base_remote_ts = base_remote_ts;
		__builtin_arm_dmb(DMB_ISH);
		paramsp->base_local_ts = base_local_ts;  //This will act as a generation count
#endif /* __arm64__ */
	}
}
1021 
1022 
1023 /*
1024  * update the commpage with if dtrace user land probes are enabled
1025  */
1026 void
commpage_update_dof(boolean_t enabled)1027 commpage_update_dof(boolean_t enabled)
1028 {
1029 #if CONFIG_DTRACE
1030 	*((uint8_t*)(_COMM_PAGE_DTRACE_DOF_ENABLED + _COMM_PAGE_RW_OFFSET)) = (enabled ? 1 : 0);
1031 #else
1032 	(void)enabled;
1033 #endif
1034 }
1035 
1036 /*
1037  * update the dyld global config flags
1038  */
1039 void
commpage_update_dyld_flags(uint64_t value)1040 commpage_update_dyld_flags(uint64_t value)
1041 {
1042 	*((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value;
1043 
1044 }
1045