/*
 * Copyright (c) 2007-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef ARM_CPU_DATA_INTERNAL
#define ARM_CPU_DATA_INTERNAL

#include <mach_assert.h>
#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/percpu.h>
#include <kern/processor.h>
#include <os/base.h>
#include <pexpert/pexpert.h>
#include <arm/dbgwrap.h>
#include <arm/machine_routines.h>
#include <arm64/proc_reg.h>
#include <arm/thread.h>
#include <arm/pmap.h>
#include <machine/monotonic.h>
#include <san/kcov_data.h>

#define NSEC_PER_HZ     (NSEC_PER_SEC / 100)
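/*
 * Illustrative note (assuming NSEC_PER_SEC == 1000000000ULL from
 * <mach/clock_types.h>): this evaluates to 1000000000 / 100 == 10000000 ns,
 * i.e. one tick every 10 ms at the 100 Hz rate encoded by the divisor.
 */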

typedef struct reset_handler_data {
	vm_offset_t     assist_reset_handler;           /* Assist handler phys address */
	vm_offset_t     cpu_data_entries;                       /* CpuDataEntries phys address */
} reset_handler_data_t;

#if !CONFIG_SPTM
extern  reset_handler_data_t    ResetHandlerData;
#endif

/* Put the static check for cpumap_t here as it's defined in <kern/processor.h> */
static_assert(sizeof(cpumap_t) * CHAR_BIT >= MAX_CPUS, "cpumap_t bitvector is too small for current MAX_CPUS value");

#define CPUWINDOWS_BASE_MASK            0xFFFFFFFFFFD00000UL
#define CPUWINDOWS_BASE                 (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK)
#define CPUWINDOWS_TOP                  (CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * ARM_PGBYTES))

static_assert((CPUWINDOWS_BASE >= VM_MIN_KERNEL_ADDRESS) && ((CPUWINDOWS_TOP - 1) <= VM_MAX_KERNEL_ADDRESS),
    "CPU copy windows too large for CPUWINDOWS_BASE_MASK value");
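/*
 * Worked example of the arithmetic above (illustrative values only): with a
 * hypothetical MAX_CPUS of 64, CPUWINDOWS_MAX of 4 and 16 KiB pages
 * (ARM_PGBYTES == 16384), the copy-window region spans
 * 64 * 4 * 16384 == 4 MiB of VA starting at CPUWINDOWS_BASE, which is the
 * span the static_assert checks still fits below VM_MAX_KERNEL_ADDRESS.
 */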

typedef struct cpu_data_entry {
	void                           *cpu_data_paddr;         /* Cpu data physical address */
	struct  cpu_data               *cpu_data_vaddr;         /* Cpu data virtual address */
#if !defined(__arm64__)
#error Check cpu_data_entry padding for this architecture
#endif
} cpu_data_entry_t;


typedef struct rtclock_timer {
	mpqueue_head_t                  queue;
	uint64_t                        deadline;
	uint32_t                        is_set:1,
	    has_expired:1,
	:0;
} rtclock_timer_t;

typedef struct {
	/*
	 * The wake variants of these counters are reset to 0 when the CPU wakes.
	 */
	uint64_t irq_ex_cnt;
	uint64_t irq_ex_cnt_wake;
	uint64_t ipi_cnt;
	uint64_t ipi_cnt_wake;
	uint64_t timer_cnt;
#if CONFIG_CPU_COUNTERS
	uint64_t pmi_cnt_wake;
#endif /* CONFIG_CPU_COUNTERS */
	uint64_t undef_ex_cnt;
	uint64_t unaligned_cnt;
	uint64_t vfp_cnt;
	uint64_t data_ex_cnt;
	uint64_t instr_ex_cnt;
} cpu_stat_t;

__options_closed_decl(cpu_flags_t, uint16_t, {
	SleepState      = 0x0800,
	/* For the boot processor, StartedState means 'interrupts initialized' - it is already running */
	StartedState    = 0x1000,
	/* For the boot processor, InitState means 'cpu_data fully initialized' - it is already running */
	InitState       = 0x2000,
});
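/*
 * Illustrative sketch only (not necessarily how callers actually manipulate
 * these flags): cpu_flags is declared _Atomic in struct cpu_data below, so a
 * state bit would typically be set or tested with the os_atomic primitives,
 * with cpu_data_ptr standing in for a pointer to a CPU's cpu_data_t, e.g.:
 *
 *	os_atomic_or(&cpu_data_ptr->cpu_flags, StartedState, release);
 *	if (os_atomic_load(&cpu_data_ptr->cpu_flags, acquire) & SleepState) {
 *		...
 *	}
 */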

__options_closed_decl(cpu_signal_t, unsigned int, {
	SIGPnop         = 0x00000000U,     /* Send IPI with no service */
	/* 0x1U unused */
	/* 0x2U unused */
	SIGPxcall       = 0x00000004U,     /* Call a function on a processor */
	SIGPast         = 0x00000008U,     /* Request AST check */
	SIGPdebug       = 0x00000010U,     /* Request Debug call */
	SIGPLWFlush     = 0x00000020U,     /* Request LWFlush call */
	SIGPLWClean     = 0x00000040U,     /* Request LWClean call */
	/* 0x80U unused */
	SIGPkppet       = 0x00000100U,     /* Request kperf PET handler */
	SIGPxcallImm    = 0x00000200U,     /* Send a cross-call, fail if already pending */
	SIGPTimerLocal  = 0x00000400U,     /* Update the decrementer via timer_queue_expire_local */

	SIGPdisabled    = 0x80000000U,     /* Signal disabled */
});
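/*
 * Illustrative sketch only (the real signalling path lives in the arm cpu /
 * IPI code, not in this header): a sender would typically OR a SIGP* bit
 * into the target's cpu_signal word and then raise a platform IPI, and the
 * target clears the bit once the request has been serviced, e.g.:
 *
 *	cpu_data_t *target = cpu_datap(cpu);            ... hypothetical usage
 *	os_atomic_or(&target->cpu_signal, SIGPast, acq_rel);
 *	... raise hardware IPI on target ...
 *
 *	... target side, after handling the AST request:
 *	os_atomic_andnot(&target->cpu_signal, SIGPast, acq_rel);
 */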

typedef struct cpu_data {
	unsigned short                  cpu_number;
	_Atomic cpu_flags_t             cpu_flags;
	int                             cpu_type;
	int                             cpu_subtype;
	int                             cpu_threadtype;

	void *                          XNU_PTRAUTH_SIGNED_PTR("cpu_data.istackptr") istackptr;
	vm_offset_t                     intstack_top;
#if __arm64__
	void *                          XNU_PTRAUTH_SIGNED_PTR("cpu_data.excepstackptr") excepstackptr;
	vm_offset_t                     excepstack_top;
#endif
	thread_t                        cpu_active_thread;
	vm_offset_t                     cpu_active_stack;
	cpu_id_t                        cpu_id;
	volatile cpu_signal_t           cpu_signal;
	ast_t                           cpu_pending_ast;
	cache_dispatch_t                cpu_cache_dispatch;

#if __arm64__
	uint64_t                        cpu_base_timebase;
	uint64_t                        cpu_timebase;
#endif
	bool                            cpu_hibernate; /* This cpu is currently hibernating the system */
	bool                            cpu_running;
	bool                            cluster_master;
#if __ARM_ARCH_8_5__
	bool                            sync_on_cswitch;
#endif /* __ARM_ARCH_8_5__ */
	/* true if processor_start() or processor_exit() is operating on this CPU */
	bool                            in_state_transition;

	uint32_t                        cpu_decrementer;
	get_decrementer_t               cpu_get_decrementer_func;
	set_decrementer_t               cpu_set_decrementer_func;
	fiq_handler_t                   cpu_get_fiq_handler;

	void                            *cpu_tbd_hardware_addr;
	void                            *cpu_tbd_hardware_val;

	processor_idle_t                cpu_idle_notify;
	uint64_t                        cpu_idle_latency;
	uint64_t                        cpu_idle_pop;

#if     __ARM_KERNEL_PROTECT__
	vm_offset_t                     cpu_exc_vectors;
#endif /* __ARM_KERNEL_PROTECT__ */
	vm_offset_t                     cpu_reset_handler;
	uintptr_t                       cpu_reset_assist;
	uint32_t                        cpu_reset_type;

	unsigned int                    interrupt_source;
	void                            *cpu_int_state;
	IOInterruptHandler              interrupt_handler;
	void                            *interrupt_nub;
	void                            *interrupt_target;
	void                            *interrupt_refCon;

	idle_timer_t                    idle_timer_notify;
	void                            *idle_timer_refcon;
	uint64_t                        idle_timer_deadline;

	uint64_t                        rtcPop;
	rtclock_timer_t                 rtclock_timer;
	struct _rtclock_data_           *rtclock_datap;

	arm_debug_state_t               *cpu_user_debug; /* Current debug state */
	vm_offset_t                     cpu_debug_interface_map;

	volatile int                    debugger_active;
	volatile int                    PAB_active; /* Tells the console if we are dumping backtraces */

	void                            *cpu_xcall_p0;
	void                            *cpu_xcall_p1;
	void                            *cpu_imm_xcall_p0;
	void                            *cpu_imm_xcall_p1;


#if     __arm64__
	vm_offset_t                     coresight_base[CORESIGHT_REGIONS];
#endif


	/* CCC ARMv8 registers */
	uint64_t                        cpu_regmap_paddr;

	uint32_t                        cpu_phys_id;
	uint32_t                        cpu_l2_access_penalty;
	platform_error_handler_t        platform_error_handler;

	int                             cpu_mcount_off;

	#define ARM_CPU_ON_SLEEP_PATH   0x50535553UL
	volatile unsigned int           cpu_sleep_token;
	unsigned int                    cpu_sleep_token_last;

	cluster_type_t                  cpu_cluster_type;
	uint32_t                        cpu_cluster_id;
	uint32_t                        cpu_l2_id;
	uint32_t                        cpu_l2_size;
	uint32_t                        cpu_l3_id;
	uint32_t                        cpu_l3_size;

	enum {
		CPU_NOT_HALTED = 0,
		CPU_HALTED,
		CPU_HALTED_WITH_STATE
	}                               halt_status;
#if defined(HAS_APPLE_PAC)
	uint64_t                        rop_key;
	uint64_t                        jop_key;
#endif /* defined(HAS_APPLE_PAC) */

	/* large structs with large alignment requirements */

	/* double-buffered performance counter data */
	uint64_t                        *cpu_kpc_buf[2];
	/* PMC shadow and reload value buffers */
	uint64_t                        *cpu_kpc_shadow;
	uint64_t                        *cpu_kpc_reload;

#if CONFIG_CPU_COUNTERS
	struct mt_cpu                   cpu_monotonic;
#endif /* CONFIG_CPU_COUNTERS */

	cpu_stat_t                      cpu_stat;
#if !XNU_MONITOR
	struct pmap_cpu_data            cpu_pmap_cpu_data;
#endif
	dbgwrap_thread_state_t          halt_state;
#if DEVELOPMENT || DEBUG
	uint64_t                        wfe_count;
	uint64_t                        wfe_deadline_checks;
	uint64_t                        wfe_terminations;
#endif
#if CONFIG_KCOV
	kcov_cpu_data_t                 cpu_kcov_data;
#endif
#if __arm64__
	/**
	 * Stash the state of the system when an IPI is received. This state is
	 * dumped if a panic is subsequently triggered.
	 */
	uint64_t ipi_pc;
	uint64_t ipi_lr;
	uint64_t ipi_fp;

	/* Encoded data to store in TPIDR_EL0 on context switch */
	uint64_t                        cpu_tpidr_el0;
#endif

#ifdef APPLEEVEREST
	/* PAs used to apply pio locks in early boot. */
	uint64_t cpu_reg_paddr;
	uint64_t acc_reg_paddr;
	uint64_t cpm_reg_paddr;
#endif

} cpu_data_t;

extern  cpu_data_entry_t                CpuDataEntries[MAX_CPUS];
PERCPU_DECL(cpu_data_t, cpu_data);
#define BootCpuData                     __PERCPU_NAME(cpu_data)
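/*
 * Access sketch (illustrative, assuming the PERCPU_GET accessor from
 * <kern/percpu.h>): each CPU's cpu_data lives in per-CPU storage, and the
 * boot CPU's copy is the percpu prototype reached through the __PERCPU_NAME
 * alias above, e.g.:
 *
 *	cpu_data_t *cdp  = PERCPU_GET(cpu_data);  ... current CPU's cpu_data
 *	cpu_data_t *boot = &BootCpuData;          ... boot CPU's copy
 */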
extern  boot_args                      *BootArgs;

#if __arm64__
extern  unsigned int                    LowResetVectorBase;
extern  unsigned int                    LowResetVectorEnd;
#if WITH_CLASSIC_S2R
extern  uint8_t                         SleepToken[8];
#endif
extern  unsigned int                    LowExceptionVectorBase;
#else
#error Unknown arch
#endif

extern cpu_data_t      *cpu_datap(int cpu);
extern cpu_data_t      *cpu_data_alloc(boolean_t is_boot);
extern void             cpu_stack_alloc(cpu_data_t*);
extern void             cpu_data_init(cpu_data_t *cpu_data_ptr);
extern void             cpu_data_register(cpu_data_t *cpu_data_ptr);
extern cpu_data_t      *processor_to_cpu_datap( processor_t processor);
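/*
 * Usage sketch (illustrative only, based on the declarations above; the real
 * bring-up sequence lives in the cpu/startup code): a secondary CPU's data
 * would be allocated, initialized and registered before the CPU starts, and
 * can later be looked up by logical index, e.g.:
 *
 *	cpu_data_t *cdp = cpu_data_alloc(FALSE);
 *	cpu_data_init(cdp);
 *	cpu_data_register(cdp);
 *	...
 *	assert(cpu_datap(cdp->cpu_number) == cdp);
 */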

#if __arm64__
typedef struct sysreg_restore {
	uint64_t                tcr_el1;
} sysreg_restore_t;

extern sysreg_restore_t sysreg_restore;
#endif  /* __arm64__ */

#endif  /* ARM_CPU_DATA_INTERNAL */