// xref: /xnu-11215/iokit/Kernel/arm/AppleARMSMP.cpp (revision 8d741a5d)
/*
 * Copyright (c) 2019 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

extern "C" {
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <pexpert/arm64/board_config.h>
};

#include <kern/bits.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kperf/kperf.h>
#include <machine/machine_routines.h>
#include <libkern/OSAtomic.h>
#include <libkern/c++/OSCollection.h>
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformActions.h>
#include <IOKit/IOPMGR.h>
#include <IOKit/IOReturn.h>
#include <IOKit/IOService.h>
#include <IOKit/PassthruInterruptController.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <Kernel/IOKitKernelInternal.h>

#if USE_APPLEARMSMP

// FIXME: These are in <kern/misc_protos.h> but that file has other deps that aren't being resolved
extern "C" void console_suspend();
extern "C" void console_resume();

static PassthruInterruptController *gCPUIC;
static IOPMGR *gPMGR;
static IOInterruptController *gAIC;
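// True when IPIs have to be routed through the AIC as external interrupts
// (legacy, pre-HAS_IPI platforms); otherwise the ml_cpu_signal* fast-IPI path is used.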
static bool aic_ipis = false;
static const ml_topology_info *topology_info;

// cpu_id of the boot processor
static unsigned int boot_cpu;

// array index is a cpu_id (so some elements may be NULL)
static processor_t *machProcessors;

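// Bit i of cpu_power_state_mask is set while logical CPU i is powered on.
// all_clusters_mask/online_clusters_mask are indexed by cluster_id and track
// which clusters exist and which are currently powered, respectively.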
static uint64_t cpu_power_state_mask;
static uint64_t all_clusters_mask;
static uint64_t online_clusters_mask;

static void
processor_idle_wrapper(cpu_id_t /*cpu_id*/, boolean_t enter, uint64_t *new_timeout_ticks)
{
	if (enter) {
		gPMGR->enterCPUIdle(new_timeout_ticks);
	} else {
		gPMGR->exitCPUIdle(new_timeout_ticks);
	}
}

static void
idle_timer_wrapper(void */*refCon*/, uint64_t *new_timeout_ticks)
{
	gPMGR->updateCPUIdle(new_timeout_ticks);
}

static OSDictionary *
matching_dict_for_cpu_id(unsigned int cpu_id)
{
	// The cpu-id property in EDT doesn't necessarily match the dynamically
	// assigned logical ID in XNU, so look up the cpu node by the physical
	// (cluster/core) ID instead.
	OSSymbolConstPtr cpuTypeSymbol = OSSymbol::withCString("cpu");
	OSSymbolConstPtr cpuIdSymbol = OSSymbol::withCString("reg");
	OSDataPtr cpuId = OSData::withValue(topology_info->cpus[cpu_id].phys_id);

	OSDictionary *propMatch = OSDictionary::withCapacity(4);
	propMatch->setObject(gIODTTypeKey, cpuTypeSymbol);
	propMatch->setObject(cpuIdSymbol, cpuId);

	OSDictionary *matching = IOService::serviceMatching("IOPlatformDevice");
	matching->setObject(gIOPropertyMatchKey, propMatch);

	propMatch->release();
	cpuTypeSymbol->release();
	cpuIdSymbol->release();
	cpuId->release();

	return matching;
}

static void
register_aic_handlers(const ml_topology_cpu *cpu_info,
    ipi_handler_t ipi_handler,
    perfmon_interrupt_handler_func pmi_handler)
{
	OSDictionary *matching = matching_dict_for_cpu_id(cpu_info->cpu_id);
	IOService *cpu = IOService::waitForMatchingService(matching, UINT64_MAX);
	matching->release();

	OSArray *irqs = (OSArray *) cpu->getProperty(gIOInterruptSpecifiersKey);
	if (!irqs) {
		panic("Error finding interrupts for CPU %d", cpu_info->cpu_id);
	}

	unsigned int irqcount = irqs->getCount();

	if (irqcount == 3) {
		// Legacy configuration, for !HAS_IPI chips (pre-Skye).
		if (cpu->registerInterrupt(0, NULL, (IOInterruptAction)ipi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(0) != kIOReturnSuccess ||
		    cpu->registerInterrupt(2, NULL, (IOInterruptAction)ipi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(2) != kIOReturnSuccess) {
			panic("Error registering IPIs");
		}
#if !defined(HAS_IPI)
		// Ideally this should be decided by EDT, but first we need to update EDT
		// to default to fast IPIs on modern platforms.
		aic_ipis = true;
#endif
	}

	// Conditional, because on Skye and later, we use an FIQ instead of an external IRQ.
	if (pmi_handler && irqcount == 3) {
		if (cpu->registerInterrupt(1, NULL, (IOInterruptAction)(void (*)(void))pmi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(1) != kIOReturnSuccess) {
			panic("Error registering PMI");
		}
	}
}

static void
cpu_boot_thread(void */*unused0*/, wait_result_t /*unused1*/)
{
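	// Wait for the platform expert to come up before probing for the
	// interrupt controller and PMGR services below.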
	OSDictionary *matching = IOService::serviceMatching("IOPlatformExpert");
	IOService::waitForMatchingService(matching, UINT64_MAX);
	matching->release();

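	// The passthru controller takes ownership of the external interrupt vector
	// and forwards it to the child AIC driver once that controller registers
	// itself (see PE_handle_ext_interrupt below).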
	gCPUIC = new PassthruInterruptController;
	if (!gCPUIC || !gCPUIC->init()) {
		panic("Can't initialize PassthruInterruptController");
	}
	gAIC = static_cast<IOInterruptController *>(gCPUIC->waitForChildController());

	ml_set_max_cpus(topology_info->max_cpu_id + 1);

	matching = IOService::serviceMatching("IOPMGR");
	gPMGR = OSDynamicCast(IOPMGR,
	    IOService::waitForMatchingService(matching, UINT64_MAX));
	matching->release();

	const size_t array_size = (topology_info->max_cpu_id + 1) * sizeof(*machProcessors);
	machProcessors = static_cast<processor_t *>(zalloc_permanent(array_size, ZALIGN_PTR));

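	// Register every CPU described by the topology with the scheduler and hook
	// up its IPI/PMI handlers.  Nothing is booted yet; that happens below once
	// all processors have been registered.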
	for (unsigned int cpu = 0; cpu < topology_info->num_cpus; cpu++) {
		const ml_topology_cpu *cpu_info = &topology_info->cpus[cpu];
		const unsigned int cpu_id = cpu_info->cpu_id;
		ml_processor_info_t this_processor_info;
		ipi_handler_t ipi_handler;
		perfmon_interrupt_handler_func pmi_handler;

		memset(&this_processor_info, 0, sizeof(this_processor_info));
		this_processor_info.cpu_id = reinterpret_cast<cpu_id_t>(cpu_id);
		this_processor_info.phys_id = cpu_info->phys_id;
		this_processor_info.log_id = cpu_id;
		this_processor_info.cluster_id = cpu_info->cluster_id;
		this_processor_info.cluster_type = cpu_info->cluster_type;
		this_processor_info.l2_cache_size = cpu_info->l2_cache_size;
		this_processor_info.l2_cache_id = cpu_info->l2_cache_id;
		this_processor_info.l3_cache_size = cpu_info->l3_cache_size;
		this_processor_info.l3_cache_id = cpu_info->l3_cache_id;

		gPMGR->initCPUIdle(&this_processor_info);
		this_processor_info.processor_idle = &processor_idle_wrapper;
		this_processor_info.idle_timer = &idle_timer_wrapper;

		kern_return_t result = ml_processor_register(&this_processor_info,
		    &machProcessors[cpu_id], &ipi_handler, &pmi_handler);
		if (result == KERN_FAILURE) {
			panic("ml_processor_register failed: %d", result);
		}
		register_aic_handlers(cpu_info, ipi_handler, pmi_handler);
	}

#if USE_APPLEARMSMP
	/*
	 * Now that all of the processors are registered with the kernel,
	 * it's safe to boot them all up.
	 *
	 * These phases are separated to ensure all the psets and their
	 * relationships are initialized before other processors start
	 * traversing those linkages.
	 *
	 * The boot cpu must be 'booted' first to finish initializing its IPI
	 * handler before other processors could start sending it IPIs.
	 */

	processor_boot(machProcessors[boot_cpu]);

	for (unsigned int cpu = 0; cpu < topology_info->num_cpus; cpu++) {
		const ml_topology_cpu *cpu_info = &topology_info->cpus[cpu];
		const unsigned int cpu_id = cpu_info->cpu_id;
		if (cpu_id != boot_cpu) {
			processor_boot(machProcessors[cpu_id]);
		}
	}
#endif /* USE_APPLEARMSMP */

	ml_cpu_init_completed();
	IOService::publishResource(gIOAllCPUInitializedKey, kOSBooleanTrue);
}

void
IOCPUInitialize(void)
{
	topology_info = ml_get_topology_info();
	boot_cpu = topology_info->boot_cpu->cpu_id;

	for (unsigned int i = 0; i < topology_info->num_clusters; i++) {
		bit_set(all_clusters_mask, topology_info->clusters[i].cluster_id);
	}
	// iBoot powers up every cluster (at least for now)
	online_clusters_mask = all_clusters_mask;

	thread_t thread;
	kernel_thread_start(&cpu_boot_thread, NULL, &thread);
	thread_set_thread_name(thread, "cpu_boot_thread");
	thread_deallocate(thread);
}

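// cpu_id_t values handed to the PE_cpu_* callbacks are just the logical cpu_id
// stuffed into a pointer-sized token (see the reinterpret_cast in
// cpu_boot_thread), so converting back is a plain integer cast.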
static unsigned int
target_to_cpu_id(cpu_id_t in)
{
	return (unsigned int)(uintptr_t)in;
}

/*
 * This is IOKit KPI, but not used by anyone today.
 */
kern_return_t __abortlike
PE_cpu_start_from_kext(cpu_id_t target,
    __unused vm_offset_t start_paddr, __unused vm_offset_t arg_paddr)
{
	panic("PE_cpu_start_from_kext unimplemented");
}


// Release a CPU from reset.  Runs from a different CPU (obviously).
void
PE_cpu_start_internal(cpu_id_t target,
    __unused vm_offset_t start_paddr, __unused vm_offset_t arg_paddr)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	assert(cpu_id != cpu_number()); /* we can't be already on the CPU to be started */

#if APPLEVIRTUALPLATFORM
	/* When running virtualized, the reset vector address must be passed to PMGR explicitly */
	extern vm_offset_t reset_vector_vaddr;
	gPMGR->enableCPUCore(cpu_id, ml_vtophys(reset_vector_vaddr));
#else /* APPLEVIRTUALPLATFORM */
	gPMGR->enableCPUCore(cpu_id, 0);
#endif /* APPLEVIRTUALPLATFORM */
}

// Initialize a CPU when it first comes up.  Runs on the target CPU.
// |bootb| is true on the initial boot, false on S2R resume.
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (!bootb && cpu_id == boot_cpu && ml_is_quiescing()) {
		IOCPURunPlatformActiveActions();
	}

	ml_broadcast_cpu_event(CPU_BOOTED, cpu_id);

	assert_ml_cpu_signal_is_enabled(false);

	/* Send myself the first IPI to clear SIGPdisabled. */
	PE_cpu_signal(target, target);

	if (ml_get_interrupts_enabled()) {
		/*
		 * Only the boot CPU during processor_boot reaches here with
		 * interrupts enabled. Other CPUs enable interrupts in
		 * processor_cpu_reinit.
		 */
		assert(bootb);
		assert3u(cpu_id, ==, boot_cpu);
		ml_wait_for_cpu_signal_to_enable();
		assert_ml_cpu_signal_is_enabled(true);
		ml_cpu_up();
	}
}

/*
 * This is IOKit KPI, but not used by anyone today.
 */
void __abortlike
PE_cpu_halt(cpu_id_t target)
{
	panic("PE_cpu_halt unimplemented");
}

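// IPI delivery: on legacy platforms the IPI is an external interrupt sent via
// the AIC driver; everywhere else it is the architectural fast-IPI path
// (ml_cpu_signal and friends), keyed by the target's physical ID.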
void
PE_cpu_signal(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->sendIPI(cpu->cpu_id, false);
	} else {
		ml_cpu_signal(cpu->phys_id);
	}
}

void
PE_cpu_signal_deferred(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->sendIPI(cpu->cpu_id, true);
	} else {
		ml_cpu_signal_deferred(cpu->phys_id);
	}
}

void
PE_cpu_signal_cancel(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->cancelDeferredIPI(cpu->cpu_id);
	} else {
		ml_cpu_signal_retract(cpu->phys_id);
	}
}

// Brings down one CPU core for S2R.  Runs on the target CPU.
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (cpu_id == boot_cpu) {
		IOCPURunPlatformQuiesceActions();
	} else {
		gPMGR->disableCPUCore(cpu_id);
	}

	ml_broadcast_cpu_event(CPU_DOWN, cpu_id);
	ml_arm_sleep();
}

static bool
is_cluster_powering_down(int cpu_id)
{
	// Don't kill the cluster power if any other CPUs in this cluster are still awake
	unsigned int target_cluster_id = topology_info->cpus[cpu_id].cluster_id;
	for (int i = 0; i < topology_info->num_cpus; i++) {
		if (topology_info->cpus[i].cluster_id == target_cluster_id &&
		    cpu_id != i &&
		    bit_test(cpu_power_state_mask, i)) {
			return false;
		}
	}
	return true;
}

// Takes one secondary CPU core offline at runtime.  Runs on the target CPU.
// Returns true if the platform code should go into deep sleep WFI, false otherwise.
bool
PE_cpu_down(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);
	if (ml_is_quiescing()) {
		assert(cpu_id != boot_cpu);
	}
	gPMGR->disableCPUCore(cpu_id);
	ml_broadcast_cpu_event(CPU_DOWN, cpu_id);
	return topology_info->cluster_power_down && is_cluster_powering_down(cpu_id);
}

void
PE_handle_ext_interrupt(void)
{
	gCPUIC->externalInterrupt();
}

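// Called as each CPU powers down.  Once the last running CPU in a cluster has
// gone offline (and EDT allows cluster power gating), power off the whole cluster.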
void
PE_cpu_power_disable(int cpu_id)
{
	assert(bit_test(cpu_power_state_mask, cpu_id));

	if (cpu_id == boot_cpu && ml_is_quiescing()) {
		return;
	}

	bit_clear(cpu_power_state_mask, cpu_id);

	if (!topology_info->cluster_power_down) {
		return;
	}

	// Don't kill the cluster power if any other CPUs in this cluster are still awake
	unsigned int target_cluster_id = topology_info->cpus[cpu_id].cluster_id;
	if (!is_cluster_powering_down(cpu_id)) {
		return;
	}

	if (processor_should_kprintf(machProcessors[cpu_id], false)) {
		kprintf("%s>turning off power to cluster %d\n", __FUNCTION__, target_cluster_id);
	}
	ml_broadcast_cpu_event(CLUSTER_EXIT_REQUESTED, target_cluster_id);
	bit_clear(online_clusters_mask, target_cluster_id);
	gPMGR->disableCPUCluster(target_cluster_id);
}

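// KDP path: reports whether the cluster containing |cpu_id| is still powered,
// i.e. whether the debugger can safely reach that CPU.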
bool
PE_cpu_power_check_kdp(int cpu_id)
{
	if (!topology_info || !topology_info->cluster_power_down) {
		return true;
	}

	unsigned int cluster_id = topology_info->cpus[cpu_id].cluster_id;
	return bit_test(online_clusters_mask, cluster_id);
}

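// Counterpart of PE_cpu_power_disable: powers a cluster back up before the
// first CPU in it comes online.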
void
PE_cpu_power_enable(int cpu_id)
{
	assert(!bit_test(cpu_power_state_mask, cpu_id));

	if (cpu_id == boot_cpu && ml_is_quiescing()) {
		return;
	}

	bit_set(cpu_power_state_mask, cpu_id);

	if (!topology_info->cluster_power_down) {
		return;
	}

	unsigned int cluster_id = topology_info->cpus[cpu_id].cluster_id;
	if (!bit_test(online_clusters_mask, cluster_id)) {
		if (processor_should_kprintf(machProcessors[cpu_id], true)) {
			kprintf("%s>turning on power to cluster %d\n", __FUNCTION__, cluster_id);
		}
		gPMGR->enableCPUCluster(cluster_id);
		bit_set(online_clusters_mask, cluster_id);
		ml_broadcast_cpu_event(CLUSTER_ACTIVE, cluster_id);
	}
}

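// System sleep (S2R) entry point: puts every non-boot CPU to sleep, then sleeps
// the boot CPU itself; execution resumes past that point on wake, after which
// the secondary CPUs are woken back up.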
void
IOCPUSleepKernel(void)
{
	IOPMrootDomain  *rootDomain = IOService::getPMRootDomain();
	unsigned int i;

	printf("IOCPUSleepKernel enter\n");
	sched_override_available_cores_for_sleep();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU.  Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the non-boot CPUs.
	ml_set_is_quiescing(true);
	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_sleep(machProcessors[cpu_id]);
		}
	}

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */
	processor_sleep(machProcessors[boot_cpu]);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 *
	 * The reconfig engine is programmed to power up all clusters on S2R resume.
	 */
	online_clusters_mask = all_clusters_mask;

	ml_set_is_quiescing(false);

	rootDomain->start_watchdog_timer();

	console_resume();

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_wake(machProcessors[cpu_id]);
		}
	}

	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );
	IOPlatformActionsPostResume();

	sched_restore_available_cores_after_sleep();

	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}

#endif /* USE_APPLEARMSMP */