/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

extern "C" {
#include <pexpert/pexpert.h>
#include <kern/cpu_number.h>
extern void kperf_kernel_configure(char *);
}

#include <machine/machine_routines.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <libkern/c++/OSSharedPtr.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOCPU.h>
#include "IOKitKernelInternal.h"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>

extern "C" void console_suspend();
extern "C" void console_resume();
extern "C" void sched_override_available_cores_for_sleep(void);
extern "C" void sched_restore_available_cores_after_sleep(void);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOLock *gIOCPUsLock;
static OSSharedPtr<OSArray> gIOCPUs;
static OSSharedPtr<const OSSymbol> gIOCPUStateKey;
static OSSharedPtr<OSString> gIOCPUStateNames[kIOCPUStateCount];

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if !USE_APPLEARMSMP

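/*
 * Set up the global CPU registry: the lock and array that track every
 * registered IOCPU, plus the OSString table used to publish each CPU's
 * state in the registry. (Presumably invoked once during early IOKit
 * initialization; the call site lives outside this file.)
 */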
void
IOCPUInitialize(void)
{
	gIOCPUsLock = IOLockAlloc();
	gIOCPUs = OSArray::withCapacity(1);

	gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

	gIOCPUStateNames[kIOCPUStateUnregistered] =
	    OSString::withCStringNoCopy("Unregistered");
	gIOCPUStateNames[kIOCPUStateUninitalized] =
	    OSString::withCStringNoCopy("Uninitalized");
	gIOCPUStateNames[kIOCPUStateStopped] =
	    OSString::withCStringNoCopy("Stopped");
	gIOCPUStateNames[kIOCPUStateRunning] =
	    OSString::withCStringNoCopy("Running");
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * This is IOKit KPI, but not used by anyone today.
 */
kern_return_t __abortlike
PE_cpu_start_from_kext(cpu_id_t target,
    __unused vm_offset_t start_paddr, __unused vm_offset_t arg_paddr)
{
	panic("PE_cpu_start_from_kext unimplemented");
}

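/*
 * In this legacy (non-APPLEARMSMP) path, the opaque cpu_id_t handed to the
 * PE_cpu_* shims is really a pointer to the registered IOCPU object, so the
 * shims below simply cast and forward to the matching IOCPU virtual method.
 */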
void
PE_cpu_start_internal(cpu_id_t target,
    vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
	IOCPU *targetCPU = (IOCPU *)target;

	targetCPU->startCPU(start_paddr, arg_paddr);
}

/*
 * This is IOKit public KPI, though nothing uses it.
 */
void __abortlike
PE_cpu_halt(cpu_id_t target)
{
	panic("PE_cpu_halt unimplemented");
}

void
PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
	IOCPU *sourceCPU = (IOCPU *)source;
	IOCPU *targetCPU = (IOCPU *)target;

	sourceCPU->signalCPU(targetCPU);
}

void
PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
{
	IOCPU *sourceCPU = (IOCPU *)source;
	IOCPU *targetCPU = (IOCPU *)target;

	sourceCPU->signalCPUDeferred(targetCPU);
}

void
PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
{
	IOCPU *sourceCPU = (IOCPU *)source;
	IOCPU *targetCPU = (IOCPU *)target;

	sourceCPU->signalCPUCancel(targetCPU);
}

void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

	if (targetCPU == NULL) {
		panic("%s: invalid target CPU %p", __func__, target);
	}

#if defined(__arm64__)
	assert_ml_cpu_signal_is_enabled(false);
#endif /* defined(__arm64__) */

	targetCPU->initCPU(bootb);

#if defined(__arm64__)
	if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) {
		assert(ml_is_quiescing());
	}

	if (ml_get_interrupts_enabled()) {
		assert(bootb);
		assert3u(targetCPU->getCPUNumber(), ==, (UInt32)master_cpu);
		/*
		 * We want to assert that the AIC self-IPI actually arrives
		 * here, but after many trials and tribulations, I found that
		 * registering that interrupt handler is deeply entangled with
		 * and asynchronous to the CPU booting, so it can only be a
		 * 'hopefully it'll happen later' thing. We will still check
		 * that it did happen before we next enter S2R.
		 *
		 * We'll publish that the boot processor can have timers
		 * migrated to it a little earlier than it is truly ready,
		 * but fortunately that only happens on the next S2R, by which
		 * time setup should have completed.
		 */
		bool intr = ml_set_interrupts_enabled(FALSE);

		ml_cpu_up();

		ml_set_interrupts_enabled(intr);
	}
#endif /* defined(__arm64__) */
}

void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	IOCPU *targetCPU = (IOCPU*)target;
#if defined(__arm64__)
	if (targetCPU->getCPUNumber() == (UInt32)master_cpu) {
		assert(ml_is_quiescing());
	}
#endif /* defined(__arm64__) */
	targetCPU->quiesceCPU();
}

#if defined(__arm64__)
static perfmon_interrupt_handler_func pmi_handler = NULL;

kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
	pmi_handler = handler;

	return KERN_SUCCESS;
}

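/*
 * Interrupt index 1 on the CPU nub carries the performance monitor
 * interrupt in this path: enabling registers the handler installed above
 * on the target CPU's provider and unmasks it. Note that the disable path
 * only masks the interrupt; it never unregisters the handler.
 */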
void
PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
	IOCPU *targetCPU = (IOCPU*)target;

	if (targetCPU == nullptr) {
		return;
	}

	if (enable) {
		targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)(void (*)(void))pmi_handler, NULL);
		targetCPU->getProvider()->enableInterrupt(1);
	} else {
		targetCPU->getProvider()->disableInterrupt(1);
	}
}
#endif

bool
PE_cpu_power_check_kdp(int cpu_id)
{
	return true;
}

#endif /* !USE_APPLEARMSMP */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOService

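/*
 * IOCPU is an abstract superclass; concrete platform CPU drivers subclass
 * it. The reserved slots below keep the vtable layout stable for binary
 * compatibility with external subclasses.
 */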
OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if !USE_APPLEARMSMP
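/*
 * Legacy (non-APPLEARMSMP) sleep sequence: run the pre-sleep platform
 * actions, halt every running non-boot CPU, then halt the boot CPU last.
 * On wake, the boot CPU resumes here and brings the stopped secondaries
 * back with processor_wake().
 */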
void
IOCPUSleepKernel(void)
{
#if defined(__x86_64__)
	extern IOCPU *currentShutdownTarget;
#endif
	unsigned int cnt, numCPUs;
	IOCPU *target;
	IOCPU *bootCPU = NULL;
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

	printf("IOCPUSleepKernel enter\n");
	sched_override_available_cores_for_sleep();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	numCPUs = gIOCPUs->getCount();
#if defined(__x86_64__)
	currentShutdownTarget = NULL;
#endif

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU. Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the CPUs.
	ml_set_is_quiescing(true);
	cnt = numCPUs;
	while (cnt--) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// We make certain that the bootCPU is the last to sleep.
		// We'll skip it for now, and halt it after finishing the
		// non-boot CPUs.
		if (target->getCPUNumber() == (UInt32)master_cpu) {
			bootCPU = target;
		} else if (target->getCPUState() == kIOCPUStateRunning) {
#if defined(__x86_64__)
			currentShutdownTarget = target;
#endif
			target->haltCPU();
			processor_sleep(target->getMachProcessor());
		}
	}

	assert(bootCPU != NULL);
	assert(cpu_number() == master_cpu);

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * On Intel, the system sleeps here, and it does not actually sleep
	 * the boot processor.
	 */

	bootCPU->haltCPU();
#if __arm64__
	/*
	 * On ARM, we sleep the boot processor, transitioning to the idle
	 * thread, whose interrupt stack drives the rest of sleep.
	 */
	processor_sleep(bootCPU->getMachProcessor());
#endif /* __arm64__ */
	ml_set_is_quiescing(false);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	rootDomain->start_watchdog_timer();

	console_resume();

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Wake the other CPUs.
	for (cnt = 0; cnt < numCPUs; cnt++) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// Skip the already-woken boot CPU.
		if (target->getCPUNumber() != (UInt32)master_cpu) {
			if (target->getCPUState() == kIOCPUStateRunning) {
				panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
			}

			if (target->getCPUState() == kIOCPUStateStopped) {
				processor_wake(target->getMachProcessor());
			}
		}
	}

	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );
	IOPlatformActionsPostResume();

	sched_restore_available_cores_after_sleep();

	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}

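/*
 * When USE_APPLEARMSMP is set, CPU bring-up and teardown is owned by the
 * AppleARMSMP path instead, so the IOCPU class is compiled in but
 * disabled: start() and detach() below bail out early via this predicate.
 */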
static bool
is_IOCPU_disabled(void)
{
	return false;
}
#else /* !USE_APPLEARMSMP */
static bool
is_IOCPU_disabled(void)
{
	return true;
}
#endif /* !USE_APPLEARMSMP */

bool
IOCPU::start(IOService *provider)
{
	if (is_IOCPU_disabled()) {
		return false;
	}

	if (!super::start(provider)) {
		return false;
	}

	_cpuGroup = gIOCPUs;
	cpuNub = provider;

	IOLockLock(gIOCPUsLock);
	gIOCPUs->setObject(this);
	IOLockUnlock(gIOCPUsLock);

	// Correct the bus, cpu and timebase frequencies in the device tree.
	if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
		OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
		provider->setProperty("bus-frequency", busFrequency.get());
	} else {
		OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
		provider->setProperty("bus-frequency", busFrequency.get());
	}

	if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
		OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
		provider->setProperty("clock-frequency", cpuFrequency.get());
	} else {
		OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
		provider->setProperty("clock-frequency", cpuFrequency.get());
	}

	OSSharedPtr<OSData> timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
	provider->setProperty("timebase-frequency", timebaseFrequency.get());

	super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8);

	setCPUNumber(0);
	setCPUState(kIOCPUStateUnregistered);

	return true;
}

void
IOCPU::detach(IOService *provider)
{
	if (is_IOCPU_disabled()) {
		return;
	}

	super::detach(provider);
	IOLockLock(gIOCPUsLock);
	unsigned int index = gIOCPUs->getNextIndexOfObject(this, 0);
	if (index != (unsigned int)-1) {
		gIOCPUs->removeObject(index);
	}
	IOLockUnlock(gIOCPUsLock);
}

OSObject *
IOCPU::getProperty(const OSSymbol *aKey) const
{
	if (aKey == gIOCPUStateKey) {
		return gIOCPUStateNames[_cpuState].get();
	}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	return super::getProperty(aKey);
#pragma clang diagnostic pop
}

bool
IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
	if (aKey == gIOCPUStateKey) {
		return false;
	}

	return super::setProperty(aKey, anObject);
}

bool
IOCPU::serializeProperties(OSSerialize *serialize) const
{
	bool result;
	OSSharedPtr<OSDictionary> dict = dictionaryWithProperties();
	if (!dict) {
		return false;
	}
	dict->setObject(gIOCPUStateKey.get(), gIOCPUStateNames[_cpuState].get());
	result = dict->serialize(serialize);
	return result;
}

IOReturn
IOCPU::setProperties(OSObject *properties)
{
	OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
	OSString *stateStr;
	IOReturn result;

	if (dict == NULL) {
		return kIOReturnUnsupported;
	}

	stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey.get()));
	if (stateStr != NULL) {
		result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
		if (result != kIOReturnSuccess) {
			return result;
		}

		if (setProperty(gIOCPUStateKey.get(), stateStr)) {
			return kIOReturnSuccess;
		}

		return kIOReturnUnsupported;
	}

	return kIOReturnUnsupported;
}

void
IOCPU::signalCPU(IOCPU */*target*/)
{
}

void
IOCPU::signalCPUDeferred(IOCPU *target)
{
	// Our CPU may not support deferred IPIs,
	// so send a regular IPI by default
	signalCPU(target);
}

void
IOCPU::signalCPUCancel(IOCPU */*target*/)
{
	// Meant to cancel signals sent by
	// signalCPUDeferred; unsupported
	// by default
}

void
IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}

UInt32
IOCPU::getCPUNumber(void)
{
	return _cpuNumber;
}

void
IOCPU::setCPUNumber(UInt32 cpuNumber)
{
	_cpuNumber = cpuNumber;
	super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

UInt32
IOCPU::getCPUState(void)
{
	return _cpuState;
}

void
IOCPU::setCPUState(UInt32 cpuState)
{
	if (cpuState < kIOCPUStateCount) {
		_cpuState = cpuState;
	}
}

OSArray *
IOCPU::getCPUGroup(void)
{
	return _cpuGroup.get();
}

UInt32
IOCPU::getCPUGroupSize(void)
{
	return _cpuGroup->getCount();
}

processor_t
IOCPU::getMachProcessor(void)
{
	return machProcessor;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);



/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources)
{
	return initCPUInterruptController(sources, sources);
}

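/*
 * The vector table is carved out of zalloc_permanent memory: it comes back
 * zero-filled and is never freed for the lifetime of the system, which is
 * why the error path below only releases the per-vector locks allocated so
 * far and not the table itself.
 */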
IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
{
	int cnt;

	if (!super::init()) {
		return kIOReturnInvalid;
	}

	numSources = sources;
	numCPUs = cpus;

	vectors = (IOInterruptVector *)zalloc_permanent(numSources *
	    sizeof(IOInterruptVector), ZALIGN(IOInterruptVector));

	// Allocate a lock for each vector
	for (cnt = 0; cnt < numSources; cnt++) {
		vectors[cnt].interruptLock = IOLockAlloc();
		if (vectors[cnt].interruptLock == NULL) {
			for (cnt = 0; cnt < numSources; cnt++) {
				if (vectors[cnt].interruptLock != NULL) {
					IOLockFree(vectors[cnt].interruptLock);
				}
			}
			return kIOReturnNoResources;
		}
	}

	ml_set_max_cpus(numSources);
	return kIOReturnSuccess;
}

void
IOCPUInterruptController::registerCPUInterruptController(void)
{
	setProperty(gPlatformInterruptControllerName, kOSBooleanTrue);
	registerService();

	getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
	    this);
}

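/*
 * Give the CPU nub one interrupt specifier per source, all pointing at
 * this controller, so that generic IOService interrupt registration on the
 * nub resolves back to this class. For example, with numSources == 2 the
 * nub would end up with (sketch, not literal registry output):
 *
 *   IOInterruptControllers = (gPlatformInterruptControllerName,
 *                             gPlatformInterruptControllerName)
 *   IOInterruptSpecifiers  = (<OSData: 0>, <OSData: 1>)
 *
 * The properties are installed only once; a nub that already has both keys
 * is left untouched.
 */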
void
IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
	int cnt;
	OSSharedPtr<OSArray> specifier;
	OSSharedPtr<OSArray> controller;
	long tmpLong;

	if ((service->propertyExists(gIOInterruptControllersKey)) &&
	    (service->propertyExists(gIOInterruptSpecifiersKey))) {
		return;
	}

	// Create the interrupt specifier array.
	specifier = OSArray::withCapacity(numSources);
	for (cnt = 0; cnt < numSources; cnt++) {
		tmpLong = cnt;
		OSSharedPtr<OSData> tmpData = OSData::withValue(tmpLong);
		specifier->setObject(tmpData.get());
	}

	// Create the interrupt controller array.
	controller = OSArray::withCapacity(numSources);
	for (cnt = 0; cnt < numSources; cnt++) {
		controller->setObject(gPlatformInterruptControllerName);
	}

	// Put the two arrays into the property table.
	service->setProperty(gIOInterruptControllersKey, controller.get());
	service->setProperty(gIOInterruptSpecifiersKey, specifier.get());
}

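/*
 * vectors[0].interruptLock doubles as the controller-wide lock guarding
 * enabledCPUs: each CPU checks in here as it comes up, and once every CPU
 * has arrived, threads parked in registerInterrupt() below are woken.
 */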
void
IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
	IOInterruptHandler handler = OSMemberFunctionCast(
		IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

	assert(numCPUs > 0);

	ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, NULL);

	IOTakeLock(vectors[0].interruptLock);
	++enabledCPUs;

	if (enabledCPUs == numCPUs) {
		IOService::cpusRunning();
		thread_wakeup(this);
	}
	IOUnlock(vectors[0].interruptLock);
}

IOReturn
IOCPUInterruptController::registerInterrupt(IOService *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
	IOInterruptVector *vector;

	// Interrupts must be enabled, as this can allocate memory.
	assert(ml_get_interrupts_enabled() == TRUE);

	if (source >= numSources) {
		return kIOReturnNoResources;
	}

	vector = &vectors[source];

	// Get the lock for this vector.
	IOTakeLock(vector->interruptLock);

	// Make sure the vector is not in use.
	if (vector->interruptRegistered) {
		IOUnlock(vector->interruptLock);
		return kIOReturnNoResources;
	}

	// Fill in vector with the client's info.
	vector->handler = handler;
	vector->nub = nub;
	vector->source = source;
	vector->target = target;
	vector->refCon = refCon;

	// Get the vector ready. It starts hard disabled.
	vector->interruptDisabledHard = 1;
	vector->interruptDisabledSoft = 1;
	vector->interruptRegistered = 1;

	IOUnlock(vector->interruptLock);

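	// Don't return until every CPU has checked in and enabled its
	// interrupts: park the caller on this controller and let the
	// thread_wakeup() in enableCPUInterrupt() release it.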
	IOTakeLock(vectors[0].interruptLock);
	if (enabledCPUs != numCPUs) {
		assert_wait(this, THREAD_UNINT);
		IOUnlock(vectors[0].interruptLock);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		IOUnlock(vectors[0].interruptLock);
	}

	return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::getInterruptType(IOService */*nub*/,
    int /*source*/,
    int *interruptType)
{
	if (interruptType == NULL) {
		return kIOReturnBadArgument;
	}

	*interruptType = kIOInterruptTypeLevel;

	return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
    int /*source*/)
{
//  ml_set_interrupts_enabled(true);
	return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
	return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
    int /*source*/)
{
	ml_cause_interrupt();
	return kIOReturnSuccess;
}

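/*
 * Dispatch a CPU interrupt to the handler registered for its source. This
 * runs in interrupt context, so no locks are taken; the registered flag is
 * checked to reject vectors that were never filled in.
 */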
IOReturn
IOCPUInterruptController::handleInterrupt(void */*refCon*/,
    IOService */*nub*/,
    int source)
{
	IOInterruptVector *vector;

	vector = &vectors[source];

	if (!vector->interruptRegistered) {
		return kIOReturnInvalid;
	}

	vector->handler(vector->target, vector->refCon,
	    vector->nub, vector->source);

	return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
