/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 */

#include <IOKit/perfcontrol/IOPerfControl.h>

#include <stdatomic.h>

#include <kern/thread_group.h>
#include <kern/task.h>
#include <sys/coalition.h>
#include <kern/coalition.h>

#undef super
#define super OSObject
OSDefineMetaClassAndStructors(IOPerfControlClient, OSObject);

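/*
 * Per-boot shared state for every IOPerfControlClient: the registered
 * performance controller's callback interface, the lock protecting it, and the
 * list of clients whose devices registered before a controller showed up.
 * Lazily allocated by the first client to run init().
 */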
static IOPerfControlClient::IOPerfControlClientShared *_Atomic gIOPerfControlClientShared;

bool
IOPerfControlClient::init(IOService *driver, uint64_t maxWorkCapacity)
{
    // TODO: Remove this limit and implement dynamic table growth if workloads are found that exceed this
    if (maxWorkCapacity > kMaxWorkTableNumEntries) {
        maxWorkCapacity = kMaxWorkTableNumEntries;
    }

    if (!super::init()) {
        return false;
    }

    shared = atomic_load_explicit(&gIOPerfControlClientShared, memory_order_acquire);
    if (shared == nullptr) {
        IOPerfControlClient::IOPerfControlClientShared *expected = shared;
        shared = kalloc_type(IOPerfControlClientShared, Z_WAITOK);
        if (!shared) {
            return false;
        }

        atomic_init(&shared->maxDriverIndex, 0);

        shared->interface = PerfControllerInterface{
            .version = PERFCONTROL_INTERFACE_VERSION_NONE,
            .registerDevice =
                [](IOService *device) {
                    return kIOReturnSuccess;
                },
            .unregisterDevice =
                [](IOService *device) {
                    return kIOReturnSuccess;
                },
            .workCanSubmit =
                [](IOService *device, PerfControllerInterface::WorkState *state, WorkSubmitArgs *args) {
                    return false;
                },
            .workSubmit =
                [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkSubmitArgs *args) {
                },
            .workBegin =
                [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkBeginArgs *args) {
                },
            .workEnd =
                [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkEndArgs *args, bool done) {
                },
            .workUpdate =
                [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkUpdateArgs *args) {
                },
        };

        shared->interfaceLock = IOLockAlloc();
        if (!shared->interfaceLock) {
            goto shared_init_error;
        }

        shared->deviceRegistrationList = OSSet::withCapacity(4);
        if (!shared->deviceRegistrationList) {
            goto shared_init_error;
        }

        if (!atomic_compare_exchange_strong_explicit(&gIOPerfControlClientShared, &expected, shared, memory_order_acq_rel,
            memory_order_acquire)) {
            IOLockFree(shared->interfaceLock);
            shared->deviceRegistrationList->release();
            kfree_type(IOPerfControlClientShared, shared);
            shared = expected;
        }
    }
    workTable = NULL;
    workTableLock = NULL;

    // Note: driverIndex is not guaranteed to be unique if maxDriverIndex wraps around. It is intended for debugging only.
    driverIndex = atomic_fetch_add_explicit(&shared->maxDriverIndex, 1, memory_order_relaxed) + 1;

    // + 1 since index 0 is unused for kIOPerfControlClientWorkUntracked
    workTableLength = maxWorkCapacity + 1;
    assertf(workTableLength <= kWorkTableMaxSize, "%zu exceeds max allowed capacity of %zu", workTableLength, kWorkTableMaxSize);
    if (maxWorkCapacity > 0) {
        workTable = kalloc_type(WorkTableEntry, workTableLength, Z_WAITOK_ZERO);
        if (!workTable) {
            goto error;
        }
        workTableNextIndex = 1;

        workTableLock = IOSimpleLockAlloc();
        if (!workTableLock) {
            goto error;
        }
    }

    bzero(&clientData, sizeof(clientData));

    return true;

error:
    if (workTable) {
        kfree_type(WorkTableEntry, workTableLength, workTable);
        workTable = NULL;
    }
    if (workTableLock) {
        IOSimpleLockFree(workTableLock);
        workTableLock = NULL;
    }
    return false;
shared_init_error:
    if (shared) {
        if (shared->interfaceLock) {
            IOLockFree(shared->interfaceLock);
        }
        if (shared->deviceRegistrationList) {
            shared->deviceRegistrationList->release();
        }
        kfree_type(IOPerfControlClientShared, shared);
        shared = nullptr;
    }
    return false;
}

void
IOPerfControlClient::free()
{
    if (workTable) {
        kfree_type(WorkTableEntry, workTableLength, workTable);
    }
    if (workTableLock) {
        IOSimpleLockFree(workTableLock);
    }
    super::free();
}

IOPerfControlClient *
IOPerfControlClient::copyClient(IOService *driver, uint64_t maxWorkCapacity)
{
    IOPerfControlClient *client = new IOPerfControlClient;
    if (!client || !client->init(driver, maxWorkCapacity)) {
        panic("could not create IOPerfControlClient");
    }
    return client;
}
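
/*
 * Illustrative usage of the token-based API (a sketch, not part of this file;
 * `device` and the Work*Args values stand in for driver-specific state):
 *
 *     IOPerfControlClient *client = IOPerfControlClient::copyClient(this, 16);
 *     client->registerDevice(this, device);
 *     ...
 *     uint64_t token = client->workSubmit(device, &submitArgs);
 *     client->workBegin(device, token, &beginArgs);
 *     ...
 *     client->workEnd(device, token, &endArgs, true);
 *     ...
 *     client->unregisterDevice(this, device);
 *     client->release();
 */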

/* Convert the per driver token into a globally unique token for the performance
 * controller's consumption. This is achieved by setting the driver's unique
 * index onto the high order bits. The performance controller is shared between
 * all drivers and must track all instances separately, while each driver has
 * its own token table, so this step is needed to avoid token collisions between
 * drivers.
 */
inline uint64_t
IOPerfControlClient::tokenToGlobalUniqueToken(uint64_t token)
{
    return token | (static_cast<uint64_t>(driverIndex) << kWorkTableIndexBits);
}
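
/*
 * Example (illustrative only): with kWorkTableIndexBits == 24, a client whose
 * driverIndex is 2 and whose per-driver token is 5 hands the performance
 * controller the global token (2ull << 24) | 5. The low-order bits always
 * equal the driver-local token.
 */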

/* Account resources used in a work item to the containing coalition.
 * Contingent upon the PerfController signaling that it wants resource accounting
 * in the registerDevice()/registerDriverDevice() calls. More device types can
 * be added here in the future.
 */
void
IOPerfControlClient::accountResources(coalition_t coal, PerfControllerInterface::PerfDeviceID device_type, PerfControllerInterface::ResourceAccounting *resources)
{
    switch (device_type) {
    case PerfControllerInterface::PerfDeviceID::kANE:
        if (coal != nullptr) {
            coalition_update_ane_stats(coal, resources->mach_time_delta, resources->energy_nj_delta);
        }
        break;

    default:
        assertf(false, "Unexpected device type for IOPerfControlClient::accountResources: %llu", static_cast<uint64_t>(device_type));
    }
}

/* With this implementation, tokens returned to the driver differ from tokens
 * passed to the performance controller. This implementation has the nice
 * property that tokens returned to the driver will always be between 1 and
 * the value of maxWorkCapacity passed by the driver to copyClient. The tokens
 * the performance controller sees will match on the lower order bits and have
 * the driver index set on the high order bits.
 */
uint64_t
IOPerfControlClient::allocateToken(thread_group *thread_group)
{
    uint64_t token = kIOPerfControlClientWorkUntracked;

#if CONFIG_THREAD_GROUPS
    auto s = IOSimpleLockLockDisableInterrupt(workTableLock);

    uint64_t num_tries = 0;
    size_t index = workTableNextIndex;
    // - 1 since entry 0 is for kIOPerfControlClientWorkUntracked
    while (num_tries < workTableLength - 1) {
        if (workTable[index].thread_group == nullptr) {
            thread_group_retain(thread_group);
            workTable[index].thread_group = thread_group;
            if (clientData.driverState.resource_accounting) {
                auto *coalition = task_get_coalition(current_task(), COALITION_TYPE_RESOURCE);
                assert(coalition != nullptr);
                coalition_retain(coalition);
                workTable[index].coal = coalition;
            }
            token = index;
            // next integer between 1 and workTableLength - 1
            workTableNextIndex = (index % (workTableLength - 1)) + 1;
            break;
        }
        // next integer between 1 and workTableLength - 1
        index = (index % (workTableLength - 1)) + 1;
        num_tries += 1;
    }
#if (DEVELOPMENT || DEBUG)
    if (token == kIOPerfControlClientWorkUntracked) {
        /* When investigating a panic here, first check that the driver is not leaking tokens.
         * If the driver is not leaking tokens and maximum is less than kMaxWorkTableNumEntries,
         * the driver should be modified to pass a larger value to copyClient.
         * If the driver is not leaking tokens and maximum is equal to kMaxWorkTableNumEntries,
         * this code will have to be modified to support dynamic table growth to support larger
         * numbers of tokens.
         */
        panic("Tokens allocated for this device exceeded maximum of %zu.",
            workTableLength - 1); // - 1 since entry 0 is for kIOPerfControlClientWorkUntracked
    }
#endif

    IOSimpleLockUnlockEnableInterrupt(workTableLock, s);
#endif

    return token;
}

void
IOPerfControlClient::deallocateToken(uint64_t token)
{
#if CONFIG_THREAD_GROUPS
    assertf(token != kIOPerfControlClientWorkUntracked, "Attempt to deallocate token kIOPerfControlClientWorkUntracked\n");
    assertf(token < workTableLength, "Attempt to deallocate token %llu which is out of bounds for the table size of %zu\n", token, workTableLength);
    auto s = IOSimpleLockLockDisableInterrupt(workTableLock);

    auto &entry = workTable[token];
    auto *thread_group = entry.thread_group;
    auto *coal = entry.coal;
    bzero(&entry, sizeof(entry));
    workTableNextIndex = token;

    IOSimpleLockUnlockEnableInterrupt(workTableLock, s);

    // This can call into the performance controller if the last reference is dropped here. Are we sure
    // the driver isn't holding any locks? If not, we may want to async this to another context.
    thread_group_release(thread_group);
    if (coal != nullptr) {
        coalition_release(coal);
    }
#endif
}

IOPerfControlClient::WorkTableEntry *
IOPerfControlClient::getEntryForToken(uint64_t token)
{
    if (token == kIOPerfControlClientWorkUntracked) {
        return nullptr;
    }

    if (token >= workTableLength) {
        panic("Invalid work token (%llu): index out of bounds.", token);
    }

    WorkTableEntry *entry = &workTable[token];
    assertf(entry->thread_group, "Invalid work token: %llu", token);
    return entry;
}

void
IOPerfControlClient::markEntryStarted(uint64_t token, bool started)
{
    if (token == kIOPerfControlClientWorkUntracked) {
        return;
    }

    if (token >= workTableLength) {
        panic("Invalid work token (%llu): index out of bounds.", token);
    }

    workTable[token].started = started;
}

#if CONFIG_THREAD_GROUPS

static struct thread_group *
threadGroupForDextService(IOService *device)
{
    assert(device);

    if (!device->hasUserServer()) {
        return NULL;
    }

    // For devices associated with a dext driver, this must be called from dext
    // context to ensure that the thread_group reference is valid.
    thread_t thread = current_thread();
    assert(get_threadtask(thread) != kernel_task);
    struct thread_group *thread_group = thread_group_get(thread);
    assert(thread_group != nullptr);
    return thread_group;
}

#endif /* CONFIG_THREAD_GROUPS */

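/*
 * Register a device with the performance controller on behalf of its driver.
 * For devices backed by a dext, this also captures the dext's thread group so
 * the controller can associate the device with that workload. If no controller
 * has registered yet, the client is parked on deviceRegistrationList and
 * replayed from registerPerformanceController().
 */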
IOReturn
IOPerfControlClient::registerDevice(IOService *driver, IOService *device)
{
    IOReturn ret = kIOReturnSuccess;
#if CONFIG_THREAD_GROUPS
    IOLockLock(shared->interfaceLock);

    clientData.device = device;

    if (device) {
        struct thread_group *dext_thread_group = threadGroupForDextService(device);
        if (dext_thread_group) {
            if (clientData.driverState.has_target_thread_group) {
                panic("driverState has already been initialized");
            }
            clientData.driverState.has_target_thread_group = true;
            clientData.driverState.target_thread_group_id = thread_group_get_id(dext_thread_group);
            clientData.driverState.target_thread_group_data = thread_group_get_machine_data(dext_thread_group);

            clientData.target_thread_group = dext_thread_group;
            thread_group_retain(dext_thread_group);
        }
    }

    if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_3) {
        ret = shared->interface.registerDriverDevice(driver, device, &clientData.driverState);
    } else if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_1) {
        ret = shared->interface.registerDevice(device);
    } else {
        shared->deviceRegistrationList->setObject(this);
    }

    IOLockUnlock(shared->interfaceLock);
#endif
    return ret;
}

void
IOPerfControlClient::unregisterDevice(IOService *driver, IOService *device)
{
#if CONFIG_THREAD_GROUPS
    IOLockLock(shared->interfaceLock);

    if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_3) {
        shared->interface.unregisterDriverDevice(driver, device, &clientData.driverState);
    } else if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_1) {
        shared->interface.unregisterDevice(device);
    } else {
        shared->deviceRegistrationList->removeObject(this);
    }

    if (clientData.driverState.has_target_thread_group) {
        thread_group_release(clientData.target_thread_group);
        clientData.target_thread_group = nullptr;

        clientData.driverState.has_target_thread_group = false;
        clientData.driverState.target_thread_group_id = ~0ull;
        clientData.driverState.target_thread_group_data = nullptr;
    }

    clientData.device = nullptr;

    IOLockUnlock(shared->interfaceLock);
#endif
}

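/*
 * Token-based work tracking: workSubmit() allocates a per-driver token tied to
 * the submitting thread's thread group and notifies the performance controller;
 * workBegin()/workEnd() then bracket execution of that work item. A return
 * value of kIOPerfControlClientWorkUntracked means the item is not tracked and
 * needs no further calls.
 */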
uint64_t
IOPerfControlClient::workSubmit(IOService *device, WorkSubmitArgs *args)
{
#if CONFIG_THREAD_GROUPS
    auto *thread_group = thread_group_get(current_thread());
    if (!thread_group) {
        return kIOPerfControlClientWorkUntracked;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(thread_group),
        .thread_group_data = thread_group_get_machine_data(thread_group),
        .work_data = nullptr,
        .work_data_size = 0,
        .started = false,
        .driver_state = &clientData.driverState
    };
    if (!shared->interface.workCanSubmit(device, &state, args)) {
        return kIOPerfControlClientWorkUntracked;
    }

    uint64_t token = allocateToken(thread_group);
    if (token != kIOPerfControlClientWorkUntracked) {
        state.work_data = &workTable[token].perfcontrol_data;
        state.work_data_size = sizeof(workTable[token].perfcontrol_data);
        shared->interface.workSubmit(device, tokenToGlobalUniqueToken(token), &state, args);
    }
    return token;
#else
    return kIOPerfControlClientWorkUntracked;
#endif
}

uint64_t
IOPerfControlClient::workSubmitAndBegin(IOService *device, WorkSubmitArgs *submitArgs, WorkBeginArgs *beginArgs)
{
#if CONFIG_THREAD_GROUPS
    auto *thread_group = thread_group_get(current_thread());
    if (!thread_group) {
        return kIOPerfControlClientWorkUntracked;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(thread_group),
        .thread_group_data = thread_group_get_machine_data(thread_group),
        .work_data = nullptr,
        .work_data_size = 0,
        .started = false,
        .driver_state = &clientData.driverState
    };
    if (!shared->interface.workCanSubmit(device, &state, submitArgs)) {
        return kIOPerfControlClientWorkUntracked;
    }

    uint64_t token = allocateToken(thread_group);
    if (token != kIOPerfControlClientWorkUntracked) {
        auto &entry = workTable[token];
        state.work_data = &entry.perfcontrol_data;
        state.work_data_size = sizeof(workTable[token].perfcontrol_data);
        shared->interface.workSubmit(device, tokenToGlobalUniqueToken(token), &state, submitArgs);
        state.started = true;
        shared->interface.workBegin(device, tokenToGlobalUniqueToken(token), &state, beginArgs);
        markEntryStarted(token, true);
    }
    return token;
#else
    return kIOPerfControlClientWorkUntracked;
#endif
}

void
IOPerfControlClient::workBegin(IOService *device, uint64_t token, WorkBeginArgs *args)
{
#if CONFIG_THREAD_GROUPS
    WorkTableEntry *entry = getEntryForToken(token);
    if (entry == nullptr) {
        return;
    }

    assertf(!entry->started, "Work for token %llu was already started", token);

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(entry->thread_group),
        .thread_group_data = thread_group_get_machine_data(entry->thread_group),
        .work_data = &entry->perfcontrol_data,
        .work_data_size = sizeof(entry->perfcontrol_data),
        .started = true,
        .driver_state = &clientData.driverState
    };
    shared->interface.workBegin(device, tokenToGlobalUniqueToken(token), &state, args);
    markEntryStarted(token, true);
#endif
}

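/*
 * With a PERFCONTROL_INTERFACE_VERSION_4 controller, ending work also collects
 * resource usage (mach time and energy deltas) via workEndWithResources() and
 * charges it to the coalition captured when the token was allocated.
 */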
void
IOPerfControlClient::workEnd(IOService *device, uint64_t token, WorkEndArgs *args, bool done)
{
#if CONFIG_THREAD_GROUPS
    WorkTableEntry *entry = getEntryForToken(token);
    if (entry == nullptr) {
        return;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(entry->thread_group),
        .thread_group_data = thread_group_get_machine_data(entry->thread_group),
        .work_data = &entry->perfcontrol_data,
        .work_data_size = sizeof(entry->perfcontrol_data),
        .started = entry->started,
        .driver_state = &clientData.driverState
    };

    if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_4) {
        PerfControllerInterface::ResourceAccounting resources;
        shared->interface.workEndWithResources(device, tokenToGlobalUniqueToken(token), &state, args, &resources, done);
        if (clientData.driverState.resource_accounting) {
            accountResources(workTable[token].coal, clientData.driverState.device_type, &resources);
        }
    } else {
        shared->interface.workEnd(device, tokenToGlobalUniqueToken(token), &state, args, done);
    }

    if (done) {
        deallocateToken(token);
    } else {
        markEntryStarted(token, false);
    }
#endif
}

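/*
 * Context-based work tracking: drivers that prefer an opaque object over a
 * small integer token use copyWorkContext() and the *WithContext variants
 * below. The context carries the thread group reference, the optional
 * coalition, and the controller's per-work scratch data.
 */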
static _Atomic uint64_t unique_work_context_id = 1ull;

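/*
 * Per-work-item state returned to the driver as an opaque OSObject. The id is
 * taken from unique_work_context_id, so it is unique across all clients and is
 * passed to the performance controller directly as the token.
 */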
class IOPerfControlWorkContext : public OSObject
{
    OSDeclareDefaultStructors(IOPerfControlWorkContext);

public:
    uint64_t id;
    struct thread_group *thread_group;
    coalition_t coal;
    bool started;
    uint8_t perfcontrol_data[32];

    bool init() override;
    void reset();
    void free() override;
};

OSDefineMetaClassAndStructors(IOPerfControlWorkContext, OSObject);

bool
IOPerfControlWorkContext::init()
{
    if (!super::init()) {
        return false;
    }
    id = atomic_fetch_add_explicit(&unique_work_context_id, 1, memory_order_relaxed) + 1;
    reset();
    return true;
}

void
IOPerfControlWorkContext::reset()
{
    thread_group = nullptr;
    coal = nullptr;
    started = false;
    bzero(perfcontrol_data, sizeof(perfcontrol_data));
}

void
IOPerfControlWorkContext::free()
{
    assertf(thread_group == nullptr, "IOPerfControlWorkContext ID %llu being released without calling workEnd!\n", id);
    assertf(coal == nullptr, "IOPerfControlWorkContext ID %llu being released without calling workEnd!\n", id);
    super::free();
}

OSObject *
IOPerfControlClient::copyWorkContext()
{
    IOPerfControlWorkContext *context = new IOPerfControlWorkContext;

    if (context == nullptr) {
        return nullptr;
    }

    if (!context->init()) {
        context->free();
        return nullptr;
    }

    return context;
}
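
/*
 * Illustrative usage of the context-based API (a sketch, not part of this file;
 * `device` and the Work*Args values stand in for driver-specific state):
 *
 *     OSObject *ctx = client->copyWorkContext();
 *     if (client->workSubmitWithContext(device, ctx, &submitArgs)) {
 *         client->workBeginWithContext(device, ctx, &beginArgs);
 *         ...
 *         client->workEndWithContext(device, ctx, &endArgs, true);
 *     }
 *     ctx->release();
 */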

bool
IOPerfControlClient::workSubmitAndBeginWithContext(IOService *device, OSObject *context, WorkSubmitArgs *submitArgs, WorkBeginArgs *beginArgs)
{
#if CONFIG_THREAD_GROUPS

    if (workSubmitWithContext(device, context, submitArgs) == false) {
        return false;
    }

    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(work_context->thread_group),
        .thread_group_data = thread_group_get_machine_data(work_context->thread_group),
        .work_data = &work_context->perfcontrol_data,
        .work_data_size = sizeof(work_context->perfcontrol_data),
        .started = true,
        .driver_state = &clientData.driverState
    };

    shared->interface.workBegin(device, work_context->id, &state, beginArgs);

    work_context->started = true;

    return true;
#else
    return false;
#endif
}

bool
IOPerfControlClient::workSubmitWithContext(IOService *device, OSObject *context, WorkSubmitArgs *args)
{
#if CONFIG_THREAD_GROUPS
    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    if (work_context == nullptr) {
        return false;
    }

    auto *thread_group = thread_group_get(current_thread());
    assert(thread_group != nullptr);

    assertf(!work_context->started, "IOPerfControlWorkContext ID %llu was already started", work_context->id);
    assertf(work_context->thread_group == nullptr, "IOPerfControlWorkContext ID %llu has already taken a refcount on TG 0x%p \n", work_context->id, (void *)(work_context->thread_group));

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(thread_group),
        .thread_group_data = thread_group_get_machine_data(thread_group),
        .work_data = nullptr,
        .work_data_size = 0,
        .started = false,
        .driver_state = &clientData.driverState
    };
    if (!shared->interface.workCanSubmit(device, &state, args)) {
        return false;
    }

    work_context->thread_group = thread_group_retain(thread_group);
    if (clientData.driverState.resource_accounting) {
        auto *coalition = task_get_coalition(current_task(), COALITION_TYPE_RESOURCE);
        assert(coalition != nullptr);
        work_context->coal = coalition;
        coalition_retain(coalition);
    }

    state.work_data = &work_context->perfcontrol_data;
    state.work_data_size = sizeof(work_context->perfcontrol_data);

    shared->interface.workSubmit(device, work_context->id, &state, args);

    return true;
#else
    return false;
#endif
}

void
IOPerfControlClient::workUpdateWithContext(IOService *device, OSObject *context, WorkUpdateArgs *args)
{
#if CONFIG_THREAD_GROUPS
    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    if (work_context == nullptr) {
        return;
    }

    if (work_context->thread_group == nullptr) {
        // This Work Context has not taken a refcount on a TG
        return;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(work_context->thread_group),
        .thread_group_data = thread_group_get_machine_data(work_context->thread_group),
        .work_data = &work_context->perfcontrol_data,
        .work_data_size = sizeof(work_context->perfcontrol_data),
        .driver_state = &clientData.driverState
    };
    shared->interface.workUpdate(device, work_context->id, &state, args);
#endif
}

void
IOPerfControlClient::workBeginWithContext(IOService *device, OSObject *context, WorkBeginArgs *args)
{
#if CONFIG_THREAD_GROUPS
    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    if (work_context == nullptr) {
        return;
    }

    if (work_context->thread_group == nullptr) {
        // This Work Context has not taken a refcount on a TG
        return;
    }

    assertf(!work_context->started, "IOPerfControlWorkContext %llu was already started", work_context->id);

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(work_context->thread_group),
        .thread_group_data = thread_group_get_machine_data(work_context->thread_group),
        .work_data = &work_context->perfcontrol_data,
        .work_data_size = sizeof(work_context->perfcontrol_data),
        .started = true,
        .driver_state = &clientData.driverState
    };
    shared->interface.workBegin(device, work_context->id, &state, args);

    work_context->started = true;
#endif
}

void
IOPerfControlClient::workEndWithContext(IOService *device, OSObject *context, WorkEndArgs *args, bool done)
{
#if CONFIG_THREAD_GROUPS
    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    if (work_context == nullptr) {
        return;
    }

    if (work_context->thread_group == nullptr) {
        return;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(work_context->thread_group),
        .thread_group_data = thread_group_get_machine_data(work_context->thread_group),
        .work_data = &work_context->perfcontrol_data,
        .work_data_size = sizeof(work_context->perfcontrol_data),
        .started = work_context->started,
        .driver_state = &clientData.driverState
    };

    if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_4) {
        PerfControllerInterface::ResourceAccounting resources;
        shared->interface.workEndWithResources(device, work_context->id, &state, args, &resources, done);
        if (clientData.driverState.resource_accounting) {
            accountResources(work_context->coal, clientData.driverState.device_type, &resources);
        }
    } else {
        shared->interface.workEnd(device, work_context->id, &state, args, done);
    }

    if (done) {
        thread_group_release(work_context->thread_group);
        if (work_context->coal != nullptr) {
            coalition_release(work_context->coal);
        }
        work_context->reset();
    } else {
        work_context->started = false;
    }

    return;
#else
    return;
#endif
}

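/*
 * Called by the performance controller to install its callback interface.
 * Only the first registration takes effect; clients that registered devices
 * before a controller was present are replayed from deviceRegistrationList
 * here.
 */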
IOReturn
IOPerfControlClient::registerPerformanceController(PerfControllerInterface *pci)
{
    IOReturn result = kIOReturnError;

    IOLockLock(shared->interfaceLock);

    if (shared->interface.version == PERFCONTROL_INTERFACE_VERSION_NONE) {
        shared->interface.version = pci->version;

        if (pci->version >= PERFCONTROL_INTERFACE_VERSION_1) {
            assert(pci->registerDevice && pci->unregisterDevice && pci->workCanSubmit && pci->workSubmit && pci->workBegin && pci->workEnd);
            shared->interface.registerDevice = pci->registerDevice;
            shared->interface.unregisterDevice = pci->unregisterDevice;
            shared->interface.workCanSubmit = pci->workCanSubmit;
            shared->interface.workSubmit = pci->workSubmit;
            shared->interface.workBegin = pci->workBegin;
            shared->interface.workEnd = pci->workEnd;
        }

        if (pci->version >= PERFCONTROL_INTERFACE_VERSION_2) {
            if (pci->workUpdate != nullptr) {
                shared->interface.workUpdate = pci->workUpdate;
            }
        }

        if (pci->version >= PERFCONTROL_INTERFACE_VERSION_3) {
            assert(pci->registerDriverDevice && pci->unregisterDriverDevice);
            shared->interface.registerDriverDevice = pci->registerDriverDevice;
            shared->interface.unregisterDriverDevice = pci->unregisterDriverDevice;
        }

        if (pci->version >= PERFCONTROL_INTERFACE_VERSION_4) {
            assert(pci->workEndWithResources);
            shared->interface.workEndWithResources = pci->workEndWithResources;
        }

        result = kIOReturnSuccess;

        OSObject *obj;
        while ((obj = shared->deviceRegistrationList->getAnyObject())) {
            IOPerfControlClient *client = OSDynamicCast(IOPerfControlClient, obj);
            IOPerfControlClientData *clientData = client->getClientData();
            if (clientData && clientData->device) {
                if (pci->version >= PERFCONTROL_INTERFACE_VERSION_3) {
                    pci->registerDriverDevice(clientData->device->getProvider(), clientData->device, &(clientData->driverState));
                } else if (pci->version >= PERFCONTROL_INTERFACE_VERSION_1) {
                    pci->registerDevice(clientData->device);
                }
            }
            shared->deviceRegistrationList->removeObject(obj);
        }
    }

    IOLockUnlock(shared->interfaceLock);

    return result;
}