xref: /xnu-11215/osfmk/kern/task_policy.c (revision 4f1223e8)
1 /*
2  * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/policy_internal.h>
30 #include <mach/task_policy.h>
31 #include <mach/task.h>
32 #include <mach/mach_types.h>
33 #include <mach/task_server.h>
34 #include <kern/host.h>                  /* host_priv_self()        */
35 #include <mach/host_priv.h>             /* host_get_special_port() */
36 #include <mach/host_special_ports.h>    /* RESOURCE_NOTIFY_PORT    */
37 #include <kern/sched.h>
38 #include <kern/task.h>
39 #include <mach/thread_policy.h>
40 #include <sys/errno.h>
41 #include <sys/resource.h>
42 #include <machine/limits.h>
43 #include <kern/ledger.h>
44 #include <kern/thread_call.h>
45 #include <kern/sfi.h>
46 #include <kern/coalition.h>
47 #if CONFIG_TELEMETRY
48 #include <kern/telemetry.h>
49 #endif
50 #if !defined(XNU_TARGET_OS_OSX)
51 #include <kern/kalloc.h>
52 #include <sys/errno.h>
53 #endif /* !defined(XNU_TARGET_OS_OSX) */
54 
55 #if IMPORTANCE_INHERITANCE
56 #include <ipc/ipc_importance.h>
57 #if IMPORTANCE_TRACE
58 #include <mach/machine/sdt.h>
59 #endif /* IMPORTANCE_TRACE */
60 #endif /* IMPORTANCE_INHERITANCE */
61 
62 #include <sys/kdebug.h>
63 
64 /*
65  *  Task Policy
66  *
67  *  This subsystem manages task and thread IO priority and backgrounding,
68  *  as well as importance inheritance, process suppression, task QoS, and apptype.
69  *  These properties have a surprising number of complex interactions, so they are
70  *  centralized here in one state machine to simplify the implementation of those interactions.
71  *
72  *  Architecture:
73  *  Threads and tasks have two policy fields: requested, effective.
74  *  Requested represents the wishes of each interface that influences task policy.
75  *  Effective represents the distillation of that policy into a set of behaviors.
76  *
77  *  Each thread making a modification in the policy system passes a 'pending' struct,
78  *  which tracks updates that will be applied after dropping the policy engine lock.
79  *
80  *  Each interface that has an input into the task policy state machine controls a field in requested.
81  *  If the interface has a getter, it returns what is in the field in requested, but that is
82  *  not necessarily what is actually in effect.
83  *
84  *  All kernel subsystems that behave differently based on task policy call into
85  *  the proc_get_effective_(task|thread)_policy functions, which return the decision of the task policy state machine
86  *  for that subsystem by querying only the 'effective' field.
87  *
88  *  Policy change operations:
89  *  Here are the steps to change a policy on a task or thread:
90  *  1) Lock task
91  *  2) Change requested field for the relevant policy
92  *  3) Run a task policy update, which recalculates effective based on requested,
93  *     then takes a diff between the old and new versions of requested and calls the relevant
94  *     other subsystems to apply these changes, and updates the pending field.
95  *  4) Unlock task
96  *  5) Run task policy update complete, which looks at the pending field to update
97  *     subsystems which cannot be touched while holding the task lock (see the sketch after this comment).
98  *
99  *  To add a new requested policy, add the field in the requested struct, the flavor in task.h,
100  *  the setter and getter in proc_(set|get)_task_policy*,
101  *  then set up the effects of that behavior in task_policy_update*. If the policy manifests
102  *  itself as a distinct effective policy, add it to the effective struct and add it to the
103  *  proc_get_effective_task_policy accessor.
104  *
105  *  Most policies are set via proc_set_task_policy, but policies that don't fit that interface
106  *  roll their own lock/set/update/unlock/complete code inside this file.
107  *
108  *
109  *  Suppression policy
110  *
111  *  These are a set of behaviors that can be requested for a task.  They currently have specific
112  *  implied actions when they're enabled, but they may be made customizable in the future.
113  *
114  *  When the affected task is boosted, we temporarily disable the suppression behaviors
115  *  so that the affected process has a chance to run and call the API to permanently
116  *  disable the suppression behaviors.
117  *
118  *  Locking
119  *
120  *  Changing task policy on a task takes the task lock.
121  *  Changing task policy on a thread takes the thread mutex.
122  *  Task policy changes that affect threads will take each thread's mutex to update it if necessary.
123  *
124  *  Querying the effective policy does not take a lock, because callers
125  *  may run in interrupt context or other places where locks are not OK.
126  *
127  *  This means that any notification of state change needs to be externally synchronized.
128  *  We do this by idempotent callouts after the state has changed to ask
129  *  other subsystems to update their view of the world.
130  *
131  * TODO: Move all cpu/wakes/io monitor code into a separate file
132  * TODO: Move all importance code over to importance subsystem
133  * TODO: Move all taskwatch code into a separate file
134  * TODO: Move all VM importance code into a separate file
135  */
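/*
 * A minimal sketch (not compiled into the kernel) of the policy-change
 * sequence described in steps 1-5 above, using the helpers defined in this
 * file; 'category', 'flavor', 'value' and 'value2' are placeholders, and
 * proc_set_task_policy() below is the real implementation of this shape.
 *
 *	struct task_pend_token pend_token = {};
 *
 *	task_lock(task);                                          // 1) Lock task
 *	proc_set_task_policy_locked(task, category, flavor,       // 2) Change requested field
 *	    value, value2);
 *	task_policy_update_locked(task, &pend_token);             // 3) Recompute effective, note
 *	                                                          //    pending side effects
 *	task_unlock(task);                                        // 4) Unlock task
 *	task_policy_update_complete_unlocked(task, &pend_token);  // 5) Apply updates that can't be
 *	                                                          //    done under the task lock
 */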
136 
137 /* Task policy related helper functions */
138 static void proc_set_task_policy_locked(task_t task, int category, int flavor, int value, int value2);
139 
140 static void task_policy_update_locked(task_t task, task_pend_token_t pend_token);
141 static void task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token);
142 
143 /* For attributes that have two scalars as input/output */
144 static void proc_set_task_policy2(task_t task, int category, int flavor, int value1, int value2);
145 static void proc_get_task_policy2(task_t task, int category, int flavor, int *value1, int *value2);
146 
147 static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role, task_pend_token_t pend_token);
148 
149 static uint64_t task_requested_bitfield(task_t task);
150 static uint64_t task_effective_bitfield(task_t task);
151 
152 /* Convenience functions for munging a policy bitfield into a tracepoint */
153 static uintptr_t trequested_0(task_t task);
154 static uintptr_t trequested_1(task_t task);
155 static uintptr_t teffective_0(task_t task);
156 static uintptr_t teffective_1(task_t task);
157 
158 /* CPU limits helper functions */
159 static int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int entitled);
160 static int task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope);
161 static int task_enable_cpumon_locked(task_t task);
162 static int task_disable_cpumon(task_t task);
163 static int task_clear_cpuusage_locked(task_t task, int cpumon_entitled);
164 static int task_apply_resource_actions(task_t task, int type);
165 static void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1);
166 
167 #ifdef MACH_BSD
168 typedef struct proc *   proc_t;
169 int                     proc_pid(struct proc *proc);
170 extern int              proc_selfpid(void);
171 extern char *           proc_name_address(void *p);
172 extern const char *     proc_best_name(proc_t proc);
173 
174 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg,
175     char *buffer, uint32_t buffersize,
176     int32_t *retval);
177 #endif /* MACH_BSD */
178 
179 
180 #if CONFIG_TASKWATCH
181 /* Taskwatch related helper functions */
182 static void set_thread_appbg(thread_t thread, int setbg, int importance);
183 static void add_taskwatch_locked(task_t task, task_watch_t * twp);
184 static void remove_taskwatch_locked(task_t task, task_watch_t * twp);
185 static void task_watch_lock(void);
186 static void task_watch_unlock(void);
187 static void apply_appstate_watchers(task_t task);
188 
189 typedef struct task_watcher {
190 	queue_chain_t   tw_links;       /* queueing of threads */
191 	task_t          tw_task;        /* task that is being watched */
192 	thread_t        tw_thread;      /* thread that is watching the watch_task */
193 	int             tw_state;       /* the current app state of the thread */
194 	int             tw_importance;  /* importance prior to backgrounding */
195 } task_watch_t;
196 
197 typedef struct thread_watchlist {
198 	thread_t        thread;         /* thread being worked on for taskwatch action */
199 	int             importance;     /* importance to be restored if thread is being made active */
200 } thread_watchlist_t;
201 
202 #endif /* CONFIG_TASKWATCH */
203 
204 extern int memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap);
205 
206 /* Importance Inheritance related helper functions */
207 
208 #if IMPORTANCE_INHERITANCE
209 
210 static void task_importance_mark_live_donor(task_t task, boolean_t donating);
211 static void task_importance_mark_receiver(task_t task, boolean_t receiving);
212 static void task_importance_mark_denap_receiver(task_t task, boolean_t denap);
213 
214 static boolean_t task_is_marked_live_importance_donor(task_t task);
215 static boolean_t task_is_importance_receiver(task_t task);
216 static boolean_t task_is_importance_denap_receiver(task_t task);
217 
218 static int task_importance_hold_internal_assertion(task_t target_task, uint32_t count);
219 
220 static void task_add_importance_watchport(task_t task, mach_port_t port, int *boostp);
221 static void task_importance_update_live_donor(task_t target_task);
222 
223 static void task_set_boost_locked(task_t task, boolean_t boost_active);
224 
225 #endif /* IMPORTANCE_INHERITANCE */
226 
227 #if IMPORTANCE_TRACE
228 #define __imptrace_only
229 #else /* IMPORTANCE_TRACE */
230 #define __imptrace_only __unused
231 #endif /* !IMPORTANCE_TRACE */
232 
233 #if IMPORTANCE_INHERITANCE
234 #define __imp_only
235 #else
236 #define __imp_only __unused
237 #endif
238 
239 /*
240  * Default parameters for certain policies
241  */
242 
243 int proc_standard_daemon_tier = THROTTLE_LEVEL_TIER1;
244 int proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER1;
245 
246 int proc_graphics_timer_qos   = (LATENCY_QOS_TIER_0 & 0xFF);
247 
248 const int proc_default_bg_iotier  = THROTTLE_LEVEL_TIER2;
249 
250 /* Latency/throughput QoS fields remain zeroed, i.e. TIER_UNSPECIFIED at creation */
251 const struct task_requested_policy default_task_requested_policy = {
252 	.trp_bg_iotier = proc_default_bg_iotier
253 };
254 const struct task_effective_policy default_task_effective_policy = {};
255 
256 /*
257  * Default parameters for CPU usage monitor.
258  *
259  * Default setting is 50% over 3 minutes.
260  */
261 #define         DEFAULT_CPUMON_PERCENTAGE 50
262 #define         DEFAULT_CPUMON_INTERVAL   (3 * 60)
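/*
 * Worked example of the defaults above: "50% over 3 minutes" corresponds to a
 * budget of roughly 90 CPU-seconds per 180-second window, summed across the
 * task's threads; exceeding that budget within one window is what the CPU
 * usage monitor flags.
 */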
263 
264 uint8_t         proc_max_cpumon_percentage;
265 uint64_t        proc_max_cpumon_interval;
266 
267 kern_return_t
268 qos_latency_policy_validate(task_latency_qos_t ltier)
269 {
270 	if ((ltier != LATENCY_QOS_TIER_UNSPECIFIED) &&
271 	    ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0))) {
272 		return KERN_INVALID_ARGUMENT;
273 	}
274 
275 	return KERN_SUCCESS;
276 }
277 
278 kern_return_t
279 qos_throughput_policy_validate(task_throughput_qos_t ttier)
280 {
281 	if ((ttier != THROUGHPUT_QOS_TIER_UNSPECIFIED) &&
282 	    ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0))) {
283 		return KERN_INVALID_ARGUMENT;
284 	}
285 
286 	return KERN_SUCCESS;
287 }
288 
289 static kern_return_t
290 task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count)
291 {
292 	if (count < TASK_QOS_POLICY_COUNT) {
293 		return KERN_INVALID_ARGUMENT;
294 	}
295 
296 	task_latency_qos_t ltier = qosinfo->task_latency_qos_tier;
297 	task_throughput_qos_t ttier = qosinfo->task_throughput_qos_tier;
298 
299 	kern_return_t kr = qos_latency_policy_validate(ltier);
300 
301 	if (kr != KERN_SUCCESS) {
302 		return kr;
303 	}
304 
305 	kr = qos_throughput_policy_validate(ttier);
306 
307 	return kr;
308 }
309 
310 uint32_t
311 qos_extract(uint32_t qv)
312 {
313 	return qv & 0xFF;
314 }
315 
316 uint32_t
317 qos_latency_policy_package(uint32_t qv)
318 {
319 	return (qv == LATENCY_QOS_TIER_UNSPECIFIED) ? LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | qv);
320 }
321 
322 uint32_t
323 qos_throughput_policy_package(uint32_t qv)
324 {
325 	return (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED) ? THROUGHPUT_QOS_TIER_UNSPECIFIED : ((0xFE << 16) | qv);
326 }
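/*
 * Illustrative round trip through the helpers above, assuming the tier
 * encodings from <mach/task_policy.h> (e.g. LATENCY_QOS_TIER_0 is
 * ((0xFF << 16) | 1) and the *_TIER_UNSPECIFIED values are 0):
 *
 *	uint32_t tier = qos_extract(LATENCY_QOS_TIER_0);   // 1: only the low byte is kept
 *	uint32_t qv   = qos_latency_policy_package(tier);  // (0xFF << 16) | 1 == LATENCY_QOS_TIER_0
 *	uint32_t none = qos_latency_policy_package(LATENCY_QOS_TIER_UNSPECIFIED); // stays 0
 *
 * The extracted index is what fits in the requested/effective policy
 * bitfields; packaging reconstructs the user-visible constant on the way out.
 */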
327 
328 #define TASK_POLICY_SUPPRESSION_DISABLE  0x1
329 #define TASK_POLICY_SUPPRESSION_IOTIER2  0x2
330 #define TASK_POLICY_SUPPRESSION_NONDONOR 0x4
331 /* TEMPORARY boot-arg controlling task_policy suppression (App Nap) */
332 static boolean_t task_policy_suppression_flags = TASK_POLICY_SUPPRESSION_IOTIER2 |
333     TASK_POLICY_SUPPRESSION_NONDONOR;
334 
335 static void
336 task_set_requested_apptype(task_t task, uint64_t apptype, __unused boolean_t update_tg_flag)
337 {
338 	task->requested_policy.trp_apptype = apptype;
339 #if CONFIG_THREAD_GROUPS
340 	if (update_tg_flag && task_is_app(task)) {
341 		task_coalition_thread_group_application_set(task);
342 	}
343 #endif /* CONFIG_THREAD_GROUPS */
344 }
345 
346 kern_return_t
347 task_policy_set(
348 	task_t                                  task,
349 	task_policy_flavor_t    flavor,
350 	task_policy_t                   policy_info,
351 	mach_msg_type_number_t  count)
352 {
353 	kern_return_t           result = KERN_SUCCESS;
354 
355 	if (task == TASK_NULL || task == kernel_task) {
356 		return KERN_INVALID_ARGUMENT;
357 	}
358 
359 	switch (flavor) {
360 	case TASK_CATEGORY_POLICY: {
361 		task_category_policy_t info = (task_category_policy_t)policy_info;
362 
363 		if (count < TASK_CATEGORY_POLICY_COUNT) {
364 			return KERN_INVALID_ARGUMENT;
365 		}
366 
367 #if !defined(XNU_TARGET_OS_OSX)
368 		/* On embedded, you can't modify your own role. */
369 		if (current_task() == task) {
370 			return KERN_INVALID_ARGUMENT;
371 		}
372 #endif
373 
374 		switch (info->role) {
375 		case TASK_FOREGROUND_APPLICATION:
376 		case TASK_BACKGROUND_APPLICATION:
377 		case TASK_DEFAULT_APPLICATION:
378 			proc_set_task_policy(task,
379 			    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
380 			    info->role);
381 			break;
382 
383 		case TASK_CONTROL_APPLICATION:
384 			if (task != current_task() || !task_is_privileged(task)) {
385 				result = KERN_INVALID_ARGUMENT;
386 			} else {
387 				proc_set_task_policy(task,
388 				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
389 				    info->role);
390 			}
391 			break;
392 
393 		case TASK_GRAPHICS_SERVER:
394 			/* TODO: Restrict this role to FCFS <rdar://problem/12552788> */
395 			if (task != current_task() || !task_is_privileged(task)) {
396 				result = KERN_INVALID_ARGUMENT;
397 			} else {
398 				proc_set_task_policy(task,
399 				    TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
400 				    info->role);
401 			}
402 			break;
403 		default:
404 			result = KERN_INVALID_ARGUMENT;
405 			break;
406 		} /* switch (info->role) */
407 
408 		break;
409 	}
410 
411 /* Desired energy-efficiency/performance "quality-of-service" */
412 	case TASK_BASE_QOS_POLICY:
413 	case TASK_OVERRIDE_QOS_POLICY:
414 	{
415 		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
416 		kern_return_t kr = task_qos_policy_validate(qosinfo, count);
417 
418 		if (kr != KERN_SUCCESS) {
419 			return kr;
420 		}
421 
422 
423 		uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);
424 		uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);
425 
426 		proc_set_task_policy2(task, TASK_POLICY_ATTRIBUTE,
427 		    flavor == TASK_BASE_QOS_POLICY ? TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS : TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS,
428 		    lqos, tqos);
429 	}
430 	break;
431 
432 	case TASK_BASE_LATENCY_QOS_POLICY:
433 	{
434 		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
435 		kern_return_t kr = task_qos_policy_validate(qosinfo, count);
436 
437 		if (kr != KERN_SUCCESS) {
438 			return kr;
439 		}
440 
441 		uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier);
442 
443 		proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_LATENCY_QOS_POLICY, lqos);
444 	}
445 	break;
446 
447 	case TASK_BASE_THROUGHPUT_QOS_POLICY:
448 	{
449 		task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info;
450 		kern_return_t kr = task_qos_policy_validate(qosinfo, count);
451 
452 		if (kr != KERN_SUCCESS) {
453 			return kr;
454 		}
455 
456 		uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier);
457 
458 		proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_BASE_THROUGHPUT_QOS_POLICY, tqos);
459 	}
460 	break;
461 
462 	case TASK_SUPPRESSION_POLICY:
463 	{
464 #if !defined(XNU_TARGET_OS_OSX)
465 		/*
466 		 * Suppression policy is not enabled for embedded
467 		 * because apps aren't marked as denap receivers
468 		 */
469 		result = KERN_INVALID_ARGUMENT;
470 		break;
471 #else /* !defined(XNU_TARGET_OS_OSX) */
472 
473 		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;
474 
475 		if (count < TASK_SUPPRESSION_POLICY_COUNT) {
476 			return KERN_INVALID_ARGUMENT;
477 		}
478 
479 		struct task_qos_policy qosinfo;
480 
481 		qosinfo.task_latency_qos_tier = info->timer_throttle;
482 		qosinfo.task_throughput_qos_tier = info->throughput_qos;
483 
484 		kern_return_t kr = task_qos_policy_validate(&qosinfo, TASK_QOS_POLICY_COUNT);
485 
486 		if (kr != KERN_SUCCESS) {
487 			return kr;
488 		}
489 
490 		/* TEMPORARY disablement of task suppression */
491 		if (info->active &&
492 		    (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_DISABLE)) {
493 			return KERN_SUCCESS;
494 		}
495 
496 		struct task_pend_token pend_token = {};
497 
498 		task_lock(task);
499 
500 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
501 		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START,
502 		    proc_selfpid(), task_pid(task), trequested_0(task),
503 		    trequested_1(task), 0);
504 
505 		task->requested_policy.trp_sup_active      = (info->active)         ? 1 : 0;
506 		task->requested_policy.trp_sup_lowpri_cpu  = (info->lowpri_cpu)     ? 1 : 0;
507 		task->requested_policy.trp_sup_timer       = qos_extract(info->timer_throttle);
508 		task->requested_policy.trp_sup_disk        = (info->disk_throttle)  ? 1 : 0;
509 		task->requested_policy.trp_sup_throughput  = qos_extract(info->throughput_qos);
510 		task->requested_policy.trp_sup_cpu         = (info->suppressed_cpu) ? 1 : 0;
511 		task->requested_policy.trp_sup_bg_sockets  = (info->background_sockets) ? 1 : 0;
512 
513 		task_policy_update_locked(task, &pend_token);
514 
515 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
516 		    (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END,
517 		    proc_selfpid(), task_pid(task), trequested_0(task),
518 		    trequested_1(task), 0);
519 
520 		task_unlock(task);
521 
522 		task_policy_update_complete_unlocked(task, &pend_token);
523 
524 		break;
525 
526 #endif /* !defined(XNU_TARGET_OS_OSX) */
527 	}
528 
529 	default:
530 		result = KERN_INVALID_ARGUMENT;
531 		break;
532 	}
533 
534 	return result;
535 }
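/*
 * A minimal user-space sketch of how this MIG routine is typically reached,
 * assuming the public constants and types from <mach/task_policy.h>
 * (error handling omitted):
 *
 *	task_category_policy_data_t info = { .role = TASK_BACKGROUND_APPLICATION };
 *
 *	kern_return_t kr = task_policy_set(mach_task_self(),
 *	    TASK_CATEGORY_POLICY, (task_policy_t)&info,
 *	    TASK_CATEGORY_POLICY_COUNT);
 *
 * Per the checks above, a task cannot change its own role on embedded
 * platforms, and the control/graphics roles require a privileged caller.
 */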
536 
537 /* Sets BSD 'nice' value on the task */
538 kern_return_t
539 task_importance(
540 	task_t                          task,
541 	integer_t                       importance)
542 {
543 	if (task == TASK_NULL || task == kernel_task) {
544 		return KERN_INVALID_ARGUMENT;
545 	}
546 
547 	task_lock(task);
548 
549 	if (!task->active) {
550 		task_unlock(task);
551 
552 		return KERN_TERMINATED;
553 	}
554 
555 	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) >= TASK_CONTROL_APPLICATION) {
556 		task_unlock(task);
557 
558 		return KERN_INVALID_ARGUMENT;
559 	}
560 
561 	task->importance = importance;
562 
563 	struct task_pend_token pend_token = {};
564 
565 	task_policy_update_locked(task, &pend_token);
566 
567 	task_unlock(task);
568 
569 	task_policy_update_complete_unlocked(task, &pend_token);
570 
571 	return KERN_SUCCESS;
572 }
573 
574 kern_return_t
575 task_policy_get(
576 	task_t                                  task,
577 	task_policy_flavor_t    flavor,
578 	task_policy_t                   policy_info,
579 	mach_msg_type_number_t  *count,
580 	boolean_t                               *get_default)
581 {
582 	if (task == TASK_NULL || task == kernel_task) {
583 		return KERN_INVALID_ARGUMENT;
584 	}
585 
586 	switch (flavor) {
587 	case TASK_CATEGORY_POLICY:
588 	{
589 		task_category_policy_t          info = (task_category_policy_t)policy_info;
590 
591 		if (*count < TASK_CATEGORY_POLICY_COUNT) {
592 			return KERN_INVALID_ARGUMENT;
593 		}
594 
595 		if (*get_default) {
596 			info->role = TASK_UNSPECIFIED;
597 		} else {
598 			info->role = proc_get_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
599 		}
600 		break;
601 	}
602 
603 	case TASK_BASE_QOS_POLICY: /* FALLTHRU */
604 	case TASK_OVERRIDE_QOS_POLICY:
605 	{
606 		task_qos_policy_t info = (task_qos_policy_t)policy_info;
607 
608 		if (*count < TASK_QOS_POLICY_COUNT) {
609 			return KERN_INVALID_ARGUMENT;
610 		}
611 
612 		if (*get_default) {
613 			info->task_latency_qos_tier = LATENCY_QOS_TIER_UNSPECIFIED;
614 			info->task_throughput_qos_tier = THROUGHPUT_QOS_TIER_UNSPECIFIED;
615 		} else if (flavor == TASK_BASE_QOS_POLICY) {
616 			int value1, value2;
617 
618 			proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);
619 
620 			info->task_latency_qos_tier = qos_latency_policy_package(value1);
621 			info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
622 		} else if (flavor == TASK_OVERRIDE_QOS_POLICY) {
623 			int value1, value2;
624 
625 			proc_get_task_policy2(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, &value1, &value2);
626 
627 			info->task_latency_qos_tier = qos_latency_policy_package(value1);
628 			info->task_throughput_qos_tier = qos_throughput_policy_package(value2);
629 		}
630 
631 		break;
632 	}
633 
634 	case TASK_POLICY_STATE:
635 	{
636 		task_policy_state_t info = (task_policy_state_t)policy_info;
637 
638 		if (*count < TASK_POLICY_STATE_COUNT) {
639 			return KERN_INVALID_ARGUMENT;
640 		}
641 
642 		/* Only root can get this info */
643 		if (!task_is_privileged(current_task())) {
644 			return KERN_PROTECTION_FAILURE;
645 		}
646 
647 		if (*get_default) {
648 			info->requested = 0;
649 			info->effective = 0;
650 			info->pending = 0;
651 			info->imp_assertcnt = 0;
652 			info->imp_externcnt = 0;
653 			info->flags = 0;
654 			info->imp_transitions = 0;
655 		} else {
656 			task_lock(task);
657 
658 			info->requested = task_requested_bitfield(task);
659 			info->effective = task_effective_bitfield(task);
660 			info->pending   = 0;
661 
662 			info->tps_requested_policy = *(uint64_t*)(&task->requested_policy);
663 			info->tps_effective_policy = *(uint64_t*)(&task->effective_policy);
664 
665 			info->flags = 0;
666 			if (task->task_imp_base != NULL) {
667 				info->imp_assertcnt = task->task_imp_base->iit_assertcnt;
668 				info->imp_externcnt = IIT_EXTERN(task->task_imp_base);
669 				info->flags |= (task_is_marked_importance_receiver(task) ? TASK_IMP_RECEIVER : 0);
670 				info->flags |= (task_is_marked_importance_denap_receiver(task) ? TASK_DENAP_RECEIVER : 0);
671 				info->flags |= (task_is_marked_importance_donor(task) ? TASK_IMP_DONOR : 0);
672 				info->flags |= (task_is_marked_live_importance_donor(task) ? TASK_IMP_LIVE_DONOR : 0);
673 				info->flags |= (get_task_pidsuspended(task) ? TASK_IS_PIDSUSPENDED : 0);
674 				info->imp_transitions = task->task_imp_base->iit_transitions;
675 			} else {
676 				info->imp_assertcnt = 0;
677 				info->imp_externcnt = 0;
678 				info->imp_transitions = 0;
679 			}
680 			task_unlock(task);
681 		}
682 
683 		break;
684 	}
685 
686 	case TASK_SUPPRESSION_POLICY:
687 	{
688 		task_suppression_policy_t info = (task_suppression_policy_t)policy_info;
689 
690 		if (*count < TASK_SUPPRESSION_POLICY_COUNT) {
691 			return KERN_INVALID_ARGUMENT;
692 		}
693 
694 		task_lock(task);
695 
696 		if (*get_default) {
697 			info->active            = 0;
698 			info->lowpri_cpu        = 0;
699 			info->timer_throttle    = LATENCY_QOS_TIER_UNSPECIFIED;
700 			info->disk_throttle     = 0;
701 			info->cpu_limit         = 0;
702 			info->suspend           = 0;
703 			info->throughput_qos    = 0;
704 			info->suppressed_cpu    = 0;
705 		} else {
706 			info->active            = task->requested_policy.trp_sup_active;
707 			info->lowpri_cpu        = task->requested_policy.trp_sup_lowpri_cpu;
708 			info->timer_throttle    = qos_latency_policy_package(task->requested_policy.trp_sup_timer);
709 			info->disk_throttle     = task->requested_policy.trp_sup_disk;
710 			info->cpu_limit         = 0;
711 			info->suspend           = 0;
712 			info->throughput_qos    = qos_throughput_policy_package(task->requested_policy.trp_sup_throughput);
713 			info->suppressed_cpu    = task->requested_policy.trp_sup_cpu;
714 			info->background_sockets = task->requested_policy.trp_sup_bg_sockets;
715 		}
716 
717 		task_unlock(task);
718 		break;
719 	}
720 
721 	default:
722 		return KERN_INVALID_ARGUMENT;
723 	}
724 
725 	return KERN_SUCCESS;
726 }
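/*
 * A minimal user-space sketch of querying a flavor back, assuming the public
 * constants from <mach/task_policy.h>. Both 'count' and 'get_default' are
 * in/out parameters; pass get_default = TRUE to read the default values
 * instead of the task's currently requested ones.
 *
 *	task_category_policy_data_t info;
 *	mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT;
 *	boolean_t get_default = FALSE;
 *
 *	kern_return_t kr = task_policy_get(mach_task_self(),
 *	    TASK_CATEGORY_POLICY, (task_policy_t)&info,
 *	    &count, &get_default);
 */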
727 
728 /*
729  * Called at task creation
730  * We calculate the correct effective but don't apply it to anything yet.
731  * The threads, etc will inherit from the task as they get created.
732  */
733 void
734 task_policy_create(task_t task, task_t parent_task)
735 {
736 	task_set_requested_apptype(task, parent_task->requested_policy.trp_apptype, true);
737 
738 	task->requested_policy.trp_int_darwinbg     = parent_task->requested_policy.trp_int_darwinbg;
739 	task->requested_policy.trp_ext_darwinbg     = parent_task->requested_policy.trp_ext_darwinbg;
740 	task->requested_policy.trp_int_iotier       = parent_task->requested_policy.trp_int_iotier;
741 	task->requested_policy.trp_ext_iotier       = parent_task->requested_policy.trp_ext_iotier;
742 	task->requested_policy.trp_int_iopassive    = parent_task->requested_policy.trp_int_iopassive;
743 	task->requested_policy.trp_ext_iopassive    = parent_task->requested_policy.trp_ext_iopassive;
744 	task->requested_policy.trp_bg_iotier        = parent_task->requested_policy.trp_bg_iotier;
745 	task->requested_policy.trp_terminated       = parent_task->requested_policy.trp_terminated;
746 	task->requested_policy.trp_qos_clamp        = parent_task->requested_policy.trp_qos_clamp;
747 
748 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && !task_is_exec_copy(task)) {
749 		/* Do not update the apptype for exec copy task */
750 		if (parent_task->requested_policy.trp_boosted) {
751 			task_set_requested_apptype(task, TASK_APPTYPE_DAEMON_INTERACTIVE, true);
752 			task_importance_mark_donor(task, TRUE);
753 		} else {
754 			task_set_requested_apptype(task, TASK_APPTYPE_DAEMON_BACKGROUND, true);
755 			task_importance_mark_receiver(task, FALSE);
756 		}
757 	}
758 
759 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
760 	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START,
761 	    task_pid(task), teffective_0(task),
762 	    teffective_1(task), task->priority, 0);
763 
764 	task_policy_update_internal_locked(task, true, NULL);
765 
766 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
767 	    (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END,
768 	    task_pid(task), teffective_0(task),
769 	    teffective_1(task), task->priority, 0);
770 
771 	task_importance_update_live_donor(task);
772 }
773 
774 
775 static void
776 task_policy_update_locked(task_t task, task_pend_token_t pend_token)
777 {
778 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
779 	    (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK) | DBG_FUNC_START),
780 	    task_pid(task), teffective_0(task),
781 	    teffective_1(task), task->priority, 0);
782 
783 	task_policy_update_internal_locked(task, false, pend_token);
784 
785 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
786 	    (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END,
787 	    task_pid(task), teffective_0(task),
788 	    teffective_1(task), task->priority, 0);
789 }
790 
791 /*
792  * One state update function TO RULE THEM ALL
793  *
794  * This function updates the task or thread effective policy fields
795  * and pushes the results to the relevant subsystems.
796  *
797  * Must call update_complete after unlocking the task,
798  * as some subsystems cannot be updated while holding the task lock.
799  *
800  * Called with task locked, not thread
801  */
802 
803 static void
804 task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token)
805 {
806 	/*
807 	 * Step 1:
808 	 *  Gather requested policy and effective coalition state
809 	 */
810 
811 	struct task_requested_policy requested = task->requested_policy;
812 	bool coalition_is_bg = task_get_effective_jetsam_coalition_policy(task, TASK_POLICY_DARWIN_BG);
813 
814 	/*
815 	 * Step 2:
816 	 *  Calculate new effective policies from requested policy and task state
817 	 *  Rules:
818 	 *      Don't change requested, it won't take effect
819 	 */
820 
821 	struct task_effective_policy next = {};
822 
823 	/* Capture properties from coalition */
824 	next.tep_coalition_bg = coalition_is_bg;
825 
826 	/* Update task role */
827 	next.tep_role = requested.trp_role;
828 
829 	/* Set task qos clamp and ceiling */
830 
831 	thread_qos_t role_clamp = THREAD_QOS_UNSPECIFIED;
832 
833 	if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT) {
834 		switch (next.tep_role) {
835 		case TASK_FOREGROUND_APPLICATION:
836 			/* Foreground apps get urgent scheduler priority */
837 			next.tep_qos_ui_is_urgent = 1;
838 			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
839 			break;
840 
841 		case TASK_BACKGROUND_APPLICATION:
842 			/* This is really 'non-focal but on-screen' */
843 			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
844 			break;
845 
846 		case TASK_DEFAULT_APPLICATION:
847 			/* This is 'may render UI but we don't know if it's focal/nonfocal' */
848 			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
849 			break;
850 
851 		case TASK_NONUI_APPLICATION:
852 			/* i.e. 'off-screen' */
853 			next.tep_qos_ceiling = THREAD_QOS_LEGACY;
854 			break;
855 
856 		case TASK_CONTROL_APPLICATION:
857 		case TASK_GRAPHICS_SERVER:
858 			next.tep_qos_ui_is_urgent = 1;
859 			next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED;
860 			break;
861 
862 		case TASK_THROTTLE_APPLICATION:
863 			/* i.e. 'TAL launch' */
864 			next.tep_qos_ceiling = THREAD_QOS_UTILITY;
865 			role_clamp = THREAD_QOS_UTILITY;
866 			break;
867 
868 		case TASK_DARWINBG_APPLICATION:
869 			/* i.e. 'DARWIN_BG throttled background application' */
870 			next.tep_qos_ceiling = THREAD_QOS_BACKGROUND;
871 			break;
872 
873 		case TASK_UNSPECIFIED:
874 		default:
875 			/* Apps that don't have an application role get
876 			 * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */
877 			next.tep_qos_ceiling = THREAD_QOS_LEGACY;
878 			break;
879 		}
880 	} else {
881 		/* Daemons and dext get USER_INTERACTIVE squashed to USER_INITIATED */
882 		next.tep_qos_ceiling = THREAD_QOS_USER_INITIATED;
883 	}
884 
885 	if (role_clamp != THREAD_QOS_UNSPECIFIED) {
886 		if (requested.trp_qos_clamp != THREAD_QOS_UNSPECIFIED) {
887 			next.tep_qos_clamp = MIN(role_clamp, requested.trp_qos_clamp);
888 		} else {
889 			next.tep_qos_clamp = role_clamp;
890 		}
891 	} else {
892 		next.tep_qos_clamp = requested.trp_qos_clamp;
893 	}
894 
895 	/* Calculate DARWIN_BG */
896 	bool wants_darwinbg        = false;
897 	bool wants_all_sockets_bg  = false; /* Do I want my existing sockets to be bg */
898 	bool wants_watchersbg      = false; /* Do I want my pidbound threads to be bg */
899 	bool adaptive_bg_only      = false; /* This task is BG only because it's adaptive unboosted */
900 
901 	/* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */
902 	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
903 	    requested.trp_boosted == 0) {
904 		wants_darwinbg = true;
905 		adaptive_bg_only = true;
906 	}
907 
908 	/*
909 	 * If DARWIN_BG has been requested at either level, it's engaged.
910 	 * Only true DARWIN_BG changes cause watchers to transition.
911 	 *
912 	 * Backgrounding due to apptype does.
913 	 */
914 	if (requested.trp_int_darwinbg || requested.trp_ext_darwinbg ||
915 	    next.tep_role == TASK_DARWINBG_APPLICATION) {
916 		wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = true;
917 		adaptive_bg_only = false;
918 	}
919 
920 	if (next.tep_coalition_bg) {
921 		wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = true;
922 		adaptive_bg_only = false;
923 	}
924 
925 	/* Application launching in special Transparent App Lifecycle throttle mode */
926 	if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT &&
927 	    requested.trp_role == TASK_THROTTLE_APPLICATION) {
928 		next.tep_tal_engaged = 1;
929 	}
930 
931 	/* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. */
932 	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) {
933 		wants_darwinbg = true;
934 		adaptive_bg_only = false;
935 	}
936 
937 	if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND ||
938 	    next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) {
939 		wants_darwinbg = true;
940 		adaptive_bg_only = false;
941 	}
942 
943 	/* Calculate side effects of DARWIN_BG */
944 
945 	if (wants_darwinbg) {
946 		next.tep_darwinbg = 1;
947 		/* darwinbg tasks always create bg sockets, but we don't always loop over all sockets */
948 		next.tep_new_sockets_bg = 1;
949 		next.tep_lowpri_cpu = 1;
950 	}
951 
952 	if (wants_all_sockets_bg) {
953 		next.tep_all_sockets_bg = 1;
954 	}
955 
956 	if (wants_watchersbg) {
957 		next.tep_watchers_bg = 1;
958 	}
959 
960 	next.tep_adaptive_bg = adaptive_bg_only;
961 
962 	/* Calculate low CPU priority */
963 
964 	boolean_t wants_lowpri_cpu = false;
965 
966 	if (wants_darwinbg) {
967 		wants_lowpri_cpu = true;
968 	}
969 
970 	if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0) {
971 		wants_lowpri_cpu = true;
972 	}
973 
974 	if (wants_lowpri_cpu) {
975 		next.tep_lowpri_cpu = 1;
976 	}
977 
978 	/* Calculate IO policy */
979 
980 	/* Update BG IO policy (so we can see if it has changed) */
981 	next.tep_bg_iotier = requested.trp_bg_iotier;
982 
983 	int iopol = THROTTLE_LEVEL_TIER0;
984 
985 	if (wants_darwinbg) {
986 		iopol = MAX(iopol, requested.trp_bg_iotier);
987 	}
988 
989 	if (requested.trp_apptype == TASK_APPTYPE_DAEMON_STANDARD) {
990 		iopol = MAX(iopol, proc_standard_daemon_tier);
991 	}
992 
993 	if (requested.trp_sup_disk && requested.trp_boosted == 0) {
994 		iopol = MAX(iopol, proc_suppressed_disk_tier);
995 	}
996 
997 	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
998 		iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.tep_qos_clamp]);
999 	}
1000 
1001 	iopol = MAX(iopol, requested.trp_int_iotier);
1002 	iopol = MAX(iopol, requested.trp_ext_iotier);
1003 
1004 	next.tep_io_tier = iopol;
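	/*
	 * Worked example of the MAX() chain above: a TASK_APPTYPE_DAEMON_STANDARD
	 * task (proc_standard_daemon_tier == THROTTLE_LEVEL_TIER1) that also has
	 * DARWIN_BG engaged with the default background tier
	 * (proc_default_bg_iotier == THROTTLE_LEVEL_TIER2) lands at
	 * tep_io_tier == THROTTLE_LEVEL_TIER2, since larger tier numbers mean
	 * heavier I/O throttling.
	 */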
1005 
1006 	/* Calculate Passive IO policy */
1007 
1008 	if (requested.trp_ext_iopassive || requested.trp_int_iopassive) {
1009 		next.tep_io_passive = 1;
1010 	}
1011 
1012 	/* Calculate suppression-active flag */
1013 	boolean_t appnap_transition = false;
1014 
1015 	if (requested.trp_sup_active && requested.trp_boosted == 0) {
1016 		next.tep_sup_active = 1;
1017 	}
1018 
1019 	if (task->effective_policy.tep_sup_active != next.tep_sup_active) {
1020 		appnap_transition = true;
1021 	}
1022 
1023 	/* Calculate timer QOS */
1024 	int latency_qos = requested.trp_base_latency_qos;
1025 
1026 	if (requested.trp_sup_timer && requested.trp_boosted == 0) {
1027 		latency_qos = requested.trp_sup_timer;
1028 	}
1029 
1030 	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
1031 		latency_qos = MAX(latency_qos, (int)thread_qos_policy_params.qos_latency_qos[next.tep_qos_clamp]);
1032 	}
1033 
1034 	if (requested.trp_over_latency_qos != 0) {
1035 		latency_qos = requested.trp_over_latency_qos;
1036 	}
1037 
1038 	/* Treat the windowserver specially */
1039 	if (requested.trp_role == TASK_GRAPHICS_SERVER) {
1040 		latency_qos = proc_graphics_timer_qos;
1041 	}
1042 
1043 	next.tep_latency_qos = latency_qos;
1044 
1045 	/* Calculate throughput QOS */
1046 	int through_qos = requested.trp_base_through_qos;
1047 
1048 	if (requested.trp_sup_throughput && requested.trp_boosted == 0) {
1049 		through_qos = requested.trp_sup_throughput;
1050 	}
1051 
1052 	if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
1053 		through_qos = MAX(through_qos, (int)thread_qos_policy_params.qos_through_qos[next.tep_qos_clamp]);
1054 	}
1055 
1056 	if (requested.trp_over_through_qos != 0) {
1057 		through_qos = requested.trp_over_through_qos;
1058 	}
1059 
1060 	next.tep_through_qos = through_qos;
1061 
1062 	/* Calculate suppressed CPU priority */
1063 	if (requested.trp_sup_cpu && requested.trp_boosted == 0) {
1064 		next.tep_suppressed_cpu = 1;
1065 	}
1066 
1067 	/*
1068 	 * Calculate background sockets
1069 	 * Don't take into account boosting to limit transition frequency.
1070 	 */
1071 	if (requested.trp_sup_bg_sockets) {
1072 		next.tep_all_sockets_bg = 1;
1073 		next.tep_new_sockets_bg = 1;
1074 	}
1075 
1076 	/* Apply SFI Managed class bit */
1077 	next.tep_sfi_managed = requested.trp_sfi_managed;
1078 
1079 	/* Calculate 'live donor' status for live importance */
1080 	switch (requested.trp_apptype) {
1081 	case TASK_APPTYPE_APP_TAL:
1082 	case TASK_APPTYPE_APP_DEFAULT:
1083 		if (requested.trp_ext_darwinbg == 1 ||
1084 		    next.tep_coalition_bg ||
1085 		    (next.tep_sup_active == 1 &&
1086 		    (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_NONDONOR)) ||
1087 		    next.tep_role == TASK_DARWINBG_APPLICATION) {
1088 			next.tep_live_donor = 0;
1089 		} else {
1090 			next.tep_live_donor = 1;
1091 		}
1092 		break;
1093 
1094 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
1095 	case TASK_APPTYPE_DAEMON_STANDARD:
1096 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
1097 	case TASK_APPTYPE_DAEMON_BACKGROUND:
1098 	case TASK_APPTYPE_DRIVER:
1099 	default:
1100 		next.tep_live_donor = 0;
1101 		break;
1102 	}
1103 
1104 	if (requested.trp_terminated) {
1105 		/*
1106 		 * Shoot down the throttles that slow down exit or response to SIGTERM
1107 		 * We don't need to shoot down:
1108 		 * passive        (don't want to cause others to throttle)
1109 		 * all_sockets_bg (don't need to iterate FDs on every exit)
1110 		 * new_sockets_bg (doesn't matter for exiting process)
1111 		 * pidsuspend     (jetsam-ed BG process shouldn't run again)
1112 		 * watchers_bg    (watcher threads don't need to be unthrottled)
1113 		 * latency_qos    (affects userspace timers only)
1114 		 */
1115 
1116 		next.tep_terminated     = 1;
1117 		next.tep_darwinbg       = 0;
1118 		next.tep_lowpri_cpu     = 0;
1119 		next.tep_io_tier        = THROTTLE_LEVEL_TIER0;
1120 		next.tep_tal_engaged    = 0;
1121 		next.tep_role           = TASK_UNSPECIFIED;
1122 		next.tep_suppressed_cpu = 0;
1123 	}
1124 
1125 	/*
1126 	 * Step 3:
1127 	 *  Swap out old policy for new policy
1128 	 */
1129 
1130 	struct task_effective_policy prev = task->effective_policy;
1131 
1132 	/* This is the point where the new values become visible to other threads */
1133 	task->effective_policy = next;
1134 
1135 	/* Don't do anything further to a half-formed task */
1136 	if (in_create) {
1137 		return;
1138 	}
1139 
1140 	if (task == kernel_task) {
1141 		panic("Attempting to set task policy on kernel_task");
1142 	}
1143 
1144 	/*
1145 	 * Step 4:
1146 	 *  Pend updates that can't be done while holding the task lock
1147 	 */
1148 
1149 	if (prev.tep_all_sockets_bg != next.tep_all_sockets_bg) {
1150 		pend_token->tpt_update_sockets = 1;
1151 	}
1152 
1153 	/* Only re-scan the timer list if the qos level is getting less strong */
1154 	if (prev.tep_latency_qos > next.tep_latency_qos) {
1155 		pend_token->tpt_update_timers = 1;
1156 	}
1157 
1158 #if CONFIG_TASKWATCH
1159 	if (prev.tep_watchers_bg != next.tep_watchers_bg) {
1160 		pend_token->tpt_update_watchers = 1;
1161 	}
1162 #endif /* CONFIG_TASKWATCH */
1163 
1164 	if (prev.tep_live_donor != next.tep_live_donor) {
1165 		pend_token->tpt_update_live_donor = 1;
1166 	}
1167 
1168 	/*
1169 	 * Step 5:
1170 	 *  Update other subsystems as necessary if something has changed
1171 	 */
1172 
1173 	bool update_threads = false, update_sfi = false, update_termination = false;
1174 
1175 	/*
1176 	 * Check for the attributes that thread_policy_update_internal_locked() consults,
1177 	 *  and trigger thread policy re-evaluation.
1178 	 */
1179 	if (prev.tep_io_tier != next.tep_io_tier ||
1180 	    prev.tep_bg_iotier != next.tep_bg_iotier ||
1181 	    prev.tep_io_passive != next.tep_io_passive ||
1182 	    prev.tep_darwinbg != next.tep_darwinbg ||
1183 	    prev.tep_qos_clamp != next.tep_qos_clamp ||
1184 	    prev.tep_qos_ceiling != next.tep_qos_ceiling ||
1185 	    prev.tep_qos_ui_is_urgent != next.tep_qos_ui_is_urgent ||
1186 	    prev.tep_latency_qos != next.tep_latency_qos ||
1187 	    prev.tep_through_qos != next.tep_through_qos ||
1188 	    prev.tep_lowpri_cpu != next.tep_lowpri_cpu ||
1189 	    prev.tep_new_sockets_bg != next.tep_new_sockets_bg ||
1190 	    prev.tep_terminated != next.tep_terminated ||
1191 	    prev.tep_adaptive_bg != next.tep_adaptive_bg) {
1192 		update_threads = true;
1193 	}
1194 
1195 	/*
1196 	 * Check for the attributes that sfi_thread_classify() consults,
1197 	 *  and trigger SFI re-evaluation.
1198 	 */
1199 	if (prev.tep_latency_qos != next.tep_latency_qos ||
1200 	    prev.tep_role != next.tep_role ||
1201 	    prev.tep_sfi_managed != next.tep_sfi_managed) {
1202 		update_sfi = true;
1203 	}
1204 
1205 	/* Reflect task role transitions into the coalition role counters */
1206 	if (prev.tep_role != next.tep_role) {
1207 		if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role, pend_token)) {
1208 			update_sfi = true;
1209 		}
1210 	}
1211 
1212 	if (prev.tep_terminated != next.tep_terminated) {
1213 		update_termination = true;
1214 	}
1215 
1216 	bool update_priority = false;
1217 
1218 	int16_t priority     = BASEPRI_DEFAULT;
1219 	int16_t max_priority = MAXPRI_USER;
1220 
1221 	if (next.tep_lowpri_cpu) {
1222 		priority = MAXPRI_THROTTLE;
1223 		max_priority = MAXPRI_THROTTLE;
1224 	} else if (next.tep_suppressed_cpu) {
1225 		priority = MAXPRI_SUPPRESSED;
1226 		max_priority = MAXPRI_SUPPRESSED;
1227 	} else {
1228 		switch (next.tep_role) {
1229 		case TASK_CONTROL_APPLICATION:
1230 			priority = BASEPRI_CONTROL;
1231 			break;
1232 		case TASK_GRAPHICS_SERVER:
1233 			priority = BASEPRI_GRAPHICS;
1234 			max_priority = MAXPRI_RESERVED;
1235 			break;
1236 		default:
1237 			break;
1238 		}
1239 
1240 		/* factor in 'nice' value */
1241 		priority += task->importance;
1242 
1243 		if (task->effective_policy.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) {
1244 			int16_t qos_clamp_priority = thread_qos_policy_params.qos_pri[task->effective_policy.tep_qos_clamp];
1245 
1246 			priority        = MIN(priority, qos_clamp_priority);
1247 			max_priority    = MIN(max_priority, qos_clamp_priority);
1248 		}
1249 
1250 		if (priority > max_priority) {
1251 			priority = max_priority;
1252 		} else if (priority < MINPRI) {
1253 			priority = MINPRI;
1254 		}
1255 	}
1256 
1257 	assert(priority <= max_priority);
1258 
1259 	/* avoid extra work if priority isn't changing */
1260 	if (priority != task->priority ||
1261 	    max_priority != task->max_priority) {
1262 		/* update the scheduling priority for the task */
1263 		task->max_priority  = max_priority;
1264 		task->priority      = priority;
1265 		update_priority     = true;
1266 	}
1267 
1268 	/* Loop over the threads in the task:
1269 	 * only once
1270 	 * only if necessary
1271 	 * with one thread mutex acquisition per thread
1272 	 */
1273 	if (update_threads || update_priority || update_sfi) {
1274 		thread_t thread;
1275 
1276 		queue_iterate(&task->threads, thread, thread_t, task_threads) {
1277 			struct task_pend_token thread_pend_token = {};
1278 
1279 			if (update_sfi) {
1280 				thread_pend_token.tpt_update_thread_sfi = 1;
1281 			}
1282 
1283 			if (update_priority || update_threads) {
1284 				/* Check if we need to reevaluate turnstile push */
1285 				if (pend_token->tpt_update_turnstile) {
1286 					thread_pend_token.tpt_update_turnstile = 1;
1287 				}
1288 				thread_policy_update_tasklocked(thread,
1289 				    task->priority, task->max_priority,
1290 				    &thread_pend_token);
1291 			}
1292 
1293 			assert(!thread_pend_token.tpt_update_sockets);
1294 
1295 			// Slightly risky, as we still hold the task lock...
1296 			thread_policy_update_complete_unlocked(thread, &thread_pend_token);
1297 		}
1298 	}
1299 
1300 	/*
1301 	 * Use the app-nap transitions to influence the
1302 	 * transition of the process within the jetsam band
1303 	 * [and optionally its live-donor status]
1304 	 * On macOS only.
1305 	 */
1306 	if (appnap_transition) {
1307 		if (task->effective_policy.tep_sup_active == 1) {
1308 			memorystatus_update_priority_for_appnap(((proc_t) get_bsdtask_info(task)), TRUE);
1309 		} else {
1310 			memorystatus_update_priority_for_appnap(((proc_t) get_bsdtask_info(task)), FALSE);
1311 		}
1312 	}
1313 
1314 	if (update_termination) {
1315 		/*
1316 		 * This update is done after the terminated bit is set,
1317 		 * and all updates other than this one will check that bit,
1318 		 * so we know that it will be the last update.  (This path
1319 		 * skips the check for the terminated bit.)
1320 		 */
1321 		if (task_set_game_mode_locked(task, false)) {
1322 			pend_token->tpt_update_game_mode = 1;
1323 		}
1324 		if (task_set_carplay_mode_locked(task, false)) {
1325 			pend_token->tpt_update_carplay_mode = 1;
1326 		}
1327 	}
1328 }
1329 
1330 
1331 /*
1332  * Yet another layering violation. We reach out and bang on the coalition directly.
1333  */
1334 static boolean_t
1335 task_policy_update_coalition_focal_tasks(task_t            task,
1336     int               prev_role,
1337     int               next_role,
1338     task_pend_token_t pend_token)
1339 {
1340 	boolean_t sfi_transition = FALSE;
1341 	uint32_t new_count = 0;
1342 
1343 	/* task moving into/out-of the foreground */
1344 	if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) {
1345 		if (task_coalition_adjust_focal_count(task, 1, &new_count) && (new_count == 1)) {
1346 			sfi_transition = TRUE;
1347 			pend_token->tpt_update_tg_ui_flag = TRUE;
1348 		}
1349 	} else if (prev_role == TASK_FOREGROUND_APPLICATION && next_role != TASK_FOREGROUND_APPLICATION) {
1350 		if (task_coalition_adjust_focal_count(task, -1, &new_count) && (new_count == 0)) {
1351 			sfi_transition = TRUE;
1352 			pend_token->tpt_update_tg_ui_flag = TRUE;
1353 		}
1354 	}
1355 
1356 	/* task moving into/out-of background */
1357 	if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) {
1358 		if (task_coalition_adjust_nonfocal_count(task, 1, &new_count) && (new_count == 1)) {
1359 			sfi_transition = TRUE;
1360 		}
1361 	} else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) {
1362 		if (task_coalition_adjust_nonfocal_count(task, -1, &new_count) && (new_count == 0)) {
1363 			sfi_transition = TRUE;
1364 		}
1365 	}
1366 
1367 	if (sfi_transition) {
1368 		pend_token->tpt_update_coal_sfi = 1;
1369 	}
1370 	return sfi_transition;
1371 }
1372 
1373 /*
1374  * Called with coalition locked to push updates from coalition policy
1375  * into its member tasks
1376  */
1377 void
1378 coalition_policy_update_task(task_t task, coalition_pend_token_t coal_pend_token)
1379 {
1380 	/*
1381 	 * Push a task policy update incorporating the new state
1382 	 * of the coalition, but because we have the coalition locked,
1383 	 * we can't do task_policy_update_complete_unlocked in this function.
1384 	 *
1385 	 * Instead, we stash the pend token on the task, and ask the coalition
1386 	 * to come around later after the lock is dropped to do the follow-up.
1387 	 */
1388 
1389 	task_pend_token_t task_pend_token = &task->pended_coalition_changes;
1390 
1391 	assert(task_pend_token->tpt_value == 0);
1392 
1393 	task_lock(task);
1394 
1395 	task_policy_update_locked(task, task_pend_token);
1396 
1397 	task_unlock(task);
1398 
1399 	if (task_pend_token->tpt_update_timers) {
1400 		/*
1401 		 * ml_timer_evaluate can be batched, so defer it to happen
1402 		 * once at the coalition level
1403 		 */
1404 		coal_pend_token->cpt_update_timers = 1;
1405 		task_pend_token->tpt_update_timers = 0;
1406 	}
1407 
1408 	if (task_pend_token->tpt_value != 0) {
1409 		/*
1410 		 * We need to come look at this task after unlocking
1411 		 * the coalition to do pended work.
1412 		 */
1413 		coal_pend_token->cpt_update_j_coal_tasks = 1;
1414 	}
1415 }
1416 
1417 /*
1418  * Called with task unlocked to do things that can't be done while holding the task lock
1419  */
1420 void
1421 task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token)
1422 {
1423 #ifdef MACH_BSD
1424 	if (pend_token->tpt_update_sockets) {
1425 		proc_apply_task_networkbg(task_pid(task), THREAD_NULL);
1426 	}
1427 #endif /* MACH_BSD */
1428 
1429 	/* The timer throttle has been removed or reduced, we need to look for expired timers and fire them */
1430 	if (pend_token->tpt_update_timers) {
1431 		ml_timer_evaluate();
1432 	}
1433 
1434 #if CONFIG_TASKWATCH
1435 	if (pend_token->tpt_update_watchers) {
1436 		apply_appstate_watchers(task);
1437 	}
1438 #endif /* CONFIG_TASKWATCH */
1439 
1440 	if (pend_token->tpt_update_live_donor) {
1441 		task_importance_update_live_donor(task);
1442 	}
1443 
1444 #if CONFIG_SCHED_SFI
1445 	/* use the resource coalition for SFI re-evaluation */
1446 	if (pend_token->tpt_update_coal_sfi) {
1447 		coalition_for_each_task(task->coalition[COALITION_TYPE_RESOURCE],
1448 		    ^ bool (task_t each_task) {
1449 			thread_t thread;
1450 
1451 			/* skip the task we're re-evaluating on behalf of: it's already updated */
1452 			if (each_task == task) {
1453 			        return false;
1454 			}
1455 
1456 			task_lock(each_task);
1457 
1458 			queue_iterate(&each_task->threads, thread, thread_t, task_threads) {
1459 			        sfi_reevaluate(thread);
1460 			}
1461 
1462 			task_unlock(each_task);
1463 
1464 			return false;
1465 		});
1466 	}
1467 #endif /* CONFIG_SCHED_SFI */
1468 
1469 #if CONFIG_THREAD_GROUPS
1470 	if (pend_token->tpt_update_tg_ui_flag) {
1471 		task_coalition_thread_group_focal_update(task);
1472 	}
1473 	if (pend_token->tpt_update_tg_app_flag) {
1474 		task_coalition_thread_group_application_set(task);
1475 	}
1476 	if (pend_token->tpt_update_game_mode) {
1477 		task_coalition_thread_group_game_mode_update(task);
1478 	}
1479 	if (pend_token->tpt_update_carplay_mode) {
1480 		task_coalition_thread_group_carplay_mode_update(task);
1481 	}
1482 #endif /* CONFIG_THREAD_GROUPS */
1483 }
1484 
1485 /*
1486  * Initiate a task policy state transition
1487  *
1488  * Everything that modifies requested except functions that need to hold the task lock
1489  * should use this function
1490  *
1491  * Argument validation should be performed before reaching this point.
1492  *
1493  * TODO: Do we need to check task->active?
1494  */
1495 void
1496 proc_set_task_policy(task_t     task,
1497     int        category,
1498     int        flavor,
1499     int        value)
1500 {
1501 	struct task_pend_token pend_token = {};
1502 
1503 	task_lock(task);
1504 
1505 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1506 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
1507 	    task_pid(task), trequested_0(task),
1508 	    trequested_1(task), value, 0);
1509 
1510 	proc_set_task_policy_locked(task, category, flavor, value, 0);
1511 
1512 	task_policy_update_locked(task, &pend_token);
1513 
1514 
1515 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1516 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
1517 	    task_pid(task), trequested_0(task),
1518 	    trequested_1(task), tpending(&pend_token), 0);
1519 
1520 	task_unlock(task);
1521 
1522 	task_policy_update_complete_unlocked(task, &pend_token);
1523 }
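/*
 * Typical in-kernel usage sketch, mirroring the role update performed by
 * task_policy_set() earlier in this file (the value passed must be valid for
 * the flavor, as enforced by proc_set_task_policy_locked()):
 *
 *	proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE,
 *	    TASK_FOREGROUND_APPLICATION);
 */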
1524 
1525 /*
1526  * Variant of proc_set_task_policy() that sets two scalars in the requested policy structure.
1527  * Same locking rules apply.
1528  */
1529 void
1530 proc_set_task_policy2(task_t    task,
1531     int       category,
1532     int       flavor,
1533     int       value,
1534     int       value2)
1535 {
1536 	struct task_pend_token pend_token = {};
1537 
1538 	task_lock(task);
1539 
1540 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1541 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START,
1542 	    task_pid(task), trequested_0(task),
1543 	    trequested_1(task), value, 0);
1544 
1545 	proc_set_task_policy_locked(task, category, flavor, value, value2);
1546 
1547 	task_policy_update_locked(task, &pend_token);
1548 
1549 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1550 	    (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END,
1551 	    task_pid(task), trequested_0(task),
1552 	    trequested_1(task), tpending(&pend_token), 0);
1553 
1554 	task_unlock(task);
1555 
1556 	task_policy_update_complete_unlocked(task, &pend_token);
1557 }
1558 
1559 /*
1560  * Set the requested state for a specific flavor to a specific value.
1561  *
1562  *  TODO:
1563  *  Verify that arguments to non-iopol flavors are 1 or 0
1564  */
1565 static void
1566 proc_set_task_policy_locked(task_t      task,
1567     int         category,
1568     int         flavor,
1569     int         value,
1570     int         value2)
1571 {
1572 	int tier, passive;
1573 
1574 	struct task_requested_policy requested = task->requested_policy;
1575 
1576 	switch (flavor) {
1577 	/* Category: EXTERNAL and INTERNAL */
1578 
1579 	case TASK_POLICY_DARWIN_BG:
1580 		if (category == TASK_POLICY_EXTERNAL) {
1581 			requested.trp_ext_darwinbg = value;
1582 		} else {
1583 			requested.trp_int_darwinbg = value;
1584 		}
1585 		break;
1586 
1587 	case TASK_POLICY_IOPOL:
1588 		proc_iopol_to_tier(value, &tier, &passive);
1589 		if (category == TASK_POLICY_EXTERNAL) {
1590 			requested.trp_ext_iotier  = tier;
1591 			requested.trp_ext_iopassive = passive;
1592 		} else {
1593 			requested.trp_int_iotier  = tier;
1594 			requested.trp_int_iopassive = passive;
1595 		}
1596 		break;
1597 
1598 	case TASK_POLICY_IO:
1599 		if (category == TASK_POLICY_EXTERNAL) {
1600 			requested.trp_ext_iotier = value;
1601 		} else {
1602 			requested.trp_int_iotier = value;
1603 		}
1604 		break;
1605 
1606 	case TASK_POLICY_PASSIVE_IO:
1607 		if (category == TASK_POLICY_EXTERNAL) {
1608 			requested.trp_ext_iopassive = value;
1609 		} else {
1610 			requested.trp_int_iopassive = value;
1611 		}
1612 		break;
1613 
1614 	/* Category: INTERNAL */
1615 
1616 	case TASK_POLICY_DARWIN_BG_IOPOL:
1617 		assert(category == TASK_POLICY_INTERNAL);
1618 		proc_iopol_to_tier(value, &tier, &passive);
1619 		requested.trp_bg_iotier = tier;
1620 		break;
1621 
1622 	/* Category: ATTRIBUTE */
1623 
1624 	case TASK_POLICY_BOOST:
1625 		assert(category == TASK_POLICY_ATTRIBUTE);
1626 		requested.trp_boosted = value;
1627 		break;
1628 
1629 	case TASK_POLICY_ROLE:
1630 		assert(category == TASK_POLICY_ATTRIBUTE);
1631 		requested.trp_role = value;
1632 		break;
1633 
1634 	case TASK_POLICY_TERMINATED:
1635 		assert(category == TASK_POLICY_ATTRIBUTE);
1636 		requested.trp_terminated = value;
1637 		break;
1638 
1639 	case TASK_BASE_LATENCY_QOS_POLICY:
1640 		assert(category == TASK_POLICY_ATTRIBUTE);
1641 		requested.trp_base_latency_qos = value;
1642 		break;
1643 
1644 	case TASK_BASE_THROUGHPUT_QOS_POLICY:
1645 		assert(category == TASK_POLICY_ATTRIBUTE);
1646 		requested.trp_base_through_qos = value;
1647 		break;
1648 
1649 	case TASK_POLICY_SFI_MANAGED:
1650 		assert(category == TASK_POLICY_ATTRIBUTE);
1651 		requested.trp_sfi_managed = value;
1652 		break;
1653 
1654 	case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
1655 		assert(category == TASK_POLICY_ATTRIBUTE);
1656 		requested.trp_base_latency_qos = value;
1657 		requested.trp_base_through_qos = value2;
1658 		break;
1659 
1660 	case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
1661 		assert(category == TASK_POLICY_ATTRIBUTE);
1662 		requested.trp_over_latency_qos = value;
1663 		requested.trp_over_through_qos = value2;
1664 		break;
1665 
1666 	default:
1667 		panic("unknown task policy: %d %d %d %d", category, flavor, value, value2);
1668 		break;
1669 	}
1670 
1671 	task->requested_policy = requested;
1672 }
1673 
1674 /*
1675  * Gets what you set. Effective values may be different.
1676  */
1677 int
1678 proc_get_task_policy(task_t     task,
1679     int        category,
1680     int        flavor)
1681 {
1682 	int value = 0;
1683 
1684 	task_lock(task);
1685 
1686 	struct task_requested_policy requested = task->requested_policy;
1687 
1688 	switch (flavor) {
1689 	case TASK_POLICY_DARWIN_BG:
1690 		if (category == TASK_POLICY_EXTERNAL) {
1691 			value = requested.trp_ext_darwinbg;
1692 		} else {
1693 			value = requested.trp_int_darwinbg;
1694 		}
1695 		break;
1696 	case TASK_POLICY_IOPOL:
1697 		if (category == TASK_POLICY_EXTERNAL) {
1698 			value = proc_tier_to_iopol(requested.trp_ext_iotier,
1699 			    requested.trp_ext_iopassive);
1700 		} else {
1701 			value = proc_tier_to_iopol(requested.trp_int_iotier,
1702 			    requested.trp_int_iopassive);
1703 		}
1704 		break;
1705 	case TASK_POLICY_IO:
1706 		if (category == TASK_POLICY_EXTERNAL) {
1707 			value = requested.trp_ext_iotier;
1708 		} else {
1709 			value = requested.trp_int_iotier;
1710 		}
1711 		break;
1712 	case TASK_POLICY_PASSIVE_IO:
1713 		if (category == TASK_POLICY_EXTERNAL) {
1714 			value = requested.trp_ext_iopassive;
1715 		} else {
1716 			value = requested.trp_int_iopassive;
1717 		}
1718 		break;
1719 	case TASK_POLICY_DARWIN_BG_IOPOL:
1720 		assert(category == TASK_POLICY_INTERNAL);
1721 		value = proc_tier_to_iopol(requested.trp_bg_iotier, 0);
1722 		break;
1723 	case TASK_POLICY_ROLE:
1724 		assert(category == TASK_POLICY_ATTRIBUTE);
1725 		value = requested.trp_role;
1726 		break;
1727 	case TASK_POLICY_SFI_MANAGED:
1728 		assert(category == TASK_POLICY_ATTRIBUTE);
1729 		value = requested.trp_sfi_managed;
1730 		break;
1731 	default:
1732 		panic("unknown policy_flavor %d", flavor);
1733 		break;
1734 	}
1735 
1736 	task_unlock(task);
1737 
1738 	return value;
1739 }
1740 
1741 /*
1742  * Variant of proc_get_task_policy() that returns two scalar outputs.
1743  */
1744 void
1745 proc_get_task_policy2(task_t task,
1746     __assert_only int category,
1747     int flavor,
1748     int *value1,
1749     int *value2)
1750 {
1751 	task_lock(task);
1752 
1753 	struct task_requested_policy requested = task->requested_policy;
1754 
1755 	switch (flavor) {
1756 	case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS:
1757 		assert(category == TASK_POLICY_ATTRIBUTE);
1758 		*value1 = requested.trp_base_latency_qos;
1759 		*value2 = requested.trp_base_through_qos;
1760 		break;
1761 
1762 	case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS:
1763 		assert(category == TASK_POLICY_ATTRIBUTE);
1764 		*value1 = requested.trp_over_latency_qos;
1765 		*value2 = requested.trp_over_through_qos;
1766 		break;
1767 
1768 	default:
1769 		panic("unknown policy_flavor %d", flavor);
1770 		break;
1771 	}
1772 
1773 	task_unlock(task);
1774 }
1775 
1776 /*
1777  * Function for querying effective state for relevant subsystems
1778  * Gets what is actually in effect, for subsystems which pull policy instead of receive updates.
1779  *
1780  * ONLY the relevant subsystem should query this.
1781  * NEVER take a value from the 'effective' function and stuff it into a setter.
1782  *
1783  * NOTE: This accessor does not take the task lock.
1784  * Notifications of state updates need to be externally synchronized with state queries.
1785  * This routine *MUST* remain interrupt safe, as it is potentially invoked
1786  * within the context of a timer interrupt.  It is also called in KDP context for stackshot.
1787  */
1788 int
1789 proc_get_effective_task_policy(task_t   task,
1790     int      flavor)
1791 {
1792 	int value = 0;
1793 
1794 	switch (flavor) {
1795 	case TASK_POLICY_DARWIN_BG:
1796 		/*
1797 		 * This backs the KPI call proc_pidbackgrounded to find
1798 		 * out if a pid is backgrounded.
1799 		 * It is used to communicate state to the VM system, as well as
1800 		 * prioritizing requests to the graphics system.
1801 		 * Returns 1 for background mode, 0 for normal mode
1802 		 */
1803 		value = task->effective_policy.tep_darwinbg;
1804 		break;
1805 	case TASK_POLICY_ALL_SOCKETS_BG:
1806 		/*
1807 		 * do_background_socket() calls this to determine what it should do to the proc's sockets
1808 		 * Returns 1 for background mode, 0 for normal mode
1809 		 *
1810 		 * This consults both thread and task so un-DBGing a thread while the task is BG
1811 		 * doesn't get you out of the network throttle.
1812 		 */
1813 		value = task->effective_policy.tep_all_sockets_bg;
1814 		break;
1815 	case TASK_POLICY_SUP_ACTIVE:
1816 		/*
1817 		 * Is the task in AppNap? This is used to determine the urgency
1818 		 * that's passed to the performance management subsystem for threads
1819 		 * that are running at a priority <= MAXPRI_THROTTLE.
1820 		 */
1821 		value = task->effective_policy.tep_sup_active;
1822 		break;
1823 	case TASK_POLICY_LATENCY_QOS:
1824 		/*
1825 		 * timer arming calls into here to find out the timer coalescing level
1826 		 * Returns a QoS tier (0-6)
1827 		 */
1828 		value = task->effective_policy.tep_latency_qos;
1829 		break;
1830 	case TASK_POLICY_THROUGH_QOS:
1831 		/*
1832 		 * This value is passed into the urgency callout from the scheduler
1833 		 * to the performance management subsystem.
1834 		 * Returns a QoS tier (0-6)
1835 		 */
1836 		value = task->effective_policy.tep_through_qos;
1837 		break;
1838 	case TASK_POLICY_ROLE:
1839 		/*
1840 		 * This controls various things that ask whether a process is foreground,
1841 		 * like SFI, VM, access to GPU, etc
1842 		 */
1843 		value = task->effective_policy.tep_role;
1844 		break;
1845 	case TASK_POLICY_WATCHERS_BG:
1846 		/*
1847 		 * This controls whether or not a thread watching this process should be BG.
1848 		 */
1849 		value = task->effective_policy.tep_watchers_bg;
1850 		break;
1851 	case TASK_POLICY_SFI_MANAGED:
1852 		/*
1853 		 * This controls whether or not a process is targeted for specific control by thermald.
1854 		 */
1855 		value = task->effective_policy.tep_sfi_managed;
1856 		break;
1857 	case TASK_POLICY_TERMINATED:
1858 		/*
1859 		 * This controls whether or not a process has its throttling properties shot down for termination.
1860 		 */
1861 		value = task->effective_policy.tep_terminated;
1862 		break;
1863 	default:
1864 		panic("unknown policy_flavor %d", flavor);
1865 		break;
1866 	}
1867 
1868 	return value;
1869 }
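/*
 * Example of the intended pull-style usage (hypothetical caller, for clarity only):
 *
 *	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) {
 *		// treat the task as backgrounded
 *	}
 *
 * Per the warning above, the returned value is informational only and must never
 * be fed back into proc_set_task_policy().
 */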
1870 
1871 /*
1872  * Convert from IOPOL_* values to throttle tiers.
1873  *
1874  * TODO: Can this be made more compact, like an array lookup
1875  * Note that it is possible to support e.g. IOPOL_PASSIVE_STANDARD in the future
1876  */
1877 
1878 void
1879 proc_iopol_to_tier(int iopolicy, int *tier, int *passive)
1880 {
1881 	*passive = 0;
1882 	*tier = 0;
1883 	switch (iopolicy) {
1884 	case IOPOL_IMPORTANT:
1885 		*tier = THROTTLE_LEVEL_TIER0;
1886 		break;
1887 	case IOPOL_PASSIVE:
1888 		*tier = THROTTLE_LEVEL_TIER0;
1889 		*passive = 1;
1890 		break;
1891 	case IOPOL_STANDARD:
1892 		*tier = THROTTLE_LEVEL_TIER1;
1893 		break;
1894 	case IOPOL_UTILITY:
1895 		*tier = THROTTLE_LEVEL_TIER2;
1896 		break;
1897 	case IOPOL_THROTTLE:
1898 		*tier = THROTTLE_LEVEL_TIER3;
1899 		break;
1900 	default:
1901 		panic("unknown I/O policy %d", iopolicy);
1902 		break;
1903 	}
1904 }
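/*
 * One possible shape for the array-lookup version mentioned in the TODO above
 * (a sketch only, not wired in; it assumes the IOPOL_* values stay small and dense):
 *
 *	static const struct { int tier; int passive; } iopol_table[] = {
 *		[IOPOL_IMPORTANT] = { THROTTLE_LEVEL_TIER0, 0 },
 *		[IOPOL_PASSIVE]   = { THROTTLE_LEVEL_TIER0, 1 },
 *		[IOPOL_STANDARD]  = { THROTTLE_LEVEL_TIER1, 0 },
 *		[IOPOL_UTILITY]   = { THROTTLE_LEVEL_TIER2, 0 },
 *		[IOPOL_THROTTLE]  = { THROTTLE_LEVEL_TIER3, 0 },
 *	};
 */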
1905 
1906 int
1907 proc_tier_to_iopol(int tier, int passive)
1908 {
1909 	if (passive == 1) {
1910 		switch (tier) {
1911 		case THROTTLE_LEVEL_TIER0:
1912 			return IOPOL_PASSIVE;
1913 		default:
1914 			panic("unknown passive tier %d", tier);
1915 			return IOPOL_DEFAULT;
1916 		}
1917 	} else {
1918 		switch (tier) {
1919 		case THROTTLE_LEVEL_NONE:
1920 		case THROTTLE_LEVEL_TIER0:
1921 			return IOPOL_DEFAULT;
1922 		case THROTTLE_LEVEL_TIER1:
1923 			return IOPOL_STANDARD;
1924 		case THROTTLE_LEVEL_TIER2:
1925 			return IOPOL_UTILITY;
1926 		case THROTTLE_LEVEL_TIER3:
1927 			return IOPOL_THROTTLE;
1928 		default:
1929 			panic("unknown tier %d", tier);
1930 			return IOPOL_DEFAULT;
1931 		}
1932 	}
1933 }
1934 
1935 int
1936 proc_darwin_role_to_task_role(int darwin_role, task_role_t* task_role)
1937 {
1938 	integer_t role = TASK_UNSPECIFIED;
1939 
1940 	switch (darwin_role) {
1941 	case PRIO_DARWIN_ROLE_DEFAULT:
1942 		role = TASK_UNSPECIFIED;
1943 		break;
1944 	case PRIO_DARWIN_ROLE_UI_FOCAL:
1945 		role = TASK_FOREGROUND_APPLICATION;
1946 		break;
1947 	case PRIO_DARWIN_ROLE_UI:
1948 		role = TASK_DEFAULT_APPLICATION;
1949 		break;
1950 	case PRIO_DARWIN_ROLE_NON_UI:
1951 		role = TASK_NONUI_APPLICATION;
1952 		break;
1953 	case PRIO_DARWIN_ROLE_UI_NON_FOCAL:
1954 		role = TASK_BACKGROUND_APPLICATION;
1955 		break;
1956 	case PRIO_DARWIN_ROLE_TAL_LAUNCH:
1957 		role = TASK_THROTTLE_APPLICATION;
1958 		break;
1959 	case PRIO_DARWIN_ROLE_DARWIN_BG:
1960 		role = TASK_DARWINBG_APPLICATION;
1961 		break;
1962 	default:
1963 		return EINVAL;
1964 	}
1965 
1966 	*task_role = role;
1967 
1968 	return 0;
1969 }
1970 
1971 int
1972 proc_task_role_to_darwin_role(task_role_t task_role)
1973 {
1974 	switch (task_role) {
1975 	case TASK_FOREGROUND_APPLICATION:
1976 		return PRIO_DARWIN_ROLE_UI_FOCAL;
1977 	case TASK_BACKGROUND_APPLICATION:
1978 		return PRIO_DARWIN_ROLE_UI_NON_FOCAL;
1979 	case TASK_NONUI_APPLICATION:
1980 		return PRIO_DARWIN_ROLE_NON_UI;
1981 	case TASK_DEFAULT_APPLICATION:
1982 		return PRIO_DARWIN_ROLE_UI;
1983 	case TASK_THROTTLE_APPLICATION:
1984 		return PRIO_DARWIN_ROLE_TAL_LAUNCH;
1985 	case TASK_DARWINBG_APPLICATION:
1986 		return PRIO_DARWIN_ROLE_DARWIN_BG;
1987 	case TASK_UNSPECIFIED:
1988 	default:
1989 		return PRIO_DARWIN_ROLE_DEFAULT;
1990 	}
1991 }
1992 
1993 
1994 /* TODO: remove this variable when interactive daemon audit period is over */
1995 static TUNABLE(bool, ipc_importance_interactive_receiver,
1996     "imp_interactive_receiver", false);
1997 
1998 /*
1999  * Called at process exec to initialize the apptype, qos clamp, and qos seed of a process
2000  *
2001  * TODO: Make this function more table-driven instead of ad-hoc
2002  */
2003 void
2004 proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_clamp, task_role_t role,
2005     ipc_port_t * portwatch_ports, uint32_t portwatch_count)
2006 {
2007 	struct task_pend_token pend_token = {};
2008 
2009 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2010 	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START,
2011 	    task_pid(task), trequested_0(task), trequested_1(task),
2012 	    apptype, 0);
2013 
2014 	if (apptype != TASK_APPTYPE_NONE) {
2015 		/*
2016 		 * Reset the receiver and denap state inherited from the
2017 		 * task's parent, but only if we are going to reset it via the
2018 		 * provided apptype.
2019 		 */
2020 		if (task_is_importance_receiver(task)) {
2021 			task_importance_mark_receiver(task, FALSE);
2022 		}
2023 		if (task_is_importance_denap_receiver(task)) {
2024 			task_importance_mark_denap_receiver(task, FALSE);
2025 		}
2026 	}
2027 
2028 	switch (apptype) {
2029 	case TASK_APPTYPE_APP_DEFAULT:
2030 		/* Apps become donors via the 'live-donor' flag instead of the static donor flag */
2031 		task_importance_mark_donor(task, FALSE);
2032 		task_importance_mark_live_donor(task, TRUE);
2033 		// importance_receiver == FALSE
2034 #if defined(XNU_TARGET_OS_OSX)
2035 		/* Apps are de-nap receivers on macOS for suppression behaviors */
2036 		task_importance_mark_denap_receiver(task, TRUE);
2037 #endif /* defined(XNU_TARGET_OS_OSX) */
2038 		break;
2039 
2040 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2041 		task_importance_mark_donor(task, TRUE);
2042 		task_importance_mark_live_donor(task, FALSE);
2043 		// importance_denap_receiver == FALSE
2044 
2045 		/*
2046 		 * A boot arg controls whether interactive daemons are importance receivers.
2047 		 * Normally, they are not.  But for testing their behavior as an adaptive
2048 		 * daemon, the boot-arg can be set.
2049 		 *
2050 		 * TODO: remove this when the interactive daemon audit period is over.
2051 		 */
2052 		task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver);
2053 		break;
2054 
2055 	case TASK_APPTYPE_DAEMON_STANDARD:
2056 		task_importance_mark_donor(task, TRUE);
2057 		task_importance_mark_live_donor(task, FALSE);
2058 		// importance_denap_receiver == FALSE
2059 		// importance_receiver == FALSE
2060 		break;
2061 
2062 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2063 		task_importance_mark_donor(task, FALSE);
2064 		task_importance_mark_live_donor(task, FALSE);
2065 		task_importance_mark_receiver(task, TRUE);
2066 		// importance_denap_receiver == FALSE
2067 		break;
2068 
2069 	case TASK_APPTYPE_DAEMON_BACKGROUND:
2070 		task_importance_mark_donor(task, FALSE);
2071 		task_importance_mark_live_donor(task, FALSE);
2072 		// importance_denap_receiver == FALSE
2073 		// importance_receiver == FALSE
2074 		break;
2075 
2076 	case TASK_APPTYPE_DRIVER:
2077 		task_importance_mark_donor(task, FALSE);
2078 		task_importance_mark_live_donor(task, FALSE);
2079 		// importance_denap_receiver == FALSE
2080 		// importance_receiver == FALSE
2081 		break;
2082 
2083 	case TASK_APPTYPE_NONE:
2084 		break;
2085 	}
2086 
2087 	if (portwatch_ports != NULL && apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2088 		int portwatch_boosts = 0;
2089 
2090 		for (uint32_t i = 0; i < portwatch_count; i++) {
2091 			ipc_port_t port = NULL;
2092 
2093 			if (IP_VALID(port = portwatch_ports[i])) {
2094 				int boost = 0;
2095 				task_add_importance_watchport(task, port, &boost);
2096 				portwatch_boosts += boost;
2097 			}
2098 		}
2099 
2100 		if (portwatch_boosts > 0) {
2101 			task_importance_hold_internal_assertion(task, portwatch_boosts);
2102 		}
2103 	}
2104 
2105 	/* Redirect the turnstile push of watchports to task */
2106 	if (portwatch_count && portwatch_ports != NULL) {
2107 		task_add_turnstile_watchports(task, thread, portwatch_ports, portwatch_count);
2108 	}
2109 
2110 	task_lock(task);
2111 
2112 	if (apptype != TASK_APPTYPE_NONE) {
2113 		task_set_requested_apptype(task, apptype, false);
2114 		if (task_is_app(task)) {
2115 			pend_token.tpt_update_tg_app_flag = 1;
2116 		}
2117 	}
2118 
2119 #if !defined(XNU_TARGET_OS_OSX)
2120 	/* Remove this after launchd starts setting it properly */
2121 	if (apptype == TASK_APPTYPE_APP_DEFAULT && role == TASK_UNSPECIFIED) {
2122 		task->requested_policy.trp_role = TASK_FOREGROUND_APPLICATION;
2123 	} else
2124 #endif
2125 	if (role != TASK_UNSPECIFIED) {
2126 		task->requested_policy.trp_role = (uint32_t)role;
2127 	}
2128 
2129 	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
2130 		task->requested_policy.trp_qos_clamp = qos_clamp;
2131 	}
2132 
2133 	task_policy_update_locked(task, &pend_token);
2134 
2135 	task_unlock(task);
2136 
2137 	/* Ensure the donor bit is updated to be in sync with the new live donor status */
2138 	pend_token.tpt_update_live_donor = 1;
2139 
2140 	task_policy_update_complete_unlocked(task, &pend_token);
2141 
2142 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2143 	    (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END,
2144 	    task_pid(task), trequested_0(task), trequested_1(task),
2145 	    task_is_importance_receiver(task), 0);
2146 }
2147 
2148 /*
2149  * Inherit task role across exec
2150  */
2151 void
2152 proc_inherit_task_role(task_t new_task,
2153     task_t old_task)
2154 {
2155 	int role;
2156 
2157 	/* inherit the role from old task to new task */
2158 	role = proc_get_task_policy(old_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
2159 	proc_set_task_policy(new_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role);
2160 }
2161 
2162 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
2163 
2164 /*
2165  * Compute the default main thread qos for a task
2166  */
2167 thread_qos_t
2168 task_compute_main_thread_qos(task_t task)
2169 {
2170 	thread_qos_t primordial_qos = THREAD_QOS_UNSPECIFIED;
2171 
2172 	thread_qos_t qos_clamp = task->requested_policy.trp_qos_clamp;
2173 
2174 	switch (task->requested_policy.trp_apptype) {
2175 	case TASK_APPTYPE_APP_TAL:
2176 	case TASK_APPTYPE_APP_DEFAULT:
2177 		primordial_qos = THREAD_QOS_USER_INTERACTIVE;
2178 		break;
2179 
2180 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2181 	case TASK_APPTYPE_DAEMON_STANDARD:
2182 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2183 	case TASK_APPTYPE_DRIVER:
2184 		primordial_qos = THREAD_QOS_LEGACY;
2185 		break;
2186 
2187 	case TASK_APPTYPE_DAEMON_BACKGROUND:
2188 		primordial_qos = THREAD_QOS_BACKGROUND;
2189 		break;
2190 	}
2191 
2192 	if (get_bsdtask_info(task) == initproc) {
2193 		/* PID 1 gets a special case */
2194 		primordial_qos = MAX(primordial_qos, THREAD_QOS_USER_INITIATED);
2195 	}
2196 
2197 	if (qos_clamp != THREAD_QOS_UNSPECIFIED) {
2198 		if (primordial_qos != THREAD_QOS_UNSPECIFIED) {
2199 			primordial_qos = MIN(qos_clamp, primordial_qos);
2200 		} else {
2201 			primordial_qos = qos_clamp;
2202 		}
2203 	}
2204 
2205 	return primordial_qos;
2206 }
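/*
 * Worked example (illustrative): a TASK_APPTYPE_APP_DEFAULT task starts from
 * THREAD_QOS_USER_INTERACTIVE above; if the spawn attributes also supplied a
 * qos clamp of THREAD_QOS_UTILITY, the MIN() yields THREAD_QOS_UTILITY for the
 * main thread.
 */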
2207 
2208 
2209 /* for process_policy to check before attempting to set */
2210 boolean_t
2211 proc_task_is_tal(task_t task)
2212 {
2213 	return (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) ? TRUE : FALSE;
2214 }
2215 
2216 int
2217 task_get_apptype(task_t task)
2218 {
2219 	return task->requested_policy.trp_apptype;
2220 }
2221 
2222 boolean_t
2223 task_is_daemon(task_t task)
2224 {
2225 	switch (task->requested_policy.trp_apptype) {
2226 	case TASK_APPTYPE_DAEMON_INTERACTIVE:
2227 	case TASK_APPTYPE_DAEMON_STANDARD:
2228 	case TASK_APPTYPE_DAEMON_ADAPTIVE:
2229 	case TASK_APPTYPE_DAEMON_BACKGROUND:
2230 		return TRUE;
2231 	default:
2232 		return FALSE;
2233 	}
2234 }
2235 
2236 bool
2237 task_is_driver(task_t task)
2238 {
2239 	if (!task) {
2240 		return FALSE;
2241 	}
2242 	return task->requested_policy.trp_apptype == TASK_APPTYPE_DRIVER;
2243 }
2244 
2245 boolean_t
2246 task_is_app(task_t task)
2247 {
2248 	switch (task->requested_policy.trp_apptype) {
2249 	case TASK_APPTYPE_APP_DEFAULT:
2250 	case TASK_APPTYPE_APP_TAL:
2251 		return TRUE;
2252 	default:
2253 		return FALSE;
2254 	}
2255 }
2256 
2257 
2258 /* for telemetry */
2259 integer_t
2260 task_grab_latency_qos(task_t task)
2261 {
2262 	return qos_latency_policy_package(proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS));
2263 }
2264 
2265 /* update the darwin background action state in the flags field for libproc */
2266 int
2267 proc_get_darwinbgstate(task_t task, uint32_t * flagsp)
2268 {
2269 	if (task->requested_policy.trp_ext_darwinbg) {
2270 		*flagsp |= PROC_FLAG_EXT_DARWINBG;
2271 	}
2272 
2273 	if (task->requested_policy.trp_int_darwinbg) {
2274 		*flagsp |= PROC_FLAG_DARWINBG;
2275 	}
2276 
2277 #if !defined(XNU_TARGET_OS_OSX)
2278 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) {
2279 		*flagsp |= PROC_FLAG_IOS_APPLEDAEMON;
2280 	}
2281 
2282 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2283 		*flagsp |= PROC_FLAG_IOS_IMPPROMOTION;
2284 	}
2285 #endif /* !defined(XNU_TARGET_OS_OSX) */
2286 
2287 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_DEFAULT ||
2288 	    task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) {
2289 		*flagsp |= PROC_FLAG_APPLICATION;
2290 	}
2291 
2292 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) {
2293 		*flagsp |= PROC_FLAG_ADAPTIVE;
2294 	}
2295 
2296 	if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE &&
2297 	    task->requested_policy.trp_boosted == 1) {
2298 		*flagsp |= PROC_FLAG_ADAPTIVE_IMPORTANT;
2299 	}
2300 
2301 	if (task_is_importance_donor(task)) {
2302 		*flagsp |= PROC_FLAG_IMPORTANCE_DONOR;
2303 	}
2304 
2305 	if (task->effective_policy.tep_sup_active) {
2306 		*flagsp |= PROC_FLAG_SUPPRESSED;
2307 	}
2308 
2309 	return 0;
2310 }
2311 
2312 /*
2313  * Tracepoint data... Reading the tracepoint data can be somewhat complicated.
2314  * The current scheme packs as much data into a single tracepoint as it can.
2315  *
2316  * Each task/thread requested/effective structure is 64 bits in size. Any
2317  * given tracepoint will emit either requested or effective data, but not both.
2318  *
2319  * A tracepoint may emit any of task, thread, or task & thread data.
2320  *
2321  * The type of data emitted varies with pointer size. Where possible, both
2322  * task and thread data are emitted. In LP32 systems, the first and second
2323  * halves of either the task or thread data are emitted.
2324  *
2325  * The code uses uintptr_t array indexes instead of high/low to avoid
2326  * confusion WRT big vs little endian.
2327  *
2328  * The truth table for the tracepoint data functions is below, and has the
2329  * following invariants:
2330  *
2331  * 1) task and thread are uintptr_t*
2332  * 2) task may never be NULL
2333  *
2334  *
2335  *                                     LP32            LP64
2336  * trequested_0(task, NULL)            task[0]         task[0]
2337  * trequested_1(task, NULL)            task[1]         NULL
2338  * trequested_0(task, thread)          thread[0]       task[0]
2339  * trequested_1(task, thread)          thread[1]       thread[0]
2340  *
2341  * Basically, you get a full task or thread on LP32, and both on LP64.
2342  *
2343  * The uintptr_t munging here is squicky enough to deserve a comment.
2344  *
2345  * The variables we are accessing are laid out in memory like this:
2346  *
2347  * [            LP64 uintptr_t  0          ]
2348  * [ LP32 uintptr_t 0 ] [ LP32 uintptr_t 1 ]
2349  *
2350  *      1   2   3   4     5   6   7   8
2351  *
2352  */
2353 
2354 static uintptr_t
2355 trequested_0(task_t task)
2356 {
2357 	static_assert(sizeof(struct task_requested_policy) == sizeof(uint64_t), "size invariant violated");
2358 
2359 	uintptr_t* raw = (uintptr_t*)&task->requested_policy;
2360 
2361 	return raw[0];
2362 }
2363 
2364 static uintptr_t
2365 trequested_1(task_t task)
2366 {
2367 #if defined __LP64__
2368 	(void)task;
2369 	return 0;
2370 #else
2371 	uintptr_t* raw = (uintptr_t*)(&task->requested_policy);
2372 	return raw[1];
2373 #endif
2374 }
2375 
2376 static uintptr_t
2377 teffective_0(task_t task)
2378 {
2379 	uintptr_t* raw = (uintptr_t*)&task->effective_policy;
2380 
2381 	return raw[0];
2382 }
2383 
2384 static uintptr_t
2385 teffective_1(task_t task)
2386 {
2387 #if defined __LP64__
2388 	(void)task;
2389 	return 0;
2390 #else
2391 	uintptr_t* raw = (uintptr_t*)(&task->effective_policy);
2392 	return raw[1];
2393 #endif
2394 }
2395 
2396 /* dump pending for tracepoint */
2397 uint32_t
2398 tpending(task_pend_token_t pend_token)
2399 {
2400 	return *(uint32_t*)(void*)(pend_token);
2401 }
2402 
2403 uint64_t
2404 task_requested_bitfield(task_t task)
2405 {
2406 	uint64_t bits = 0;
2407 	struct task_requested_policy requested = task->requested_policy;
2408 
2409 	bits |= (requested.trp_int_darwinbg     ? POLICY_REQ_INT_DARWIN_BG  : 0);
2410 	bits |= (requested.trp_ext_darwinbg     ? POLICY_REQ_EXT_DARWIN_BG  : 0);
2411 	bits |= (requested.trp_int_iotier       ? (((uint64_t)requested.trp_int_iotier) << POLICY_REQ_INT_IO_TIER_SHIFT) : 0);
2412 	bits |= (requested.trp_ext_iotier       ? (((uint64_t)requested.trp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0);
2413 	bits |= (requested.trp_int_iopassive    ? POLICY_REQ_INT_PASSIVE_IO : 0);
2414 	bits |= (requested.trp_ext_iopassive    ? POLICY_REQ_EXT_PASSIVE_IO : 0);
2415 	bits |= (requested.trp_bg_iotier        ? (((uint64_t)requested.trp_bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT)   : 0);
2416 	bits |= (requested.trp_terminated       ? POLICY_REQ_TERMINATED     : 0);
2417 
2418 	bits |= (requested.trp_boosted          ? POLICY_REQ_BOOSTED        : 0);
2419 	bits |= (requested.trp_tal_enabled      ? POLICY_REQ_TAL_ENABLED    : 0);
2420 	bits |= (requested.trp_apptype          ? (((uint64_t)requested.trp_apptype) << POLICY_REQ_APPTYPE_SHIFT)  : 0);
2421 	bits |= (requested.trp_role             ? (((uint64_t)requested.trp_role) << POLICY_REQ_ROLE_SHIFT)     : 0);
2422 
2423 	bits |= (requested.trp_sup_active       ? POLICY_REQ_SUP_ACTIVE         : 0);
2424 	bits |= (requested.trp_sup_lowpri_cpu   ? POLICY_REQ_SUP_LOWPRI_CPU     : 0);
2425 	bits |= (requested.trp_sup_cpu          ? POLICY_REQ_SUP_CPU            : 0);
2426 	bits |= (requested.trp_sup_timer        ? (((uint64_t)requested.trp_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0);
2427 	bits |= (requested.trp_sup_throughput   ? (((uint64_t)requested.trp_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT)     : 0);
2428 	bits |= (requested.trp_sup_disk         ? POLICY_REQ_SUP_DISK_THROTTLE  : 0);
2429 	bits |= (requested.trp_sup_bg_sockets   ? POLICY_REQ_SUP_BG_SOCKETS     : 0);
2430 
2431 	bits |= (requested.trp_base_latency_qos ? (((uint64_t)requested.trp_base_latency_qos) << POLICY_REQ_BASE_LATENCY_QOS_SHIFT) : 0);
2432 	bits |= (requested.trp_over_latency_qos ? (((uint64_t)requested.trp_over_latency_qos) << POLICY_REQ_OVER_LATENCY_QOS_SHIFT) : 0);
2433 	bits |= (requested.trp_base_through_qos ? (((uint64_t)requested.trp_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0);
2434 	bits |= (requested.trp_over_through_qos ? (((uint64_t)requested.trp_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0);
2435 	bits |= (requested.trp_sfi_managed      ? POLICY_REQ_SFI_MANAGED        : 0);
2436 	bits |= (requested.trp_qos_clamp        ? (((uint64_t)requested.trp_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT)        : 0);
2437 
2438 	return bits;
2439 }
2440 
2441 uint64_t
2442 task_effective_bitfield(task_t task)
2443 {
2444 	uint64_t bits = 0;
2445 	struct task_effective_policy effective = task->effective_policy;
2446 
2447 	bits |= (effective.tep_io_tier          ? (((uint64_t)effective.tep_io_tier) << POLICY_EFF_IO_TIER_SHIFT) : 0);
2448 	bits |= (effective.tep_io_passive       ? POLICY_EFF_IO_PASSIVE     : 0);
2449 	bits |= (effective.tep_darwinbg         ? POLICY_EFF_DARWIN_BG      : 0);
2450 	bits |= (effective.tep_lowpri_cpu       ? POLICY_EFF_LOWPRI_CPU     : 0);
2451 	bits |= (effective.tep_terminated       ? POLICY_EFF_TERMINATED     : 0);
2452 	bits |= (effective.tep_all_sockets_bg   ? POLICY_EFF_ALL_SOCKETS_BG : 0);
2453 	bits |= (effective.tep_new_sockets_bg   ? POLICY_EFF_NEW_SOCKETS_BG : 0);
2454 	bits |= (effective.tep_bg_iotier        ? (((uint64_t)effective.tep_bg_iotier) << POLICY_EFF_BG_IOTIER_SHIFT) : 0);
2455 	bits |= (effective.tep_qos_ui_is_urgent ? POLICY_EFF_QOS_UI_IS_URGENT : 0);
2456 
2457 	bits |= (effective.tep_tal_engaged      ? POLICY_EFF_TAL_ENGAGED    : 0);
2458 	bits |= (effective.tep_watchers_bg      ? POLICY_EFF_WATCHERS_BG    : 0);
2459 	bits |= (effective.tep_sup_active       ? POLICY_EFF_SUP_ACTIVE     : 0);
2460 	bits |= (effective.tep_suppressed_cpu   ? POLICY_EFF_SUP_CPU        : 0);
2461 	bits |= (effective.tep_role             ? (((uint64_t)effective.tep_role) << POLICY_EFF_ROLE_SHIFT)        : 0);
2462 	bits |= (effective.tep_latency_qos      ? (((uint64_t)effective.tep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0);
2463 	bits |= (effective.tep_through_qos      ? (((uint64_t)effective.tep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0);
2464 	bits |= (effective.tep_sfi_managed      ? POLICY_EFF_SFI_MANAGED    : 0);
2465 	bits |= (effective.tep_qos_ceiling      ? (((uint64_t)effective.tep_qos_ceiling) << POLICY_EFF_QOS_CEILING_SHIFT) : 0);
2466 
2467 	return bits;
2468 }
2469 
2470 
2471 /*
2472  * Resource usage and CPU related routines
2473  */
2474 
2475 int
2476 proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep)
2477 {
2478 	int error = 0;
2479 	int scope;
2480 
2481 	task_lock(task);
2482 
2483 
2484 	error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope);
2485 	task_unlock(task);
2486 
2487 	/*
2488 	 * Reverse-map from CPU resource limit scopes back to policies (see comment below).
2489 	 */
2490 	if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2491 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC;
2492 	} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2493 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE;
2494 	} else if (scope == TASK_RUSECPU_FLAGS_DEADLINE) {
2495 		*policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2496 	}
2497 
2498 	return error;
2499 }
2500 
2501 /*
2502  * Configure the default CPU usage monitor parameters.
2503  *
2504  * For tasks which have this mechanism activated: if any thread in the
2505  * process consumes more CPU than this, an EXC_RESOURCE exception will be generated.
2506  */
2507 void
2508 proc_init_cpumon_params(void)
2509 {
2510 	/*
2511 	 * The max CPU percentage can be configured via the boot-args and
2512 	 * a key in the device tree. The boot-args are honored first, then the
2513 	 * device tree.
2514 	 */
2515 	if (!PE_parse_boot_argn("max_cpumon_percentage", &proc_max_cpumon_percentage,
2516 	    sizeof(proc_max_cpumon_percentage))) {
2517 		uint64_t max_percentage = 0ULL;
2518 
2519 		if (!PE_get_default("kern.max_cpumon_percentage", &max_percentage,
2520 		    sizeof(max_percentage))) {
2521 			max_percentage = DEFAULT_CPUMON_PERCENTAGE;
2522 		}
2523 
2524 		assert(max_percentage <= UINT8_MAX);
2525 		proc_max_cpumon_percentage = (uint8_t) max_percentage;
2526 	}
2527 
2528 	if (proc_max_cpumon_percentage > 100) {
2529 		proc_max_cpumon_percentage = 100;
2530 	}
2531 
2532 	/*
2533 	 * The interval should be specified in seconds.
2534 	 *
2535 	 * Like the max CPU percentage, the max CPU interval can be configured
2536 	 * via boot-args and the device tree.
2537 	 */
2538 	if (!PE_parse_boot_argn("max_cpumon_interval", &proc_max_cpumon_interval,
2539 	    sizeof(proc_max_cpumon_interval))) {
2540 		if (!PE_get_default("kern.max_cpumon_interval", &proc_max_cpumon_interval,
2541 		    sizeof(proc_max_cpumon_interval))) {
2542 			proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL;
2543 		}
2544 	}
2545 
2546 	proc_max_cpumon_interval *= NSEC_PER_SEC;
2547 
2548 	/* TEMPORARY boot arg to control App suppression */
2549 	PE_parse_boot_argn("task_policy_suppression_flags",
2550 	    &task_policy_suppression_flags,
2551 	    sizeof(task_policy_suppression_flags));
2552 
2553 	/* adjust suppression disk policy if called for in boot arg */
2554 	if (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_IOTIER2) {
2555 		proc_suppressed_disk_tier = THROTTLE_LEVEL_TIER2;
2556 	}
2557 }
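/*
 * Example (illustrative values only): booting with
 *
 *	max_cpumon_percentage=75 max_cpumon_interval=120
 *
 * caps the default per-thread CPU monitor at 75% over a 120 second window;
 * the interval is given in seconds and converted to nanoseconds above.
 */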
2558 
2559 /*
2560  * Currently supported configurations for CPU limits.
2561  *
2562  * Policy				| Deadline-based CPU limit | Percentage-based CPU limit
2563  * -------------------------------------+--------------------------+------------------------------
2564  * PROC_POLICY_RSRCACT_THROTTLE		| ENOTSUP		   | Task-wide scope only
2565  * PROC_POLICY_RSRCACT_SUSPEND		| Task-wide scope only	   | ENOTSUP
2566  * PROC_POLICY_RSRCACT_TERMINATE	| Task-wide scope only	   | ENOTSUP
2567  * PROC_POLICY_RSRCACT_NOTIFY_KQ	| Task-wide scope only	   | ENOTSUP
2568  * PROC_POLICY_RSRCACT_NOTIFY_EXC	| ENOTSUP		   | Per-thread scope only
2569  *
2570  * A deadline-based CPU limit is actually a simple wallclock timer - the requested action is performed
2571  * after the specified amount of wallclock time has elapsed.
2572  *
2573  * A percentage-based CPU limit performs the requested action after the specified amount of actual CPU time
2574  * has been consumed -- regardless of how much wallclock time has elapsed -- by either the task as an
2575  * aggregate entity (so-called "Task-wide" or "Proc-wide" scope, whereby the CPU time consumed by all threads
2576  * in the task are added together), or by any one thread in the task (so-called "per-thread" scope).
2577  *
2578  * We support either deadline != 0 OR percentage != 0, but not both. The original intention in having them
2579  * share an API was to use actual CPU time as the basis of the deadline-based limit (as in: perform an action
2580  * after I have used some amount of CPU time; this is different than the recurring percentage/interval model)
2581  * but the potential consumer of the API at the time was insisting on wallclock time instead.
2582  *
2583  * Currently, requesting notification via an exception is the only way to get per-thread scope for a
2584  * CPU limit. All other types of notifications force task-wide scope for the limit.
2585  */
2586 int
2587 proc_set_task_ruse_cpu(task_t task, uint16_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline,
2588     int cpumon_entitled)
2589 {
2590 	int error = 0;
2591 	int scope;
2592 
2593 	/*
2594 	 * Enforce the matrix of supported configurations for policy, percentage, and deadline.
2595 	 */
2596 	switch (policy) {
2597 	// If no policy is explicitly given, the default is to throttle.
2598 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE:
2599 	case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE:
2600 		if (deadline != 0) {
2601 			return ENOTSUP;
2602 		}
2603 		scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2604 		break;
2605 	case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND:
2606 	case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE:
2607 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ:
2608 		if (percentage != 0) {
2609 			return ENOTSUP;
2610 		}
2611 		scope = TASK_RUSECPU_FLAGS_DEADLINE;
2612 		break;
2613 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC:
2614 		if (deadline != 0) {
2615 			return ENOTSUP;
2616 		}
2617 		scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2618 #ifdef CONFIG_NOMONITORS
2619 		return error;
2620 #endif /* CONFIG_NOMONITORS */
2621 		break;
2622 	default:
2623 		return EINVAL;
2624 	}
2625 
2626 	task_lock(task);
2627 	if (task != current_task()) {
2628 		task->policy_ru_cpu_ext = policy;
2629 	} else {
2630 		task->policy_ru_cpu = policy;
2631 	}
2632 	error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled);
2633 	task_unlock(task);
2634 	return error;
2635 }
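/*
 * Illustrative call (hypothetical, values made up): request an EXC_RESOURCE
 * notification when any one thread uses more than 50% CPU over a one-second
 * refill interval (interval == 0 defaults to one second):
 *
 *	proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
 *	    50, 0, 0, FALSE);
 *
 * Per the matrix above, the nonzero percentage selects per-thread scope and the
 * deadline must remain 0.
 */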
2636 
2637 /* TODO: get rid of these */
2638 #define TASK_POLICY_CPU_RESOURCE_USAGE          0
2639 #define TASK_POLICY_WIREDMEM_RESOURCE_USAGE     1
2640 #define TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE   2
2641 #define TASK_POLICY_DISK_RESOURCE_USAGE         3
2642 #define TASK_POLICY_NETWORK_RESOURCE_USAGE      4
2643 #define TASK_POLICY_POWER_RESOURCE_USAGE        5
2644 
2645 #define TASK_POLICY_RESOURCE_USAGE_COUNT        6
2646 
2647 int
2648 proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled)
2649 {
2650 	int error = 0;
2651 	int action;
2652 	void * bsdinfo = NULL;
2653 
2654 	task_lock(task);
2655 	if (task != current_task()) {
2656 		task->policy_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2657 	} else {
2658 		task->policy_ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
2659 	}
2660 
2661 	error = task_clear_cpuusage_locked(task, cpumon_entitled);
2662 	if (error != 0) {
2663 		goto out;
2664 	}
2665 
2666 	action = task->applied_ru_cpu;
2667 	if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2668 		/* reset action */
2669 		task->applied_ru_cpu_ext = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2670 	}
2671 	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2672 		bsdinfo = get_bsdtask_info(task);
2673 		task_unlock(task);
2674 		proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2675 		goto out1;
2676 	}
2677 
2678 out:
2679 	task_unlock(task);
2680 out1:
2681 	return error;
2682 }
2683 
2684 /* used to apply resource limit related actions */
2685 static int
2686 task_apply_resource_actions(task_t task, int type)
2687 {
2688 	int action = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
2689 	void * bsdinfo = NULL;
2690 
2691 	switch (type) {
2692 	case TASK_POLICY_CPU_RESOURCE_USAGE:
2693 		break;
2694 	case TASK_POLICY_WIREDMEM_RESOURCE_USAGE:
2695 	case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE:
2696 	case TASK_POLICY_DISK_RESOURCE_USAGE:
2697 	case TASK_POLICY_NETWORK_RESOURCE_USAGE:
2698 	case TASK_POLICY_POWER_RESOURCE_USAGE:
2699 		return 0;
2700 
2701 	default:
2702 		return 1;
2703 	}
2704 	;
2705 
2706 	/* only cpu actions for now */
2707 	task_lock(task);
2708 
2709 	if (task->applied_ru_cpu_ext == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2710 		/* apply action */
2711 		task->applied_ru_cpu_ext = task->policy_ru_cpu_ext;
2712 		action = task->applied_ru_cpu_ext;
2713 	} else {
2714 		action = task->applied_ru_cpu_ext;
2715 	}
2716 
2717 	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
2718 		bsdinfo = get_bsdtask_info(task);
2719 		task_unlock(task);
2720 		proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
2721 	} else {
2722 		task_unlock(task);
2723 	}
2724 
2725 	return 0;
2726 }
2727 
2728 /*
2729  * XXX This API is somewhat broken; we support multiple simultaneous CPU limits, but the get/set API
2730  * only allows for one at a time. This means that if there is a per-thread limit active, the other
2731  * "scopes" will not be accessible via this API. We could change it to pass in the scope of interest
2732  * to the caller, and prefer that, but there's no need for that at the moment.
2733  */
2734 static int
2735 task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep, int *scope)
2736 {
2737 	*percentagep = 0;
2738 	*intervalp = 0;
2739 	*deadlinep = 0;
2740 
2741 	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) {
2742 		*scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2743 		*percentagep = task->rusage_cpu_perthr_percentage;
2744 		*intervalp = task->rusage_cpu_perthr_interval;
2745 	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) != 0) {
2746 		*scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
2747 		*percentagep = task->rusage_cpu_percentage;
2748 		*intervalp = task->rusage_cpu_interval;
2749 	} else if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) != 0) {
2750 		*scope = TASK_RUSECPU_FLAGS_DEADLINE;
2751 		*deadlinep = task->rusage_cpu_deadline;
2752 	} else {
2753 		*scope = 0;
2754 	}
2755 
2756 	return 0;
2757 }
2758 
2759 /*
2760  * Suspend the CPU usage monitor for the task.  Return value indicates
2761  * if the mechanism was actually enabled.
2762  */
2763 int
2764 task_suspend_cpumon(task_t task)
2765 {
2766 	thread_t thread;
2767 
2768 	task_lock_assert_owned(task);
2769 
2770 	if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) {
2771 		return KERN_INVALID_ARGUMENT;
2772 	}
2773 
2774 	/*
2775 	 * Suspend monitoring for the task, and propagate that change to each thread.
2776 	 */
2777 	task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON);
2778 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2779 		act_set_astledger(thread);
2780 	}
2781 
2782 	return KERN_SUCCESS;
2783 }
2784 
2785 /*
2786  * Remove all traces of the CPU monitor.
2787  */
2788 int
2789 task_disable_cpumon(task_t task)
2790 {
2791 	int kret;
2792 
2793 	task_lock_assert_owned(task);
2794 
2795 	kret = task_suspend_cpumon(task);
2796 	if (kret) {
2797 		return kret;
2798 	}
2799 
2800 	/* Once we clear these values, the monitor can't be resumed */
2801 	task->rusage_cpu_perthr_percentage = 0;
2802 	task->rusage_cpu_perthr_interval = 0;
2803 
2804 	return KERN_SUCCESS;
2805 }
2806 
2807 
2808 static int
2809 task_enable_cpumon_locked(task_t task)
2810 {
2811 	thread_t thread;
2812 	task_lock_assert_owned(task);
2813 
2814 	if (task->rusage_cpu_perthr_percentage == 0 ||
2815 	    task->rusage_cpu_perthr_interval == 0) {
2816 		return KERN_INVALID_ARGUMENT;
2817 	}
2818 
2819 	task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
2820 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
2821 		act_set_astledger(thread);
2822 	}
2823 
2824 	return KERN_SUCCESS;
2825 }
2826 
2827 int
2828 task_resume_cpumon(task_t task)
2829 {
2830 	kern_return_t kret;
2831 
2832 	if (!task) {
2833 		return EINVAL;
2834 	}
2835 
2836 	task_lock(task);
2837 	kret = task_enable_cpumon_locked(task);
2838 	task_unlock(task);
2839 
2840 	return kret;
2841 }
2842 
2843 
2844 /* duplicate values from bsd/sys/process_policy.h */
2845 #define PROC_POLICY_CPUMON_DISABLE      0xFF
2846 #define PROC_POLICY_CPUMON_DEFAULTS     0xFE
2847 
2848 static int
2849 task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled)
2850 {
2851 	uint64_t abstime = 0;
2852 	uint64_t limittime = 0;
2853 
2854 	lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED);
2855 
2856 	/* By default, refill once per second */
2857 	if (interval == 0) {
2858 		interval = NSEC_PER_SEC;
2859 	}
2860 
2861 	if (percentage != 0) {
2862 		if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2863 			boolean_t warn = FALSE;
2864 
2865 			/*
2866 			 * A per-thread CPU limit on a task generates an exception
2867 			 * (LEDGER_ACTION_EXCEPTION) if any one thread in the task
2868 			 * exceeds the limit.
2869 			 */
2870 
2871 			if (percentage == PROC_POLICY_CPUMON_DISABLE) {
2872 				if (cpumon_entitled) {
2873 					/* 25095698 - task_disable_cpumon() should be reliable */
2874 					task_disable_cpumon(task);
2875 					return 0;
2876 				}
2877 
2878 				/*
2879 				 * This task wishes to disable the CPU usage monitor, but it's
2880 				 * missing the required entitlement:
2881 				 *     com.apple.private.kernel.override-cpumon
2882 				 *
2883 				 * Instead, treat this as a request to reset its params
2884 				 * back to the defaults.
2885 				 */
2886 				warn = TRUE;
2887 				percentage = PROC_POLICY_CPUMON_DEFAULTS;
2888 			}
2889 
2890 			if (percentage == PROC_POLICY_CPUMON_DEFAULTS) {
2891 				percentage = proc_max_cpumon_percentage;
2892 				interval   = proc_max_cpumon_interval;
2893 			}
2894 
2895 			if (percentage > 100) {
2896 				percentage = 100;
2897 			}
2898 
2899 			/*
2900 			 * Passing in an interval of -1 means either:
2901 			 * - Leave the interval as-is, if there's already a per-thread
2902 			 *   limit configured
2903 			 * - Use the system default.
2904 			 */
2905 			if (interval == -1ULL) {
2906 				if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
2907 					interval = task->rusage_cpu_perthr_interval;
2908 				} else {
2909 					interval = proc_max_cpumon_interval;
2910 				}
2911 			}
2912 
2913 			/*
2914 			 * Enforce global caps on CPU usage monitor here if the process is not
2915 			 * entitled to escape the global caps.
2916 			 */
2917 			if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) {
2918 				warn = TRUE;
2919 				percentage = proc_max_cpumon_percentage;
2920 			}
2921 
2922 			if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) {
2923 				warn = TRUE;
2924 				interval = proc_max_cpumon_interval;
2925 			}
2926 
2927 			if (warn) {
2928 				int       pid = 0;
2929 				const char *procname = "unknown";
2930 
2931 #ifdef MACH_BSD
2932 				pid = proc_selfpid();
2933 				void *cur_bsd_info = get_bsdtask_info(current_task());
2934 				if (cur_bsd_info != NULL) {
2935 					procname = proc_name_address(cur_bsd_info);
2936 				}
2937 #endif
2938 
2939 				printf("process %s[%d] denied attempt to escape CPU monitor"
2940 				    " (missing required entitlement).\n", procname, pid);
2941 			}
2942 
2943 			/* configure the limit values */
2944 			task->rusage_cpu_perthr_percentage = percentage;
2945 			task->rusage_cpu_perthr_interval = interval;
2946 
2947 			/* and enable the CPU monitor */
2948 			(void)task_enable_cpumon_locked(task);
2949 		} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
2950 			/*
2951 			 * Currently, a proc-wide CPU limit always blocks if the limit is
2952 			 * exceeded (LEDGER_ACTION_BLOCK).
2953 			 */
2954 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT;
2955 			task->rusage_cpu_percentage = percentage;
2956 			task->rusage_cpu_interval = interval;
2957 
2958 			limittime = (interval * percentage) / 100;
2959 			nanoseconds_to_absolutetime(limittime, &abstime);
2960 
2961 			ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime, 0);
2962 			ledger_set_period(task->ledger, task_ledgers.cpu_time, interval);
2963 			ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
2964 		}
2965 	}
2966 
2967 	if (deadline != 0) {
2968 		assert(scope == TASK_RUSECPU_FLAGS_DEADLINE);
2969 
2970 		/* if already in use, cancel and wait for it to clean out */
2971 		if (task->rusage_cpu_callt != NULL) {
2972 			task_unlock(task);
2973 			thread_call_cancel_wait(task->rusage_cpu_callt);
2974 			task_lock(task);
2975 		}
2976 		if (task->rusage_cpu_callt == NULL) {
2977 			task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL);
2978 		}
2979 		/* setup callout */
2980 		if (task->rusage_cpu_callt != 0) {
2981 			uint64_t save_abstime = 0;
2982 
2983 			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE;
2984 			task->rusage_cpu_deadline = deadline;
2985 
2986 			nanoseconds_to_absolutetime(deadline, &abstime);
2987 			save_abstime = abstime;
2988 			clock_absolutetime_interval_to_deadline(save_abstime, &abstime);
2989 			thread_call_enter_delayed(task->rusage_cpu_callt, abstime);
2990 		}
2991 	}
2992 
2993 	return 0;
2994 }
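/*
 * Example of the proc-wide ledger math above (illustrative numbers): with
 * percentage = 50 and interval = NSEC_PER_SEC, limittime works out to half a
 * second of CPU time per one-second ledger period, which is converted to
 * absolute time and installed as a LEDGER_ACTION_BLOCK limit.
 */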
2995 
2996 int
2997 task_clear_cpuusage(task_t task, int cpumon_entitled)
2998 {
2999 	int retval = 0;
3000 
3001 	task_lock(task);
3002 	retval = task_clear_cpuusage_locked(task, cpumon_entitled);
3003 	task_unlock(task);
3004 
3005 	return retval;
3006 }
3007 
3008 static int
3009 task_clear_cpuusage_locked(task_t task, int cpumon_entitled)
3010 {
3011 	thread_call_t savecallt;
3012 
3013 	/* cancel percentage handling if set */
3014 	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) {
3015 		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT;
3016 		ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0);
3017 		task->rusage_cpu_percentage = 0;
3018 		task->rusage_cpu_interval = 0;
3019 	}
3020 
3021 	/*
3022 	 * Disable the CPU usage monitor.
3023 	 */
3024 	if (cpumon_entitled) {
3025 		task_disable_cpumon(task);
3026 	}
3027 
3028 	/* cancel deadline handling if set */
3029 	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) {
3030 		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE;
3031 		if (task->rusage_cpu_callt != 0) {
3032 			savecallt = task->rusage_cpu_callt;
3033 			task->rusage_cpu_callt = NULL;
3034 			task->rusage_cpu_deadline = 0;
3035 			task_unlock(task);
3036 			thread_call_cancel_wait(savecallt);
3037 			thread_call_free(savecallt);
3038 			task_lock(task);
3039 		}
3040 	}
3041 	return 0;
3042 }
3043 
3044 /* called by ledger unit to enforce action due to resource usage criteria being met */
3045 static void
3046 task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1)
3047 {
3048 	task_t task = (task_t)param0;
3049 	(void)task_apply_resource_actions(task, TASK_POLICY_CPU_RESOURCE_USAGE);
3050 	return;
3051 }
3052 
3053 
3054 /*
3055  * Routines for taskwatch and pidbind
3056  */
3057 
3058 #if CONFIG_TASKWATCH
3059 
3060 LCK_MTX_DECLARE_ATTR(task_watch_mtx, &task_lck_grp, &task_lck_attr);
3061 
3062 static void
3063 task_watch_lock(void)
3064 {
3065 	lck_mtx_lock(&task_watch_mtx);
3066 }
3067 
3068 static void
3069 task_watch_unlock(void)
3070 {
3071 	lck_mtx_unlock(&task_watch_mtx);
3072 }
3073 
3074 static void
3075 add_taskwatch_locked(task_t task, task_watch_t * twp)
3076 {
3077 	queue_enter(&task->task_watchers, twp, task_watch_t *, tw_links);
3078 	task->num_taskwatchers++;
3079 }
3080 
3081 static void
3082 remove_taskwatch_locked(task_t task, task_watch_t * twp)
3083 {
3084 	queue_remove(&task->task_watchers, twp, task_watch_t *, tw_links);
3085 	task->num_taskwatchers--;
3086 }
3087 
3088 
3089 int
3090 proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind)
3091 {
3092 	thread_t target_thread = NULL;
3093 	int ret = 0, setbg = 0;
3094 	task_watch_t *twp = NULL;
3095 	task_t task = TASK_NULL;
3096 
3097 	target_thread = task_findtid(curtask, tid);
3098 	if (target_thread == NULL) {
3099 		return ESRCH;
3100 	}
3101 	/* holds thread reference */
3102 
3103 	if (bind != 0) {
3104 		/* task is still active ? */
3105 		task_lock(target_task);
3106 		if (target_task->active == 0) {
3107 			task_unlock(target_task);
3108 			ret = ESRCH;
3109 			goto out;
3110 		}
3111 		task_unlock(target_task);
3112 
3113 		twp = kalloc_type(task_watch_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3114 
3115 		task_watch_lock();
3116 
3117 		if (target_thread->taskwatch != NULL) {
3118 			/* already bound to another task */
3119 			task_watch_unlock();
3120 
3121 			kfree_type(task_watch_t, twp);
3122 			ret = EBUSY;
3123 			goto out;
3124 		}
3125 
3126 		task_reference(target_task);
3127 
3128 		setbg = proc_get_effective_task_policy(target_task, TASK_POLICY_WATCHERS_BG);
3129 
3130 		twp->tw_task = target_task;             /* holds the task reference */
3131 		twp->tw_thread = target_thread;         /* holds the thread reference */
3132 		twp->tw_state = setbg;
3133 		twp->tw_importance = target_thread->importance;
3134 
3135 		add_taskwatch_locked(target_task, twp);
3136 
3137 		target_thread->taskwatch = twp;
3138 
3139 		task_watch_unlock();
3140 
3141 		if (setbg) {
3142 			set_thread_appbg(target_thread, setbg, INT_MIN);
3143 		}
3144 
3145 		/* retain the thread reference as it is in twp */
3146 		target_thread = NULL;
3147 	} else {
3148 		/* unbind */
3149 		task_watch_lock();
3150 		if ((twp = target_thread->taskwatch) != NULL) {
3151 			task = twp->tw_task;
3152 			target_thread->taskwatch = NULL;
3153 			remove_taskwatch_locked(task, twp);
3154 
3155 			task_watch_unlock();
3156 
3157 			task_deallocate(task);                  /* drop task ref in twp */
3158 			set_thread_appbg(target_thread, 0, twp->tw_importance);
3159 			thread_deallocate(target_thread);       /* drop thread ref in twp */
3160 			kfree_type(task_watch_t, twp);
3161 		} else {
3162 			task_watch_unlock();
3163 			ret = 0;                /* return success if it was not already bound */
3164 			goto out;
3165 		}
3166 	}
3167 out:
3168 	thread_deallocate(target_thread);       /* drop thread ref acquired in this routine */
3169 	return ret;
3170 }
3171 
3172 static void
3173 set_thread_appbg(thread_t thread, int setbg, __unused int importance)
3174 {
3175 	int enable = (setbg ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE);
3176 
3177 	proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_PIDBIND_BG, enable);
3178 }
3179 
3180 static void
3181 apply_appstate_watchers(task_t task)
3182 {
3183 	int numwatchers = 0, i, j, setbg;
3184 	thread_watchlist_t * threadlist;
3185 	task_watch_t * twp;
3186 
3187 retry:
3188 	/* if no watchers on the list return */
3189 	if ((numwatchers = task->num_taskwatchers) == 0) {
3190 		return;
3191 	}
3192 
3193 	threadlist = kalloc_type(thread_watchlist_t, numwatchers, Z_WAITOK | Z_ZERO);
3194 	if (threadlist == NULL) {
3195 		return;
3196 	}
3197 
3198 	task_watch_lock();
3199 	/* serialize application of app state changes */
3200 
3201 	if (task->watchapplying != 0) {
3202 		lck_mtx_sleep(&task_watch_mtx, LCK_SLEEP_DEFAULT, &task->watchapplying, THREAD_UNINT);
3203 		task_watch_unlock();
3204 		kfree_type(thread_watchlist_t, numwatchers, threadlist);
3205 		goto retry;
3206 	}
3207 
3208 	if (numwatchers != task->num_taskwatchers) {
3209 		task_watch_unlock();
3210 		kfree_type(thread_watchlist_t, numwatchers, threadlist);
3211 		goto retry;
3212 	}
3213 
3214 	setbg = proc_get_effective_task_policy(task, TASK_POLICY_WATCHERS_BG);
3215 
3216 	task->watchapplying = 1;
3217 	i = 0;
3218 	queue_iterate(&task->task_watchers, twp, task_watch_t *, tw_links) {
3219 		threadlist[i].thread = twp->tw_thread;
3220 		thread_reference(threadlist[i].thread);
3221 		if (setbg != 0) {
3222 			twp->tw_importance = twp->tw_thread->importance;
3223 			threadlist[i].importance = INT_MIN;
3224 		} else {
3225 			threadlist[i].importance = twp->tw_importance;
3226 		}
3227 		i++;
3228 		if (i > numwatchers) {
3229 			break;
3230 		}
3231 	}
3232 
3233 	task_watch_unlock();
3234 
3235 	for (j = 0; j < i; j++) {
3236 		set_thread_appbg(threadlist[j].thread, setbg, threadlist[j].importance);
3237 		thread_deallocate(threadlist[j].thread);
3238 	}
3239 	kfree_type(thread_watchlist_t, numwatchers, threadlist);
3240 
3241 
3242 	task_watch_lock();
3243 	task->watchapplying = 0;
3244 	thread_wakeup_one(&task->watchapplying);
3245 	task_watch_unlock();
3246 }
3247 
3248 void
3249 thead_remove_taskwatch(thread_t thread)
3250 {
3251 	task_watch_t * twp;
3252 	int importance = 0;
3253 
3254 	task_watch_lock();
3255 	if ((twp = thread->taskwatch) != NULL) {
3256 		thread->taskwatch = NULL;
3257 		remove_taskwatch_locked(twp->tw_task, twp);
3258 	}
3259 	task_watch_unlock();
3260 	if (twp != NULL) {
3261 		thread_deallocate(twp->tw_thread);
3262 		task_deallocate(twp->tw_task);
3263 		importance = twp->tw_importance;
3264 		kfree_type(task_watch_t, twp);
3265 		/* remove the thread and networkbg */
3266 		set_thread_appbg(thread, 0, importance);
3267 	}
3268 }
3269 
3270 void
3271 task_removewatchers(task_t task)
3272 {
3273 	queue_head_t queue;
3274 	task_watch_t *twp;
3275 
3276 	task_watch_lock();
3277 	queue_new_head(&task->task_watchers, &queue, task_watch_t *, tw_links);
3278 	queue_init(&task->task_watchers);
3279 
3280 	queue_iterate(&queue, twp, task_watch_t *, tw_links) {
3281 		/*
3282 		 * Since the linkage is removed and thread state cleanup is already set up,
3283 		 * remove the reference from the thread.
3284 		 */
3285 		twp->tw_thread->taskwatch = NULL;       /* removed linkage, clear thread holding ref */
3286 	}
3287 
3288 	task->num_taskwatchers = 0;
3289 	task_watch_unlock();
3290 
3291 	while (!queue_empty(&queue)) {
3292 		queue_remove_first(&queue, twp, task_watch_t *, tw_links);
3293 		/* remove thread and network bg */
3294 		set_thread_appbg(twp->tw_thread, 0, twp->tw_importance);
3295 		thread_deallocate(twp->tw_thread);
3296 		task_deallocate(twp->tw_task);
3297 		kfree_type(task_watch_t, twp);
3298 	}
3299 }
3300 #endif /* CONFIG_TASKWATCH */
3301 
3302 /*
3303  * Routines for importance donation/inheritance/boosting
3304  */
3305 
3306 static void
3307 task_importance_update_live_donor(task_t target_task)
3308 {
3309 #if IMPORTANCE_INHERITANCE
3310 
3311 	ipc_importance_task_t task_imp;
3312 
3313 	task_imp = ipc_importance_for_task(target_task, FALSE);
3314 	if (IIT_NULL != task_imp) {
3315 		ipc_importance_task_update_live_donor(task_imp);
3316 		ipc_importance_task_release(task_imp);
3317 	}
3318 #endif /* IMPORTANCE_INHERITANCE */
3319 }
3320 
3321 void
3322 task_importance_mark_donor(task_t task, boolean_t donating)
3323 {
3324 #if IMPORTANCE_INHERITANCE
3325 	ipc_importance_task_t task_imp;
3326 
3327 	task_imp = ipc_importance_for_task(task, FALSE);
3328 	if (IIT_NULL != task_imp) {
3329 		ipc_importance_task_mark_donor(task_imp, donating);
3330 		ipc_importance_task_release(task_imp);
3331 	}
3332 #endif /* IMPORTANCE_INHERITANCE */
3333 }
3334 
3335 void
3336 task_importance_mark_live_donor(task_t task, boolean_t live_donating)
3337 {
3338 #if IMPORTANCE_INHERITANCE
3339 	ipc_importance_task_t task_imp;
3340 
3341 	task_imp = ipc_importance_for_task(task, FALSE);
3342 	if (IIT_NULL != task_imp) {
3343 		ipc_importance_task_mark_live_donor(task_imp, live_donating);
3344 		ipc_importance_task_release(task_imp);
3345 	}
3346 #endif /* IMPORTANCE_INHERITANCE */
3347 }
3348 
3349 void
3350 task_importance_mark_receiver(task_t task, boolean_t receiving)
3351 {
3352 #if IMPORTANCE_INHERITANCE
3353 	ipc_importance_task_t task_imp;
3354 
3355 	task_imp = ipc_importance_for_task(task, FALSE);
3356 	if (IIT_NULL != task_imp) {
3357 		ipc_importance_task_mark_receiver(task_imp, receiving);
3358 		ipc_importance_task_release(task_imp);
3359 	}
3360 #endif /* IMPORTANCE_INHERITANCE */
3361 }
3362 
3363 void
3364 task_importance_mark_denap_receiver(task_t task, boolean_t denap)
3365 {
3366 #if IMPORTANCE_INHERITANCE
3367 	ipc_importance_task_t task_imp;
3368 
3369 	task_imp = ipc_importance_for_task(task, FALSE);
3370 	if (IIT_NULL != task_imp) {
3371 		ipc_importance_task_mark_denap_receiver(task_imp, denap);
3372 		ipc_importance_task_release(task_imp);
3373 	}
3374 #endif /* IMPORTANCE_INHERITANCE */
3375 }
3376 
3377 void
3378 task_importance_reset(__imp_only task_t task)
3379 {
3380 #if IMPORTANCE_INHERITANCE
3381 	ipc_importance_task_t task_imp;
3382 
3383 	/* TODO: Lower importance downstream before disconnect */
3384 	task_imp = task->task_imp_base;
3385 	ipc_importance_reset(task_imp, FALSE);
3386 	task_importance_update_live_donor(task);
3387 #endif /* IMPORTANCE_INHERITANCE */
3388 }
3389 
3390 void
3391 task_importance_init_from_parent(__imp_only task_t new_task, __imp_only task_t parent_task)
3392 {
3393 #if IMPORTANCE_INHERITANCE
3394 	ipc_importance_task_t new_task_imp = IIT_NULL;
3395 
3396 	new_task->task_imp_base = NULL;
3397 	if (!parent_task) {
3398 		return;
3399 	}
3400 
3401 	if (task_is_marked_importance_donor(parent_task)) {
3402 		new_task_imp = ipc_importance_for_task(new_task, FALSE);
3403 		assert(IIT_NULL != new_task_imp);
3404 		ipc_importance_task_mark_donor(new_task_imp, TRUE);
3405 	}
3406 	if (task_is_marked_live_importance_donor(parent_task)) {
3407 		if (IIT_NULL == new_task_imp) {
3408 			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3409 		}
3410 		assert(IIT_NULL != new_task_imp);
3411 		ipc_importance_task_mark_live_donor(new_task_imp, TRUE);
3412 	}
3413 	/* Do not inherit 'receiver' on fork, vfexec or true spawn */
3414 	if (task_is_exec_copy(new_task) &&
3415 	    task_is_marked_importance_receiver(parent_task)) {
3416 		if (IIT_NULL == new_task_imp) {
3417 			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3418 		}
3419 		assert(IIT_NULL != new_task_imp);
3420 		ipc_importance_task_mark_receiver(new_task_imp, TRUE);
3421 	}
3422 	if (task_is_marked_importance_denap_receiver(parent_task)) {
3423 		if (IIT_NULL == new_task_imp) {
3424 			new_task_imp = ipc_importance_for_task(new_task, FALSE);
3425 		}
3426 		assert(IIT_NULL != new_task_imp);
3427 		ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE);
3428 	}
3429 	if (IIT_NULL != new_task_imp) {
3430 		assert(new_task->task_imp_base == new_task_imp);
3431 		ipc_importance_task_release(new_task_imp);
3432 	}
3433 #endif /* IMPORTANCE_INHERITANCE */
3434 }
3435 
3436 #if IMPORTANCE_INHERITANCE
3437 /*
3438  * Sets the task boost bit to the provided value.  Does NOT run the update function.
3439  *
3440  * Task lock must be held.
3441  */
3442 static void
3443 task_set_boost_locked(task_t task, boolean_t boost_active)
3444 {
3445 #if IMPORTANCE_TRACE
3446 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START),
3447 	    proc_selfpid(), task_pid(task), trequested_0(task), trequested_1(task), 0);
3448 #endif /* IMPORTANCE_TRACE */
3449 
3450 	task->requested_policy.trp_boosted = boost_active;
3451 
3452 #if IMPORTANCE_TRACE
3453 	if (boost_active == TRUE) {
3454 		DTRACE_BOOST2(boost, task_t, task, int, task_pid(task));
3455 	} else {
3456 		DTRACE_BOOST2(unboost, task_t, task, int, task_pid(task));
3457 	}
3458 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END),
3459 	    proc_selfpid(), task_pid(task),
3460 	    trequested_0(task), trequested_1(task), 0);
3461 #endif /* IMPORTANCE_TRACE */
3462 }
3463 
3464 /*
3465  * Sets the task boost bit to the provided value and applies the update.
3466  *
3467  * Task lock must be held.  Must call update complete after unlocking the task.
3468  */
3469 void
3470 task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token)
3471 {
3472 	task_set_boost_locked(task, boost_active);
3473 
3474 	task_policy_update_locked(task, pend_token);
3475 }
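
/*
 * Example usage (illustrative sketch, not compiled): the expected calling
 * sequence for the routine above, assuming the struct task_pend_token /
 * task_policy_update_complete_unlocked() pattern used elsewhere in this
 * subsystem.
 */
#if 0
static void
example_update_task_boost(task_t task, boolean_t boost_active)
{
	struct task_pend_token pend_token = {};

	task_lock(task);
	/* set trp_boosted and recompute effective policy under the task lock */
	task_update_boost_locked(task, boost_active, &pend_token);
	task_unlock(task);

	/* deferred actions recorded in pend_token must run without the lock */
	task_policy_update_complete_unlocked(task, &pend_token);
}
#endif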
3476 
3477 /*
3478  * Check if this task should donate importance.
3479  *
3480  * May be called without taking the task lock. In that case, donor status can change
3481  * so you must check only once for each donation event.
3482  */
3483 boolean_t
3484 task_is_importance_donor(task_t task)
3485 {
3486 	if (task->task_imp_base == IIT_NULL) {
3487 		return FALSE;
3488 	}
3489 	return ipc_importance_task_is_donor(task->task_imp_base);
3490 }
3491 
3492 /*
3493  * Query the status of the task's donor mark.
3494  */
3495 boolean_t
3496 task_is_marked_importance_donor(task_t task)
3497 {
3498 	if (task->task_imp_base == IIT_NULL) {
3499 		return FALSE;
3500 	}
3501 	return ipc_importance_task_is_marked_donor(task->task_imp_base);
3502 }
3503 
3504 /*
3505  * Query the status of the task's live donor and donor mark.
3506  */
3507 boolean_t
3508 task_is_marked_live_importance_donor(task_t task)
3509 {
3510 	if (task->task_imp_base == IIT_NULL) {
3511 		return FALSE;
3512 	}
3513 	return ipc_importance_task_is_marked_live_donor(task->task_imp_base);
3514 }
3515 
3516 
3517 /*
3518  * This routine may be called without holding task lock
3519  * since the value of imp_receiver can never be unset.
3520  */
3521 boolean_t
3522 task_is_importance_receiver(task_t task)
3523 {
3524 	if (task->task_imp_base == IIT_NULL) {
3525 		return FALSE;
3526 	}
3527 	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
3528 }
3529 
3530 /*
3531  * Query the task's receiver mark.
3532  */
3533 boolean_t
3534 task_is_marked_importance_receiver(task_t task)
3535 {
3536 	if (task->task_imp_base == IIT_NULL) {
3537 		return FALSE;
3538 	}
3539 	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
3540 }
3541 
3542 /*
3543  * This routine may be called without holding task lock
3544  * since the value of de-nap receiver can never be unset.
3545  */
3546 boolean_t
3547 task_is_importance_denap_receiver(task_t task)
3548 {
3549 	if (task->task_imp_base == IIT_NULL) {
3550 		return FALSE;
3551 	}
3552 	return ipc_importance_task_is_denap_receiver(task->task_imp_base);
3553 }
3554 
3555 /*
3556  * Query the task's de-nap receiver mark.
3557  */
3558 boolean_t
3559 task_is_marked_importance_denap_receiver(task_t task)
3560 {
3561 	if (task->task_imp_base == IIT_NULL) {
3562 		return FALSE;
3563 	}
3564 	return ipc_importance_task_is_marked_denap_receiver(task->task_imp_base);
3565 }
3566 
3567 /*
3568  * This routine may be called without holding task lock
3569  * since the value of imp_receiver can never be unset.
3570  */
3571 boolean_t
3572 task_is_importance_receiver_type(task_t task)
3573 {
3574 	if (task->task_imp_base == IIT_NULL) {
3575 		return FALSE;
3576 	}
3577 	return task_is_importance_receiver(task) ||
3578 	       task_is_importance_denap_receiver(task);
3579 }
3580 
3581 /*
3582  * External importance assertions are managed by the process in userspace
3583  * Internal importance assertions are the responsibility of the kernel
3584  * Assertions are changed from internal to external via task_importance_externalize_assertion
3585  */
3586 
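/*
 * Example usage (illustrative sketch, not compiled): a hypothetical kernel
 * caller pairing a file-lock assertion hold with a later drop, using the
 * errno-style returns of the routines below.
 */
#if 0
static int
example_boost_file_lock_holder(task_t holder)
{
	int err;

	/* boost the holder while it owns a contended file lock */
	err = task_importance_hold_file_lock_assertion(holder, 1);
	if (err != 0) {
		return err;
	}

	/* ... holder runs boosted until the lock is released ... */

	/* drop the boost once the lock is released; returns EOVERFLOW on failure */
	return task_importance_drop_file_lock_assertion(holder, 1);
}
#endif
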
3587 int
3588 task_importance_hold_internal_assertion(task_t target_task, uint32_t count)
3589 {
3590 	ipc_importance_task_t task_imp;
3591 	kern_return_t ret;
3592 
3593 	/* may be first time, so allow for possible importance setup */
3594 	task_imp = ipc_importance_for_task(target_task, FALSE);
3595 	if (IIT_NULL == task_imp) {
3596 		return EOVERFLOW;
3597 	}
3598 	ret = ipc_importance_task_hold_internal_assertion(task_imp, count);
3599 	ipc_importance_task_release(task_imp);
3600 
3601 	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3602 }
3603 
3604 int
3605 task_importance_hold_file_lock_assertion(task_t target_task, uint32_t count)
3606 {
3607 	ipc_importance_task_t task_imp;
3608 	kern_return_t ret;
3609 
3610 	/* may be first time, so allow for possible importance setup */
3611 	task_imp = ipc_importance_for_task(target_task, FALSE);
3612 	if (IIT_NULL == task_imp) {
3613 		return EOVERFLOW;
3614 	}
3615 	ret = ipc_importance_task_hold_file_lock_assertion(task_imp, count);
3616 	ipc_importance_task_release(task_imp);
3617 
3618 	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3619 }
3620 
3621 int
3622 task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t count)
3623 {
3624 	ipc_importance_task_t task_imp;
3625 	kern_return_t ret;
3626 
3627 	/* must already have set up an importance */
3628 	task_imp = target_task->task_imp_base;
3629 	if (IIT_NULL == task_imp) {
3630 		return EOVERFLOW;
3631 	}
3632 	ret = ipc_importance_task_hold_legacy_external_assertion(task_imp, count);
3633 	return (KERN_SUCCESS != ret) ? ENOTSUP : 0;
3634 }
3635 
3636 int
3637 task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count)
3638 {
3639 	ipc_importance_task_t task_imp;
3640 	kern_return_t ret;
3641 
3642 	/* must already have set up an importance */
3643 	task_imp = target_task->task_imp_base;
3644 	if (IIT_NULL == task_imp) {
3645 		return EOVERFLOW;
3646 	}
3647 	ret = ipc_importance_task_drop_file_lock_assertion(target_task->task_imp_base, count);
3648 	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
3649 }
3650 
3651 int
3652 task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t count)
3653 {
3654 	ipc_importance_task_t task_imp;
3655 	kern_return_t ret;
3656 
3657 	/* must already have set up an importance */
3658 	task_imp = target_task->task_imp_base;
3659 	if (IIT_NULL == task_imp) {
3660 		return EOVERFLOW;
3661 	}
3662 	ret = ipc_importance_task_drop_legacy_external_assertion(task_imp, count);
3663 	return (KERN_SUCCESS != ret) ? EOVERFLOW : 0;
3664 }
3665 
3666 static void
3667 task_add_importance_watchport(task_t task, mach_port_t port, int *boostp)
3668 {
3669 	int boost = 0;
3670 
3671 	__imptrace_only int released_pid = 0;
3672 	__imptrace_only int pid = task_pid(task);
3673 
3674 	ipc_importance_task_t release_imp_task = IIT_NULL;
3675 
3676 	if (IP_VALID(port) != 0) {
3677 		ipc_importance_task_t new_imp_task = ipc_importance_for_task(task, FALSE);
3678 
3679 		ip_mq_lock(port);
3680 
3681 		/*
3682 		 * The port must have been marked tempowner already.
3683 		 * This also filters out ports whose receive rights
3684 		 * are already enqueued in a message, as you can't
3685 		 * change the right's destination once it's already
3686 		 * on its way.
3687 		 */
3688 		if (port->ip_tempowner != 0) {
3689 			assert(port->ip_impdonation != 0);
3690 
3691 			boost = port->ip_impcount;
3692 			if (IIT_NULL != ip_get_imp_task(port)) {
3693 				/*
3694 				 * if this port is already bound to a task,
3695 				 * release the task reference and drop any
3696 				 * watchport-forwarded boosts
3697 				 */
3698 				release_imp_task = ip_get_imp_task(port);
3699 				port->ip_imp_task = IIT_NULL;
3700 			}
3701 
3702 			/* mark the port as watching another task (reference held in port->ip_imp_task) */
3703 			if (ipc_importance_task_is_marked_receiver(new_imp_task)) {
3704 				port->ip_imp_task = new_imp_task;
3705 				new_imp_task = IIT_NULL;
3706 			}
3707 		}
3708 		ip_mq_unlock(port);
3709 
3710 		if (IIT_NULL != new_imp_task) {
3711 			ipc_importance_task_release(new_imp_task);
3712 		}
3713 
3714 		if (IIT_NULL != release_imp_task) {
3715 			if (boost > 0) {
3716 				ipc_importance_task_drop_internal_assertion(release_imp_task, boost);
3717 			}
3718 
3719 			// released_pid = task_pid(release_imp_task); /* TODO: Need ref-safe way to get pid */
3720 			ipc_importance_task_release(release_imp_task);
3721 		}
3722 #if IMPORTANCE_TRACE
3723 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE,
3724 		    proc_selfpid(), pid, boost, released_pid, 0);
3725 #endif /* IMPORTANCE_TRACE */
3726 	}
3727 
3728 	*boostp = boost;
3729 	return;
3730 }
3731 
3732 #endif /* IMPORTANCE_INHERITANCE */
3733 
3734 /*
3735  * Routines for VM to query task importance
3736  */
3737 
3738 
3739 /*
3740  * Order to be considered while estimating importance
3741  * for low memory notification and purging purgeable memory.
3742  */
3743 #define TASK_IMPORTANCE_FOREGROUND     4
3744 #define TASK_IMPORTANCE_NOTDARWINBG    1
3745 
3746 
3747 /*
3748  * (Un)Mark the task as a privileged listener for memory notifications.
3749  * If marked, this task will be notified ahead of the bulk of all
3750  * other tasks when the system enters a pressure level of interest
3751  * to this task.
3752  */
3753 int
3754 task_low_mem_privileged_listener(task_t task, boolean_t new_value, boolean_t *old_value)
3755 {
3756 	if (old_value != NULL) {
3757 		*old_value = (boolean_t)task->low_mem_privileged_listener;
3758 	} else {
3759 		task_lock(task);
3760 		task->low_mem_privileged_listener = (uint32_t)new_value;
3761 		task_unlock(task);
3762 	}
3763 
3764 	return 0;
3765 }
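
/*
 * Example usage (illustrative sketch, not compiled): passing a non-NULL
 * old_value queries the mark; passing NULL sets it under the task lock.
 */
#if 0
static void
example_privileged_listener_usage(task_t task)
{
	boolean_t was_listener = FALSE;

	/* query the current privileged-listener mark; new_value is ignored */
	task_low_mem_privileged_listener(task, FALSE, &was_listener);

	/* mark the task as a privileged listener for memory notifications */
	task_low_mem_privileged_listener(task, TRUE, NULL);
}
#endif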
3766 
3767 /*
3768  * Checks if the task is already notified.
3769  *
3770  * Condition: task lock should be held while calling this function.
3771  */
3772 boolean_t
3773 task_has_been_notified(task_t task, int pressurelevel)
3774 {
3775 	if (task == NULL) {
3776 		return FALSE;
3777 	}
3778 
3779 	if (pressurelevel == kVMPressureWarning) {
3780 		return task->low_mem_notified_warn ? TRUE : FALSE;
3781 	} else if (pressurelevel == kVMPressureCritical) {
3782 		return task->low_mem_notified_critical ? TRUE : FALSE;
3783 	} else {
3784 		return TRUE;
3785 	}
3786 }
3787 
3788 
3789 /*
3790  * Checks if the task is used for purging.
3791  *
3792  * Condition: task lock should be held while calling this function.
3793  */
3794 boolean_t
3795 task_used_for_purging(task_t task, int pressurelevel)
3796 {
3797 	if (task == NULL) {
3798 		return FALSE;
3799 	}
3800 
3801 	if (pressurelevel == kVMPressureWarning) {
3802 		return task->purged_memory_warn ? TRUE : FALSE;
3803 	} else if (pressurelevel == kVMPressureCritical) {
3804 		return task->purged_memory_critical ? TRUE : FALSE;
3805 	} else {
3806 		return TRUE;
3807 	}
3808 }
3809 
3810 
3811 /*
3812  * Mark the task as notified with memory notification.
3813  *
3814  * Condition: task lock should be held while calling this function.
3815  */
3816 void
3817 task_mark_has_been_notified(task_t task, int pressurelevel)
3818 {
3819 	if (task == NULL) {
3820 		return;
3821 	}
3822 
3823 	if (pressurelevel == kVMPressureWarning) {
3824 		task->low_mem_notified_warn = 1;
3825 	} else if (pressurelevel == kVMPressureCritical) {
3826 		task->low_mem_notified_critical = 1;
3827 	}
3828 }
3829 
3830 
3831 /*
3832  * Mark the task as purged.
3833  *
3834  * Condition: task lock should be held while calling this function.
3835  */
3836 void
3837 task_mark_used_for_purging(task_t task, int pressurelevel)
3838 {
3839 	if (task == NULL) {
3840 		return;
3841 	}
3842 
3843 	if (pressurelevel == kVMPressureWarning) {
3844 		task->purged_memory_warn = 1;
3845 	} else if (pressurelevel == kVMPressureCritical) {
3846 		task->purged_memory_critical = 1;
3847 	}
3848 }
3849 
3850 
3851 /*
3852  * Mark the task eligible for low memory notification.
3853  *
3854  * Condition: task lock should be held while calling this function.
3855  */
3856 void
3857 task_clear_has_been_notified(task_t task, int pressurelevel)
3858 {
3859 	if (task == NULL) {
3860 		return;
3861 	}
3862 
3863 	if (pressurelevel == kVMPressureWarning) {
3864 		task->low_mem_notified_warn = 0;
3865 	} else if (pressurelevel == kVMPressureCritical) {
3866 		task->low_mem_notified_critical = 0;
3867 	}
3868 }
3869 
3870 
3871 /*
3872  * Mark the task eligible for purging its purgeable memory.
3873  *
3874  * Condition: task lock should be held while calling this function.
3875  */
3876 void
3877 task_clear_used_for_purging(task_t task)
3878 {
3879 	if (task == NULL) {
3880 		return;
3881 	}
3882 
3883 	task->purged_memory_warn = 0;
3884 	task->purged_memory_critical = 0;
3885 }
3886 
3887 
3888 /*
3889  * Estimate task importance for purging its purgeable memory
3890  * and low memory notification.
3891  *
3892  * Importance is calculated in the following order of criteria:
3893  * -Task role : Background vs Foreground
3894  * -Boost status: Not boosted vs Boosted
3895  * -Darwin BG status.
3896  *
3897  * Returns: Estimated task importance. Less important task will have lower
3898  * Returns: Estimated task importance. A less important task will have a
3899  *          lower estimated importance.
3900 int
3901 task_importance_estimate(task_t task)
3902 {
3903 	int task_importance = 0;
3904 
3905 	if (task == NULL) {
3906 		return 0;
3907 	}
3908 
3909 	if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
3910 		task_importance += TASK_IMPORTANCE_FOREGROUND;
3911 	}
3912 
3913 	if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0) {
3914 		task_importance += TASK_IMPORTANCE_NOTDARWINBG;
3915 	}
3916 
3917 	return task_importance;
3918 }
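
/*
 * Worked example of the weights above: a foreground, non-Darwin-BG task scores
 * TASK_IMPORTANCE_FOREGROUND + TASK_IMPORTANCE_NOTDARWINBG = 5, a background
 * non-Darwin-BG task scores 1, and a Darwin-BG background task scores 0.
 */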
3919 
3920 boolean_t
3921 task_has_assertions(task_t task)
3922 {
3923 	return task->task_imp_base->iit_assertcnt ? TRUE : FALSE;
3924 }
3925 
3926 
3927 kern_return_t
3928 send_resource_violation(typeof(send_cpu_usage_violation) sendfunc,
3929     task_t violator,
3930     struct ledger_entry_info *linfo,
3931     resource_notify_flags_t flags)
3932 {
3933 #ifndef MACH_BSD
3934 	return KERN_NOT_SUPPORTED;
3935 #else
3936 	kern_return_t   kr = KERN_SUCCESS;
3937 	proc_t          proc = NULL;
3938 	posix_path_t    proc_path = "";
3939 	proc_name_t     procname = "<unknown>";
3940 	int             pid = -1;
3941 	clock_sec_t     secs;
3942 	clock_nsec_t    nsecs;
3943 	mach_timespec_t timestamp;
3944 	thread_t        curthread = current_thread();
3945 	ipc_port_t      dstport = MACH_PORT_NULL;
3946 
3947 	if (!violator) {
3948 		kr = KERN_INVALID_ARGUMENT; goto finish;
3949 	}
3950 
3951 	/* extract violator information */
3952 	task_lock(violator);
3953 	if (!(proc = get_bsdtask_info(violator))) {
3954 		task_unlock(violator);
3955 		kr = KERN_INVALID_ARGUMENT; goto finish;
3956 	}
3957 	(void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname));
3958 	pid = task_pid(violator);
3959 	if (flags & kRNFatalLimitFlag) {
3960 		kr = proc_pidpathinfo_internal(proc, 0, proc_path,
3961 		    sizeof(proc_path), NULL);
3962 	}
3963 	task_unlock(violator);
3964 	if (kr) {
3965 		goto finish;
3966 	}
3967 
3968 	/* violation time ~ now */
3969 	clock_get_calendar_nanotime(&secs, &nsecs);
3970 	timestamp.tv_sec = (int32_t)secs;
3971 	timestamp.tv_nsec = (int32_t)nsecs;
3972 	/* 25567702 tracks widening mach_timespec_t */
3973 
3974 	/* send message */
3975 	kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE,
3976 	    HOST_RESOURCE_NOTIFY_PORT, &dstport);
3977 	if (kr) {
3978 		goto finish;
3979 	}
3980 
3981 	thread_set_honor_qlimit(curthread);
3982 	kr = sendfunc(dstport,
3983 	    procname, pid, proc_path, timestamp,
3984 	    linfo->lei_balance, linfo->lei_last_refill,
3985 	    linfo->lei_limit, linfo->lei_refill_period,
3986 	    flags);
3987 	thread_clear_honor_qlimit(curthread);
3988 
3989 	ipc_port_release_send(dstport);
3990 
3991 finish:
3992 	return kr;
3993 #endif      /* MACH_BSD */
3994 }
3995 
3996 kern_return_t
3997 send_resource_violation_with_fatal_port(typeof(send_port_space_violation) sendfunc,
3998     task_t violator,
3999     int64_t current_size,
4000     int64_t limit,
4001     mach_port_t fatal_port,
4002     resource_notify_flags_t flags)
4003 {
4004 #ifndef MACH_BSD
4005 	kr = KERN_NOT_SUPPORTED; goto finish;
4006 #else
4007 	kern_return_t   kr = KERN_SUCCESS;
4008 	proc_t          proc = NULL;
4009 	proc_name_t     procname = "<unknown>";
4010 	int             pid = -1;
4011 	clock_sec_t     secs;
4012 	clock_nsec_t    nsecs;
4013 	mach_timespec_t timestamp;
4014 	thread_t        curthread = current_thread();
4015 	ipc_port_t      dstport = MACH_PORT_NULL;
4016 
4017 	if (!violator) {
4018 		kr = KERN_INVALID_ARGUMENT; goto finish;
4019 	}
4020 
4021 	/* extract violator information; no need to acquire task lock */
4022 	assert(violator == current_task());
4023 	if (!(proc = get_bsdtask_info(violator))) {
4024 		kr = KERN_INVALID_ARGUMENT; goto finish;
4025 	}
4026 	(void)mig_strncpy(procname, proc_best_name(proc), sizeof(procname));
4027 	pid = task_pid(violator);
4028 
4029 	/* violation time ~ now */
4030 	clock_get_calendar_nanotime(&secs, &nsecs);
4031 	timestamp.tv_sec = (int32_t)secs;
4032 	timestamp.tv_nsec = (int32_t)nsecs;
4033 	/* 25567702 tracks widening mach_timespec_t */
4034 
4035 	/* send message */
4036 	kr = task_get_special_port(current_task(), TASK_RESOURCE_NOTIFY_PORT, &dstport);
4037 	if (dstport == MACH_PORT_NULL) {
4038 		kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE,
4039 		    HOST_RESOURCE_NOTIFY_PORT, &dstport);
4040 		if (kr) {
4041 			goto finish;
4042 		}
4043 	}
4044 
4045 	thread_set_honor_qlimit(curthread);
4046 	kr = sendfunc(dstport,
4047 	    procname, pid, timestamp,
4048 	    current_size, limit, fatal_port,
4049 	    flags);
4050 	thread_clear_honor_qlimit(curthread);
4051 
4052 	ipc_port_release_send(dstport);
4053 
4054 #endif /* MACH_BSD */
4055 finish:
4056 	return kr;
4057 }
4058 
4059 /*
4060  * Resource violations trace four 64-bit integers.  For K32, two additional
4061  * codes are allocated, the first with the low two bits doubled.  So if the K64
4062  * code is 0x042, the K32 codes would be 0x044 and 0x045.
4063  */
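
/*
 * Worked example of the K32 split below: for code 0x042,
 * lownibble = (0x042 & 0x3) * 2 = 4, codeA = (0x042 & 0xffc) | 4 = 0x044,
 * and codeB = codeA + 1 = 0x045.
 */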
4064 #ifdef __LP64__
4065 void
4066 trace_resource_violation(uint16_t code,
4067     struct ledger_entry_info *linfo)
4068 {
4069 	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, code),
4070 	    linfo->lei_balance, linfo->lei_last_refill,
4071 	    linfo->lei_limit, linfo->lei_refill_period);
4072 }
4073 #else /* K32 */
4074 /* TODO: create/find a trace_two_LLs() for K32 systems */
4075 #define MASK32 0xffffffff
4076 void
4077 trace_resource_violation(uint16_t code,
4078     struct ledger_entry_info *linfo)
4079 {
4080 	int8_t lownibble = (code & 0x3) * 2;
4081 	int16_t codeA = (code & 0xffc) | lownibble;
4082 	int16_t codeB = codeA + 1;
4083 
4084 	int32_t balance_high = (linfo->lei_balance >> 32) & MASK32;
4085 	int32_t balance_low = linfo->lei_balance & MASK32;
4086 	int32_t last_refill_high = (linfo->lei_last_refill >> 32) & MASK32;
4087 	int32_t last_refill_low = linfo->lei_last_refill & MASK32;
4088 
4089 	int32_t limit_high = (linfo->lei_limit >> 32) & MASK32;
4090 	int32_t limit_low = linfo->lei_limit & MASK32;
4091 	int32_t refill_period_high = (linfo->lei_refill_period >> 32) & MASK32;
4092 	int32_t refill_period_low = linfo->lei_refill_period & MASK32;
4093 
4094 	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeA),
4095 	    balance_high, balance_low,
4096 	    last_refill_high, last_refill_low);
4097 	KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeB),
4098 	    limit_high, limit_low,
4099 	    refill_period_high, refill_period_low);
4100 }
4101 #endif /* K64/K32 */
4102