1 /*
2 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/mach_types.h>
29 #include <mach/machine/vm_param.h>
30 #include <mach/task.h>
31
32 #include <kern/kern_types.h>
33 #include <kern/ledger.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/spl.h>
38 #include <kern/ast.h>
39 #include <kern/monotonic.h>
40 #include <machine/monotonic.h>
41 #include <ipc/ipc_port.h>
42 #include <ipc/ipc_object.h>
43 #include <vm/vm_map_xnu.h>
44 #include <vm/vm_kern.h>
45 #include <vm/pmap.h>
46 #include <vm/vm_protos.h> /* last */
47 #include <sys/resource.h>
48 #include <sys/signal.h>
49 #include <sys/errno.h>
50 #include <sys/proc_require.h>
51
52 #include <machine/limits.h>
53 #include <sys/codesign.h> /* CS_CDHASH_LEN */
54
55 #undef thread_should_halt
56
57 /* BSD KERN COMPONENT INTERFACE */
58
59 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
60
61 thread_t get_firstthread(task_t);
62 int get_task_userstop(task_t);
63 int get_thread_userstop(thread_t);
64 boolean_t current_thread_aborted(void);
65 void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
66 kern_return_t get_signalact(task_t, thread_t *, int);
67 int fill_task_rusage(task_t task, rusage_info_current *ri);
68 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
69 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
70 uint64_t get_task_logical_writes(task_t task, bool external);
71 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
72 void task_bsdtask_kill(task_t);
73
74 extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
75 extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
76 extern uint64_t proc_uniqueid_task(void *p, void *t);
77 extern int proc_pidversion(void *p);
78 extern int proc_getcdhash(void *p, char *cdhash);
79
80 int mach_to_bsd_errno(kern_return_t mach_err);
81 kern_return_t kern_return_for_errno(int bsd_errno);
82
83 #if MACH_BSD
84 extern void psignal(void *, int);
85 #endif
86
87 /*
88 * Return the task's BSD proc pointer, or NULL if no proc is attached.
89 */
90 void *
91 get_bsdtask_info(task_t t)
92 {
93 void *proc_from_task = task_get_proc_raw(t);
94 proc_require(proc_from_task, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_ALL);
95 return task_has_proc(t) ? proc_from_task : NULL;
96 }
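
/*
 * Illustrative sketch (not part of the original file; not compiled):
 * callers of get_bsdtask_info() must tolerate a NULL return, since a
 * task may never have had, or may already have dropped, its BSD proc.
 */
#if 0
static void
example_use_bsdtask_info(task_t task)
{
	void *bsd = get_bsdtask_info(task);

	if (bsd == NULL) {
		return; /* no proc attached to this task */
	}
	/* ... operate on the proc through the BSD interfaces ... */
}
#endif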
97
98 void
99 task_bsdtask_kill(task_t t)
100 {
101 void * bsd_info = get_bsdtask_info(t);
102 if (bsd_info != NULL) {
103 psignal(bsd_info, SIGKILL);
104 }
105 }
106 /*
107 * Return the BSD proc associated with a thread's read-only data.
108 */
109 void *
110 get_bsdthreadtask_info(thread_t th)
111 {
112 return get_thread_ro(th)->tro_proc;
113 }
114
115 /*
116 * Attach (or detach, when v is NULL) the task's BSD proc pointer.
117 */
118 void
119 set_bsdtask_info(task_t t, void * v)
120 {
121 void *proc_from_task = task_get_proc_raw(t);
122 if (v == NULL) {
123 task_clear_has_proc(t);
124 } else {
125 if (v != proc_from_task) {
126 panic("set_bsdtask_info trying to set random bsd_info %p", v);
127 }
128 task_set_has_proc(t);
129 }
130 }
131
132 __abortlike
133 static void
134 __thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
135 {
136 panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
137 }
138
139 __attribute__((always_inline))
140 thread_ro_t
141 get_thread_ro_unchecked(thread_t th)
142 {
143 return th->t_tro;
144 }
145
146 thread_ro_t
147 get_thread_ro(thread_t th)
148 {
149 thread_ro_t tro = th->t_tro;
150
151 zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
152 if (tro->tro_owner != th) {
153 __thread_ro_circularity_panic(th, tro);
154 }
155 return tro;
156 }
157
158 __attribute__((always_inline))
159 thread_ro_t
160 current_thread_ro_unchecked(void)
161 {
162 return get_thread_ro_unchecked(current_thread());
163 }
164
165 thread_ro_t
166 current_thread_ro(void)
167 {
168 return get_thread_ro(current_thread());
169 }
170
171 void
172 clear_thread_ro_proc(thread_t th)
173 {
174 thread_ro_t tro = get_thread_ro(th);
175
176 zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
177 }
178
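/*
 * The BSD uthread is co-allocated immediately after the Mach thread
 * structure, so the two accessors below convert between the two views
 * with simple pointer arithmetic.
 */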
179 struct uthread *
180 get_bsdthread_info(thread_t th)
181 {
182 return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
183 }
184
185 thread_t
186 get_machthread(struct uthread *uth)
187 {
188 return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
189 }
190
191 /*
192 * This is used to remember any FS error from VNOP_PAGEIN code when
193 * invoked under vm_fault(). The value is an errno-style value. It can
194 * be retrieved by exception handlers using thread_get_state().
195 */
196 void
197 set_thread_pagein_error(thread_t th, int error)
198 {
199 assert(th == current_thread());
200 if (error == 0 || th->t_pagein_error == 0) {
201 th->t_pagein_error = error;
202 }
203 }
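
/*
 * Illustrative sketch (not part of the original file; not compiled):
 * a hypothetical pager error path recording an errno for the faulting
 * thread. Only the first non-zero error is remembered.
 */
#if 0
static void
example_record_pagein_error(int fs_error)
{
	set_thread_pagein_error(current_thread(), fs_error);
}
#endif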
204
205 #if defined(__x86_64__)
206 /*
207 * Returns non-zero if the thread has a non-NULL task
208 * and that task has an LDT.
209 */
210 int
211 thread_task_has_ldt(thread_t th)
212 {
213 task_t task = get_threadtask(th);
214 return task && task->i386_ldt != 0;
215 }
216 #endif /* __x86_64__ */
217
218 /*
219 * XXX
220 */
221 int get_thread_lock_count(thread_t th); /* forced forward */
222 int
223 get_thread_lock_count(thread_t th __unused)
224 {
225 /*
226 * TODO: one day, resurrect counting the locks held so that we can
227 * disallow holding locks across upcalls.
228 *
229 * This never worked on ARM.
230 */
231 return 0;
232 }
233
234 /*
235 * Returns the first thread in the task, with a reference, or THREAD_NULL.
236 */
237 thread_t
238 get_firstthread(task_t task)
239 {
240 thread_t thread = THREAD_NULL;
241 task_lock(task);
242
243 if (!task->active) {
244 task_unlock(task);
245 return THREAD_NULL;
246 }
247
248 thread = (thread_t)(void *)queue_first(&task->threads);
249
250 if (queue_end(&task->threads, (queue_entry_t)thread)) {
251 task_unlock(task);
252 return THREAD_NULL;
253 }
254
255 thread_reference(thread);
256 task_unlock(task);
257 return thread;
258 }
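
/*
 * Illustrative sketch (not part of the original file; not compiled):
 * the reference returned by get_firstthread() must be dropped with
 * thread_deallocate() once the caller is done with the thread.
 */
#if 0
static void
example_use_firstthread(task_t task)
{
	thread_t thread = get_firstthread(task);

	if (thread != THREAD_NULL) {
		/* ... inspect the thread ... */
		thread_deallocate(thread);
	}
}
#endif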
259
260 kern_return_t
261 get_signalact(
262 task_t task,
263 thread_t *result_out,
264 int setast)
265 {
266 kern_return_t result = KERN_SUCCESS;
267 thread_t inc, thread = THREAD_NULL;
268
269 task_lock(task);
270
271 if (!task->active) {
272 task_unlock(task);
273
274 return KERN_FAILURE;
275 }
276
277 for (inc = (thread_t)(void *)queue_first(&task->threads);
278 !queue_end(&task->threads, (queue_entry_t)inc);) {
279 thread_mtx_lock(inc);
280 if (inc->active &&
281 (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
282 thread = inc;
283 break;
284 }
285 thread_mtx_unlock(inc);
286
287 inc = (thread_t)(void *)queue_next(&inc->task_threads);
288 }
289
290 if (result_out) {
291 *result_out = thread;
292 }
293
294 if (thread) {
295 if (setast) {
296 act_set_astbsd(thread);
297 }
298
299 thread_mtx_unlock(thread);
300 } else {
301 result = KERN_FAILURE;
302 }
303
304 task_unlock(task);
305
306 return result;
307 }
308
309
310 kern_return_t
311 check_actforsig(
312 task_t task,
313 thread_t thread,
314 int setast)
315 {
316 kern_return_t result = KERN_FAILURE;
317 thread_t inc;
318
319 task_lock(task);
320
321 if (!task->active) {
322 task_unlock(task);
323
324 return KERN_FAILURE;
325 }
326
327 for (inc = (thread_t)(void *)queue_first(&task->threads);
328 !queue_end(&task->threads, (queue_entry_t)inc);) {
329 if (inc == thread) {
330 thread_mtx_lock(inc);
331
332 if (inc->active &&
333 (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
334 result = KERN_SUCCESS;
335 break;
336 }
337
338 thread_mtx_unlock(inc);
339 break;
340 }
341
342 inc = (thread_t)(void *)queue_next(&inc->task_threads);
343 }
344
345 if (result == KERN_SUCCESS) {
346 if (setast) {
347 act_set_astbsd(thread);
348 }
349
350 thread_mtx_unlock(thread);
351 }
352
353 task_unlock(task);
354
355 return result;
356 }
357
358 ledger_t
359 get_task_ledger(task_t t)
360 {
361 return t->ledger;
362 }
363
364 /*
365 * This is only safe to call from a thread executing in
366 * the task's context, or if the task is locked. Otherwise,
367 * the map could be switched for the task (and freed) before
368 * we return it here.
369 */
370 vm_map_t
371 get_task_map(task_t t)
372 {
373 return t->map;
374 }
375
376 vm_map_t
377 get_task_map_reference(task_t t)
378 {
379 vm_map_t m;
380
381 if (t == NULL) {
382 return VM_MAP_NULL;
383 }
384
385 task_lock(t);
386 if (!t->active) {
387 task_unlock(t);
388 return VM_MAP_NULL;
389 }
390 m = t->map;
391 vm_map_reference(m);
392 task_unlock(t);
393 return m;
394 }
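
/*
 * Illustrative sketch (not part of the original file; not compiled):
 * a map obtained through get_task_map_reference() remains valid even
 * if the task later switches maps, and must be released with
 * vm_map_deallocate().
 */
#if 0
static void
example_use_task_map(task_t task)
{
	vm_map_t map = get_task_map_reference(task);

	if (map != VM_MAP_NULL) {
		/* ... inspect the map ... */
		vm_map_deallocate(map);
	}
}
#endif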
395
396 /*
397 * Return the task's IPC space.
398 */
399 ipc_space_t
400 get_task_ipcspace(task_t t)
401 {
402 return t->itk_space;
403 }
404
405 int
406 get_task_numacts(task_t t)
407 {
408 return t->thread_count;
409 }
410
411 /* Does this machine need the 64-bit register set for signal handlers? */
412 int
413 is_64signalregset(void)
414 {
415 if (task_has_64Bit_data(current_task())) {
416 return 1;
417 }
418
419 return 0;
420 }
421
422 /*
423 * Swap in a new map for the task/thread pair; the old map reference is
424 * returned. Also does a pmap switch if the thread provided is the current thread.
425 */
426 vm_map_t
427 swap_task_map(task_t task, thread_t thread, vm_map_t map)
428 {
429 vm_map_t old_map;
430 boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;
431
432 if (task != get_threadtask(thread)) {
433 panic("swap_task_map");
434 }
435
436 task_lock(task);
437 mp_disable_preemption();
438
439 old_map = task->map;
440 thread->map = task->map = map;
441 vm_commit_pagezero_status(map);
442
443 if (doswitch) {
444 PMAP_SWITCH_USER(thread, map, cpu_number());
445 }
446 mp_enable_preemption();
447 task_unlock(task);
448
449 return old_map;
450 }
451
452 /*
453 *
454 * This is only safe to call from a thread executing in
455 * the task's context, or if the task is locked. Otherwise,
456 * the map could be switched for the task (and freed) before
457 * we return it here.
458 */
459 pmap_t
460 get_task_pmap(task_t t)
461 {
462 return t->map->pmap;
463 }
464
465 /*
466 * Return the task's resident memory size, from the phys_mem ledger.
467 */
468 uint64_t
469 get_task_resident_size(task_t task)
470 {
471 uint64_t val;
472
473 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
474 return val;
475 }
476
477 uint64_t
478 get_task_compressed(task_t task)
479 {
480 uint64_t val;
481
482 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t *) &val);
483 return val;
484 }
485
486 uint64_t
487 get_task_resident_max(task_t task)
488 {
489 uint64_t val;
490
491 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
492 return val;
493 }
494
495 /*
496 * Get the balance for a given field in the task ledger.
497 * Returns 0 if the entry is invalid.
498 */
499 static uint64_t
500 get_task_ledger_balance(task_t task, int entry)
501 {
502 ledger_amount_t balance = 0;
503
504 ledger_get_balance(task->ledger, entry, &balance);
505 return balance;
506 }
507
508 uint64_t
509 get_task_purgeable_size(task_t task)
510 {
511 kern_return_t ret;
512 ledger_amount_t balance = 0;
513 uint64_t volatile_size = 0;
514
515 ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
516 if (ret != KERN_SUCCESS) {
517 return 0;
518 }
519
520 volatile_size += balance;
521
522 ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
523 if (ret != KERN_SUCCESS) {
524 return 0;
525 }
526
527 volatile_size += balance;
528
529 return volatile_size;
530 }
531
532 /*
533 * Return the task's physical footprint ledger balance.
534 */
535 uint64_t
536 get_task_phys_footprint(task_t task)
537 {
538 return get_task_ledger_balance(task, task_ledgers.phys_footprint);
539 }
540
541 #if CONFIG_LEDGER_INTERVAL_MAX
542 /*
543 * Return the interval maximum of the task's physical footprint ledger.
544 */
545 uint64_t
546 get_task_phys_footprint_interval_max(task_t task, int reset)
547 {
548 kern_return_t ret;
549 ledger_amount_t max;
550
551 ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);
552
553 if (KERN_SUCCESS == ret) {
554 return max;
555 }
556
557 return 0;
558 }
559 #endif /* CONFIG_LEDGER_INTERVAL_MAX */
560
561 /*
562 * Return the lifetime maximum of the task's physical footprint ledger.
563 */
564 uint64_t
565 get_task_phys_footprint_lifetime_max(task_t task)
566 {
567 kern_return_t ret;
568 ledger_amount_t max;
569
570 ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
571
572 if (KERN_SUCCESS == ret) {
573 return max;
574 }
575
576 return 0;
577 }
578
579 /*
580 * Return the limit configured on the task's physical footprint ledger.
581 */
582 uint64_t
583 get_task_phys_footprint_limit(task_t task)
584 {
585 kern_return_t ret;
586 ledger_amount_t max;
587
588 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
589 if (KERN_SUCCESS == ret) {
590 return max;
591 }
592
593 return 0;
594 }
595
596 uint64_t
597 get_task_internal(task_t task)
598 {
599 return get_task_ledger_balance(task, task_ledgers.internal);
600 }
601
602 uint64_t
603 get_task_internal_compressed(task_t task)
604 {
605 return get_task_ledger_balance(task, task_ledgers.internal_compressed);
606 }
607
608 uint64_t
609 get_task_purgeable_nonvolatile(task_t task)
610 {
611 return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
612 }
613
614 uint64_t
615 get_task_purgeable_nonvolatile_compressed(task_t task)
616 {
617 return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
618 }
619
620 uint64_t
621 get_task_alternate_accounting(task_t task)
622 {
623 return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
624 }
625
626 uint64_t
627 get_task_alternate_accounting_compressed(task_t task)
628 {
629 return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
630 }
631
632 uint64_t
633 get_task_page_table(task_t task)
634 {
635 return get_task_ledger_balance(task, task_ledgers.page_table);
636 }
637
638 #if CONFIG_FREEZE
639 uint64_t
640 get_task_frozen_to_swap(task_t task)
641 {
642 return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
643 }
644 #endif /* CONFIG_FREEZE */
645
646 uint64_t
647 get_task_iokit_mapped(task_t task)
648 {
649 return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
650 }
651
652 uint64_t
653 get_task_network_nonvolatile(task_t task)
654 {
655 return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
656 }
657
658 uint64_t
659 get_task_network_nonvolatile_compressed(task_t task)
660 {
661 return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
662 }
663
664 uint64_t
665 get_task_wired_mem(task_t task)
666 {
667 return get_task_ledger_balance(task, task_ledgers.wired_mem);
668 }
669
670 uint64_t
671 get_task_tagged_footprint(task_t task)
672 {
673 kern_return_t ret;
674 ledger_amount_t credit, debit;
675
676 ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
677 if (KERN_SUCCESS == ret) {
678 return credit - debit;
679 }
680
681 return 0;
682 }
683
684 uint64_t
685 get_task_tagged_footprint_compressed(task_t task)
686 {
687 kern_return_t ret;
688 ledger_amount_t credit, debit;
689
690 ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
691 if (KERN_SUCCESS == ret) {
692 return credit - debit;
693 }
694
695 return 0;
696 }
697
698 uint64_t
699 get_task_media_footprint(task_t task)
700 {
701 kern_return_t ret;
702 ledger_amount_t credit, debit;
703
704 ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
705 if (KERN_SUCCESS == ret) {
706 return credit - debit;
707 }
708
709 return 0;
710 }
711
712 uint64_t
713 get_task_media_footprint_compressed(task_t task)
714 {
715 kern_return_t ret;
716 ledger_amount_t credit, debit;
717
718 ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
719 if (KERN_SUCCESS == ret) {
720 return credit - debit;
721 }
722
723 return 0;
724 }
725
726 uint64_t
727 get_task_graphics_footprint(task_t task)
728 {
729 kern_return_t ret;
730 ledger_amount_t credit, debit;
731
732 ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
733 if (KERN_SUCCESS == ret) {
734 return credit - debit;
735 }
736
737 return 0;
738 }
739
740
741 uint64_t
742 get_task_graphics_footprint_compressed(task_t task)
743 {
744 kern_return_t ret;
745 ledger_amount_t credit, debit;
746
747 ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
748 if (KERN_SUCCESS == ret) {
749 return credit - debit;
750 }
751
752 return 0;
753 }
754
755 uint64_t
756 get_task_neural_footprint(task_t task)
757 {
758 kern_return_t ret;
759 ledger_amount_t credit, debit;
760
761 ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
762 if (KERN_SUCCESS == ret) {
763 return credit - debit;
764 }
765
766 return 0;
767 }
768
769 uint64_t
770 get_task_neural_footprint_compressed(task_t task)
771 {
772 kern_return_t ret;
773 ledger_amount_t credit, debit;
774
775 ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
776 if (KERN_SUCCESS == ret) {
777 return credit - debit;
778 }
779
780 return 0;
781 }
782
783 uint64_t
784 get_task_neural_nofootprint_total(task_t task)
785 {
786 kern_return_t ret;
787 ledger_amount_t credit, debit;
788
789 ret = ledger_get_entries(task->ledger, task_ledgers.neural_nofootprint_total, &credit, &debit);
790 if (KERN_SUCCESS == ret) {
791 return credit - debit;
792 }
793
794 return 0;
795 }
796
797 #if CONFIG_LEDGER_INTERVAL_MAX
798 uint64_t
799 get_task_neural_nofootprint_total_interval_max(task_t task, int reset)
800 {
801 kern_return_t ret;
802 ledger_amount_t max;
803
804 ret = ledger_get_interval_max(task->ledger, task_ledgers.neural_nofootprint_total, &max, reset);
805
806 if (KERN_SUCCESS == ret) {
807 return max;
808 }
809
810 return 0;
811 }
812 #endif /* CONFIG_LEDGER_INTERVAL_MAX */
813
814 uint64_t
815 get_task_neural_nofootprint_total_lifetime_max(task_t task)
816 {
817 kern_return_t ret;
818 ledger_amount_t max;
819
820 ret = ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, &max);
821
822 if (KERN_SUCCESS == ret) {
823 return max;
824 }
825
826 return 0;
827 }
828
829 uint64_t
830 get_task_cpu_time(task_t task)
831 {
832 return get_task_ledger_balance(task, task_ledgers.cpu_time);
833 }
834
835 uint32_t
836 get_task_loadTag(task_t task)
837 {
838 return os_atomic_load(&task->loadTag, relaxed);
839 }
840
841 uint32_t
842 set_task_loadTag(task_t task, uint32_t loadTag)
843 {
844 return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
845 }
846
847
848 task_t
849 get_threadtask(thread_t th)
850 {
851 return get_thread_ro(th)->tro_task;
852 }
853
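/*
 * Early-boot-safe variant of get_threadtask(): before the thread's
 * read-only data has been set up, return TASK_NULL instead of
 * dereferencing it.
 */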
854 task_t
855 get_threadtask_early(thread_t th)
856 {
857 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
858 if (th == THREAD_NULL || th->t_tro == NULL) {
859 return TASK_NULL;
860 }
861 }
862 return get_threadtask(th);
863 }
864
865 /*
866 * Return the minimum address of the given map.
867 */
868 vm_map_offset_t
869 get_map_min(
870 vm_map_t map)
871 {
872 return vm_map_min(map);
873 }
874
875 /*
876 * Return the maximum address of the given map.
877 */
878 vm_map_offset_t
879 get_map_max(
880 vm_map_t map)
881 {
882 return vm_map_max(map);
883 }
884 vm_map_size_t
885 get_vmmap_size(
886 vm_map_t map)
887 {
888 return vm_map_adjusted_size(map);
889 }
890 int
891 get_task_page_size(
892 task_t task)
893 {
894 return vm_map_page_size(task->map);
895 }
896
897 #if CONFIG_COREDUMP
898
899 static int
900 get_vmsubmap_entries(
901 vm_map_t map,
902 vm_object_offset_t start,
903 vm_object_offset_t end)
904 {
905 int total_entries = 0;
906 vm_map_entry_t entry;
907
908 if (not_in_kdp) {
909 vm_map_lock(map);
910 }
911 entry = vm_map_first_entry(map);
912 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
913 entry = entry->vme_next;
914 }
915
916 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
917 if (entry->is_sub_map) {
918 total_entries +=
919 get_vmsubmap_entries(VME_SUBMAP(entry),
920 VME_OFFSET(entry),
921 (VME_OFFSET(entry) +
922 entry->vme_end -
923 entry->vme_start));
924 } else {
925 total_entries += 1;
926 }
927 entry = entry->vme_next;
928 }
929 if (not_in_kdp) {
930 vm_map_unlock(map);
931 }
932 return total_entries;
933 }
934
935 int
936 get_vmmap_entries(
937 vm_map_t map)
938 {
939 int total_entries = 0;
940 vm_map_entry_t entry;
941
942 if (not_in_kdp) {
943 vm_map_lock(map);
944 }
945 entry = vm_map_first_entry(map);
946
947 while (entry != vm_map_to_entry(map)) {
948 if (entry->is_sub_map) {
949 total_entries +=
950 get_vmsubmap_entries(VME_SUBMAP(entry),
951 VME_OFFSET(entry),
952 (VME_OFFSET(entry) +
953 entry->vme_end -
954 entry->vme_start));
955 } else {
956 total_entries += 1;
957 }
958 entry = entry->vme_next;
959 }
960 if (not_in_kdp) {
961 vm_map_unlock(map);
962 }
963 return total_entries;
964 }
965 #endif /* CONFIG_COREDUMP */
966
967 int
968 get_task_userstop(
969 task_t task)
970 {
971 return task->user_stop_count;
972 }
973
974 int
975 get_thread_userstop(
976 thread_t th)
977 {
978 return th->user_stop_count;
979 }
980
981 boolean_t
982 get_task_pidsuspended(
983 task_t task)
984 {
985 return task->pidsuspended;
986 }
987
988 boolean_t
989 get_task_frozen(
990 task_t task)
991 {
992 return task->frozen;
993 }
994
995 boolean_t
996 thread_should_abort(
997 thread_t th)
998 {
999 return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
1000 }
1001
1002 /*
1003 * This routine is like thread_should_abort() above. It checks to
1004 * see if the current thread is aborted. But unlike above, it also
1005 * checks to see if the thread was safely aborted. If so, it returns
1006 * that fact and clears the condition (safe aborts should only
1007 * have a single effect, and a poll of the abort status
1008 * qualifies).
1009 */
1010 boolean_t
1011 current_thread_aborted(
1012 void)
1013 {
1014 thread_t th = current_thread();
1015 spl_t s;
1016
1017 if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
1018 (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
1019 return TRUE;
1020 }
1021 if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
1022 s = splsched();
1023 thread_lock(th);
1024 if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
1025 th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
1026 }
1027 thread_unlock(th);
1028 splx(s);
1029 }
1030 return FALSE;
1031 }
1032
1033 void
1034 task_act_iterate_wth_args(
1035 task_t task,
1036 void (*func_callback)(thread_t, void *),
1037 void *func_arg)
1038 {
1039 thread_t inc;
1040
1041 task_lock(task);
1042
1043 for (inc = (thread_t)(void *)queue_first(&task->threads);
1044 !queue_end(&task->threads, (queue_entry_t)inc);) {
1045 (void) (*func_callback)(inc, func_arg);
1046 inc = (thread_t)(void *)queue_next(&inc->task_threads);
1047 }
1048
1049 task_unlock(task);
1050 }
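
/*
 * Illustrative sketch (not part of the original file; not compiled):
 * a hypothetical callback for task_act_iterate_wth_args(). The
 * iteration runs with the task lock held, so the callback must not
 * block or try to take the task lock again.
 */
#if 0
static void
example_count_thread(thread_t thread __unused, void *arg)
{
	(*(int *)arg)++;
}

static int
example_count_threads(task_t task)
{
	int count = 0;

	task_act_iterate_wth_args(task, example_count_thread, &count);
	return count;
}
#endif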
1051
1052 #include <sys/bsdtask_info.h>
1053
1054 void
1055 fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
1056 {
1057 vm_map_t map;
1058 task_absolutetime_info_data_t tinfo;
1059 thread_t thread;
1060 uint32_t cswitch = 0, numrunning = 0;
1061 uint32_t syscalls_unix = 0;
1062 uint32_t syscalls_mach = 0;
1063
1064 task_lock(task);
1065
1066 map = (task == kernel_task)? kernel_map: task->map;
1067
1068 ptinfo->pti_virtual_size = vm_map_adjusted_size(map);
1069 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);
1070
1071 ptinfo->pti_policy = ((task != kernel_task)?
1072 POLICY_TIMESHARE: POLICY_RR);
1073
1074 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1075 spl_t x;
1076
1077 if (thread->options & TH_OPT_IDLE_THREAD) {
1078 continue;
1079 }
1080
1081 x = splsched();
1082 thread_lock(thread);
1083
1084 if ((thread->state & TH_RUN) == TH_RUN) {
1085 numrunning++;
1086 }
1087 cswitch += thread->c_switch;
1088
1089 syscalls_unix += thread->syscalls_unix;
1090 syscalls_mach += thread->syscalls_mach;
1091
1092 thread_unlock(thread);
1093 splx(x);
1094 }
1095
1096 struct recount_times_mach term_times = recount_task_terminated_times(task);
1097 struct recount_times_mach total_times = recount_task_times(task);
1098
1099 tinfo.threads_user = total_times.rtm_user - term_times.rtm_user;
1100 tinfo.threads_system = total_times.rtm_system - term_times.rtm_system;
1101 ptinfo->pti_threads_system = tinfo.threads_system;
1102 ptinfo->pti_threads_user = tinfo.threads_user;
1103
1104 ptinfo->pti_total_system = total_times.rtm_system;
1105 ptinfo->pti_total_user = total_times.rtm_user;
1106
1107 ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
1108 ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
1109 ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
1110 ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
1111 ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
1112 ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
1113 ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
1114 ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
1115 ptinfo->pti_threadnum = task->thread_count;
1116 ptinfo->pti_numrunning = numrunning;
1117 ptinfo->pti_priority = task->priority;
1118
1119 task_unlock(task);
1120 }
1121
1122 int
1123 fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
1124 {
1125 thread_t thact;
1126 int err = 0;
1127 mach_msg_type_number_t count;
1128 thread_basic_info_data_t basic_info;
1129 kern_return_t kret;
1130 uint64_t addr = 0;
1131
1132 task_lock(task);
1133
1134 for (thact = (thread_t)(void *)queue_first(&task->threads);
1135 !queue_end(&task->threads, (queue_entry_t)thact);) {
1136 addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1137 if (addr == thaddr) {
1138 count = THREAD_BASIC_INFO_COUNT;
1139 if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
1140 err = 1;
1141 goto out;
1142 }
1143 ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
1144 ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
1145
1146 ptinfo->pth_cpu_usage = basic_info.cpu_usage;
1147 ptinfo->pth_policy = basic_info.policy;
1148 ptinfo->pth_run_state = basic_info.run_state;
1149 ptinfo->pth_flags = basic_info.flags;
1150 ptinfo->pth_sleep_time = basic_info.sleep_time;
1151 ptinfo->pth_curpri = thact->sched_pri;
1152 ptinfo->pth_priority = thact->base_pri;
1153 ptinfo->pth_maxpriority = thact->max_priority;
1154
1155 if (vpp != NULL) {
1156 bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
1157 }
1158 bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
1159 err = 0;
1160 goto out;
1161 }
1162 thact = (thread_t)(void *)queue_next(&thact->task_threads);
1163 }
1164 err = 1;
1165
1166 out:
1167 task_unlock(task);
1168 return err;
1169 }
1170
1171 int
1172 fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
1173 {
1174 int numthr = 0;
1175 thread_t thact;
1176 uint64_t * uptr;
1177 uint64_t thaddr;
1178
1179 uptr = (uint64_t *)buffer;
1180
1181 task_lock(task);
1182
1183 for (thact = (thread_t)(void *)queue_first(&task->threads);
1184 !queue_end(&task->threads, (queue_entry_t)thact);) {
1185 thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1186 *uptr++ = thaddr;
1187 numthr++;
1188 if (numthr >= thcount) {
1189 goto out;
1190 }
1191 thact = (thread_t)(void *)queue_next(&thact->task_threads);
1192 }
1193
1194 out:
1195 task_unlock(task);
1196 return (int)(numthr * sizeof(uint64_t));
1197 }
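
/*
 * Illustrative sketch (not part of the original file; not compiled):
 * fill_taskthreadlist() writes at most 'thcount' 64-bit identifiers
 * and returns the number of bytes written, so the caller sizes its
 * buffer accordingly.
 */
#if 0
static void
example_list_thread_ids(task_t task)
{
	uint64_t ids[16];
	int bytes = fill_taskthreadlist(task, ids, 16, true);
	int nthreads = bytes / (int)sizeof(uint64_t);

	/* ... use ids[0 .. nthreads - 1] ... */
	(void)nthreads;
}
#endif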
1198
1199 int
1200 fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
1201 {
1202 int err = 0;
1203
1204 thread_t thread = current_thread();
1205
1206 /*
1207 * Looking up threads is pretty expensive and not realtime-safe
1208 * right now, requiring locking the task and iterating over all
1209 * threads. As long as that is the case, we officially only
1210 * support getting this info for the current thread.
1211 */
1212 if (task != current_task() || thread_id != thread->thread_id) {
1213 return -1;
1214 }
1215
1216 #if SCHED_HYGIENE_DEBUG
1217 absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
1218 #else
1219 (void)thread;
1220 thread_sched_info->int_time_ns = 0;
1221 #endif
1222
1223 return err;
1224 }
1225
1226 int
1227 get_numthreads(task_t task)
1228 {
1229 return task->thread_count;
1230 }
1231
1232 /*
1233 * Gather the various pieces of info about the designated task,
1234 * and collect it all into a single rusage_info.
1235 */
1236 int
1237 fill_task_rusage(task_t task, rusage_info_current *ri)
1238 {
1239 struct task_power_info powerinfo;
1240
1241 assert(task != TASK_NULL);
1242 task_lock(task);
1243
1244 struct task_power_info_extra extra = { 0 };
1245 task_power_info_locked(task, &powerinfo, NULL, NULL, &extra);
1246 ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
1247 ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
1248 ri->ri_user_time = powerinfo.total_user;
1249 ri->ri_system_time = powerinfo.total_system;
1250 ri->ri_runnable_time = extra.runnable_time;
1251 ri->ri_cycles = extra.cycles;
1252 ri->ri_instructions = extra.instructions;
1253 ri->ri_pcycles = extra.pcycles;
1254 ri->ri_pinstructions = extra.pinstructions;
1255 ri->ri_user_ptime = extra.user_ptime;
1256 ri->ri_system_ptime = extra.system_ptime;
1257 ri->ri_energy_nj = extra.energy;
1258 ri->ri_penergy_nj = extra.penergy;
1259 ri->ri_secure_time_in_system = extra.secure_time;
1260 ri->ri_secure_ptime_in_system = extra.secure_ptime;
1261
1262 ri->ri_phys_footprint = get_task_phys_footprint(task);
1263 ledger_get_balance(task->ledger, task_ledgers.phys_mem,
1264 (ledger_amount_t *)&ri->ri_resident_size);
1265 ri->ri_wired_size = get_task_wired_mem(task);
1266
1267 ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total,
1268 (ledger_amount_t *)&ri->ri_neural_footprint);
1269 ri->ri_pageins = counter_load(&task->pageins);
1270
1271 task_unlock(task);
1272 return 0;
1273 }
1274
1275 void
1276 fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
1277 {
1278 bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
1279 bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
1280 }
1281
1282 int
1283 fill_task_io_rusage(task_t task, rusage_info_current *ri)
1284 {
1285 assert(task != TASK_NULL);
1286 task_lock(task);
1287
1288 if (task->task_io_stats) {
1289 ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
1290 ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1291 } else {
1292 /* I/O Stats unavailable */
1293 ri->ri_diskio_bytesread = 0;
1294 ri->ri_diskio_byteswritten = 0;
1295 }
1296 task_unlock(task);
1297 return 0;
1298 }
1299
1300 int
1301 fill_task_qos_rusage(task_t task, rusage_info_current *ri)
1302 {
1303 thread_t thread;
1304
1305 assert(task != TASK_NULL);
1306 task_lock(task);
1307
1308 /* Rollup QoS time of all the threads to task */
1309 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1310 if (thread->options & TH_OPT_IDLE_THREAD) {
1311 continue;
1312 }
1313
1314 thread_update_qos_cpu_time(thread);
1315 }
1316 ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
1317 ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
1318 ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
1319 ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
1320 ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
1321 ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
1322 ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
1323
1324 task_unlock(task);
1325 return 0;
1326 }
1327
1328 uint64_t
1329 get_task_logical_writes(task_t task, bool external)
1330 {
1331 assert(task != TASK_NULL);
1332 struct ledger_entry_info lei;
1333 int entry = external ? task_ledgers.logical_writes_to_external :
1334 task_ledgers.logical_writes;
1335
1336 task_lock(task);
1337 ledger_get_entry_info(task->ledger, entry, &lei);
1338 task_unlock(task);
1339
1340 return lei.lei_balance;
1341 }
1342
1343 uint64_t
1344 get_task_dispatchqueue_serialno_offset(task_t task)
1345 {
1346 uint64_t dq_serialno_offset = 0;
1347 void *bsd_info = get_bsdtask_info(task);
1348
1349 if (bsd_info) {
1350 dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(bsd_info);
1351 }
1352
1353 return dq_serialno_offset;
1354 }
1355
1356 uint64_t
1357 get_task_dispatchqueue_label_offset(task_t task)
1358 {
1359 uint64_t dq_label_offset = 0;
1360 void *bsd_info = get_bsdtask_info(task);
1361
1362 if (bsd_info) {
1363 dq_label_offset = get_dispatchqueue_label_offset_from_proc(bsd_info);
1364 }
1365
1366 return dq_label_offset;
1367 }
1368
1369 uint64_t
1370 get_task_uniqueid(task_t task)
1371 {
1372 void *bsd_info = get_bsdtask_info(task);
1373
1374 if (bsd_info) {
1375 return proc_uniqueid_task(bsd_info, task);
1376 } else {
1377 return UINT64_MAX;
1378 }
1379 }
1380
1381 int
1382 get_task_version(task_t task)
1383 {
1384 void *bsd_info = get_bsdtask_info(task);
1385
1386 if (bsd_info) {
1387 return proc_pidversion(bsd_info);
1388 } else {
1389 return INT_MAX;
1390 }
1391 }
1392
1393 #if CONFIG_MACF
1394 struct label *
1395 get_task_crash_label(task_t task)
1396 {
1397 return task->crash_label;
1398 }
1399
1400 void
1401 set_task_crash_label(task_t task, struct label *label)
1402 {
1403 task->crash_label = label;
1404 }
1405 #endif
1406
1407 int
1408 fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
1409 {
1410 ipc_space_t space = task->itk_space;
1411 if (space == NULL) {
1412 return -1;
1413 }
1414
1415 is_read_lock(space);
1416 if (!is_active(space)) {
1417 is_read_unlock(space);
1418 return -1;
1419 }
1420
1421 *table_size = ipc_entry_table_count(is_active_table(space));
1422 *table_free = space->is_table_free;
1423
1424 is_read_unlock(space);
1425
1426 return 0;
1427 }
1428
1429 int
1430 get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
1431 {
1432 int result = 0;
1433 void *bsd_info = NULL;
1434
1435 task_lock(task);
1436 bsd_info = get_bsdtask_info(task);
1437 result = bsd_info ? proc_getcdhash(bsd_info, cdhash) : ESRCH;
1438 task_unlock(task);
1439
1440 return result;
1441 }
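
/*
 * Illustrative sketch (not part of the original file; not compiled):
 * retrieving a task's code-directory hash. ESRCH is returned when the
 * task has no BSD proc attached.
 */
#if 0
static void
example_read_cdhash(task_t task)
{
	char cdhash[CS_CDHASH_LEN];

	if (get_task_cdhash(task, cdhash) == 0) {
		/* cdhash now holds CS_CDHASH_LEN bytes */
	}
}
#endif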
1442
1443 bool
1444 current_thread_in_kernel_fault(void)
1445 {
1446 if (current_thread()->recover) {
1447 return true;
1448 }
1449 return false;
1450 }
1451
1452 /* moved from ubc_subr.c */
1453 int
1454 mach_to_bsd_errno(kern_return_t mach_err)
1455 {
1456 switch (mach_err) {
1457 case KERN_SUCCESS:
1458 return 0;
1459
1460 case KERN_INVALID_ADDRESS:
1461 case KERN_INVALID_ARGUMENT:
1462 case KERN_NOT_IN_SET:
1463 case KERN_INVALID_NAME:
1464 case KERN_INVALID_TASK:
1465 case KERN_INVALID_RIGHT:
1466 case KERN_INVALID_VALUE:
1467 case KERN_INVALID_CAPABILITY:
1468 case KERN_INVALID_HOST:
1469 case KERN_MEMORY_PRESENT:
1470 case KERN_INVALID_PROCESSOR_SET:
1471 case KERN_INVALID_POLICY:
1472 case KERN_ALREADY_WAITING:
1473 case KERN_DEFAULT_SET:
1474 case KERN_EXCEPTION_PROTECTED:
1475 case KERN_INVALID_LEDGER:
1476 case KERN_INVALID_MEMORY_CONTROL:
1477 case KERN_INVALID_SECURITY:
1478 case KERN_NOT_DEPRESSED:
1479 case KERN_LOCK_OWNED:
1480 case KERN_LOCK_OWNED_SELF:
1481 return EINVAL;
1482
1483 case KERN_NOT_RECEIVER:
1484 case KERN_NO_ACCESS:
1485 case KERN_POLICY_STATIC:
1486 return EACCES;
1487
1488 case KERN_NO_SPACE:
1489 case KERN_RESOURCE_SHORTAGE:
1490 case KERN_UREFS_OVERFLOW:
1491 case KERN_INVALID_OBJECT:
1492 return ENOMEM;
1493
1494 case KERN_MEMORY_FAILURE:
1495 case KERN_MEMORY_ERROR:
1496 case KERN_PROTECTION_FAILURE:
1497 return EFAULT;
1498
1499 case KERN_POLICY_LIMIT:
1500 case KERN_CODESIGN_ERROR:
1501 case KERN_DENIED:
1502 return EPERM;
1503
1504 case KERN_ALREADY_IN_SET:
1505 case KERN_NAME_EXISTS:
1506 case KERN_RIGHT_EXISTS:
1507 return EEXIST;
1508
1509 case KERN_ABORTED:
1510 return EINTR;
1511
1512 case KERN_TERMINATED:
1513 case KERN_LOCK_SET_DESTROYED:
1514 case KERN_LOCK_UNSTABLE:
1515 case KERN_SEMAPHORE_DESTROYED:
1516 case KERN_NOT_FOUND:
1517 case KERN_NOT_WAITING:
1518 return ENOENT;
1519
1520 case KERN_RPC_SERVER_TERMINATED:
1521 return ECONNRESET;
1522
1523 case KERN_NOT_SUPPORTED:
1524 return ENOTSUP;
1525
1526 case KERN_NODE_DOWN:
1527 return ENETDOWN;
1528
1529 case KERN_OPERATION_TIMED_OUT:
1530 return ETIMEDOUT;
1531
1532 default:
1533 return EIO; /* 5 == KERN_FAILURE */
1534 }
1535 }
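
/*
 * Illustrative sketch (not part of the original file; not compiled):
 * translating a Mach return code into an errno for a BSD-facing
 * interface.
 */
#if 0
static int
example_translate(kern_return_t kr)
{
	/* e.g. KERN_INVALID_ARGUMENT maps to EINVAL */
	return mach_to_bsd_errno(kr);
}
#endif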
1536
1537 /*
1538 * Return the Mach return value corresponding to a given BSD errno.
1539 */
1540 kern_return_t
1541 kern_return_for_errno(int bsd_errno)
1542 {
1543 switch (bsd_errno) {
1544 case 0:
1545 return KERN_SUCCESS;
1546 case EIO:
1547 case EACCES:
1548 case ENOMEM:
1549 case EFAULT:
1550 return KERN_MEMORY_ERROR;
1551
1552 case EINVAL:
1553 return KERN_INVALID_ARGUMENT;
1554
1555 case ETIMEDOUT:
1556 case EBUSY:
1557 return KERN_OPERATION_TIMED_OUT;
1558
1559 case ECONNRESET:
1560 return KERN_RPC_SERVER_TERMINATED;
1561
1562 case ENOTSUP:
1563 return KERN_NOT_SUPPORTED;
1564
1565 case ENETDOWN:
1566 return KERN_NODE_DOWN;
1567
1568 case ENOENT:
1569 return KERN_NOT_FOUND;
1570
1571 case EINTR:
1572 return KERN_ABORTED;
1573
1574 case EPERM:
1575 return KERN_DENIED;
1576
1577 case EEXIST:
1578 return KERN_ALREADY_IN_SET;
1579
1580 default:
1581 return KERN_FAILURE;
1582 }
1583 }
1584