1 /*
2 * Copyright (c) 2005-2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/thread.h>
30
31 #include <sys/time.h>
32 #include <sys/proc.h>
33 #include <sys/kauth.h>
34 #include <sys/user.h>
35 #include <sys/systm.h>
36 #include <sys/dtrace.h>
37 #include <sys/dtrace_impl.h>
38 #include <machine/atomic.h>
39 #include <libkern/OSKextLibPrivate.h>
40 #include <kern/kern_types.h>
41 #include <kern/timer_call.h>
42 #include <kern/thread_call.h>
43 #include <kern/task.h>
44 #include <kern/sched_prim.h>
45 #include <miscfs/devfs/devfs.h>
46 #include <kern/kalloc.h>
47
48 #include <mach/vm_param.h>
49 #include <mach/mach_vm.h>
50 #include <mach/task.h>
51 #include <vm/vm_map_xnu.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
52
53 /*
54 * pid/proc
55 */
56 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
57 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
58
59 KALLOC_HEAP_DEFINE(KHEAP_DTRACE, "dtrace", KHEAP_ID_KT_VAR);
60
61 void
62 dtrace_sprlock(proc_t *p)
63 {
64 lck_mtx_lock(&p->p_dtrace_sprlock);
65 }
66
67 void
68 dtrace_sprunlock(proc_t *p)
69 {
70 lck_mtx_unlock(&p->p_dtrace_sprlock);
71 }
72
73 /* Not called from probe context */
74 proc_t *
75 sprlock(pid_t pid)
76 {
77 proc_t* p;
78
79 if ((p = proc_find(pid)) == PROC_NULL) {
80 return PROC_NULL;
81 }
82
83 task_suspend_internal(proc_task(p));
84
85 dtrace_sprlock(p);
86
87 return p;
88 }
89
90 /* Not called from probe context */
91 void
92 sprunlock(proc_t *p)
93 {
94 if (p != PROC_NULL) {
95 dtrace_sprunlock(p);
96
97 task_resume_internal(proc_task(p));
98
99 proc_rele(p);
100 }
101 }
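/*
 * Typical usage, as a hedged sketch (the caller, pid and pc below are
 * hypothetical, not taken from this file): a provider that needs a stable
 * snapshot of user memory suspends the target around the access --
 *
 *	proc_t *p = sprlock(pid);
 *	if (p != PROC_NULL) {
 *		uint32_t instr;
 *		if (uread(p, &instr, sizeof(instr), pc) == KERN_SUCCESS) {
 *			... decode instr ...
 *		}
 *		sprunlock(p);
 *	}
 *
 * sprunlock() drops p_dtrace_sprlock, resumes the task and releases the
 * proc reference, so the caller must not touch p afterwards.
 */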
102
103 /*
104 * uread/uwrite
105 */
106
107
108 /* Not called from probe context */
109 int
110 uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
111 {
112 kern_return_t ret;
113
114 ASSERT(p != PROC_NULL);
115 ASSERT(proc_task(p) != NULL);
116
117 task_t task = proc_task(p);
118
119 /*
120 * Grab a reference to the task vm_map_t to make sure
121 * the map isn't pulled out from under us.
122 *
123 * Because the proc_lock is not held at all times on all code
124 * paths leading here, it is possible for the proc to have
125 * exited. If the map is null, fail.
126 */
127 vm_map_t map = get_task_map_reference(task);
128 if (map) {
129 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
130 vm_map_deallocate(map);
131 } else {
132 ret = KERN_TERMINATED;
133 }
134
135 return (int)ret;
136 }
137
138
139 /* Not called from probe context */
140 int
141 uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
142 {
143 kern_return_t ret;
144
145 ASSERT(p != NULL);
146 ASSERT(proc_task(p) != NULL);
147
148 task_t task = proc_task(p);
149
150 /*
151 * Grab a reference to the task vm_map_t to make sure
152 * the map isn't pulled out from under us.
153 *
154 * Because the proc_lock is not held at all times on all code
155 * paths leading here, it is possible for the proc to have
156 * exited. If the map is null, fail.
157 */
158 vm_map_t map = get_task_map_reference(task);
159 if (map) {
160 /* Find the memory permissions. */
161 uint32_t nestingDepth = 999999;
162 vm_region_submap_short_info_data_64_t info;
163 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
164 mach_vm_address_t address = (mach_vm_address_t)a;
165 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
166
167 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
168 if (ret != KERN_SUCCESS) {
169 goto done;
170 }
171
172 vm_prot_t reprotect;
173
174 if (!(info.protection & VM_PROT_WRITE)) {
175 /* Save the original protection values for restoration later */
176 reprotect = info.protection;
177
178 if (info.max_protection & VM_PROT_WRITE) {
179 /* The memory is not currently writable, but can be made writable. */
180 ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE);
181 } else {
182 /*
183 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
184 *
185 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
186 */
187 ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
188 }
189
190 if (ret != KERN_SUCCESS) {
191 goto done;
192 }
193 } else {
194 /* The memory was already writable. */
195 reprotect = VM_PROT_NONE;
196 }
197
198 ret = vm_map_write_user( map,
199 buf,
200 (vm_map_address_t)a,
201 (vm_size_t)len);
202
203 dtrace_flush_caches();
204
205 if (ret != KERN_SUCCESS) {
206 goto done;
207 }
208
209 if (reprotect != VM_PROT_NONE) {
210 ASSERT(reprotect & VM_PROT_EXECUTE);
211 ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
212 }
213
214 done:
215 vm_map_deallocate(map);
216 } else {
217 ret = KERN_TERMINATED;
218 }
219
220 return (int)ret;
221 }
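/*
 * Illustrative sketch (p, pc and the instruction encoding are hypothetical
 * examples, not taken from this file): patching four bytes of
 * read/execute-only user text goes through the dance above --
 *
 *	uint32_t brk = 0xd4200000;	(example trap instruction encoding)
 *	int err = uwrite(p, &brk, sizeof(brk), pc);
 *
 * The page is temporarily made writable (or forced copy-on-write when
 * max_protection forbids writing), the caches are flushed via
 * dtrace_flush_caches(), and the original protection is restored before
 * uwrite() returns.
 */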
222
223 /*
224 * cpuvar
225 */
226 LCK_MTX_DECLARE_ATTR(cpu_lock, &dtrace_lck_grp, &dtrace_lck_attr);
227 LCK_MTX_DECLARE_ATTR(cyc_lock, &dtrace_lck_grp, &dtrace_lck_attr);
228 LCK_MTX_DECLARE_ATTR(mod_lock, &dtrace_lck_grp, &dtrace_lck_attr);
229
230 dtrace_cpu_t *cpu_list;
231 cpu_core_t *cpu_core; /* XXX TLB lockdown? */
232
233 /*
234 * cred_t
235 */
236
237 /*
238 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
239 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
240 */
241 cred_t *
242 dtrace_CRED(void)
243 {
244 return current_thread_ro_unchecked()->tro_cred;
245 }
246
247 int
248 PRIV_POLICY_CHOICE(void* cred, int priv, int all)
249 {
250 #pragma unused(priv, all)
251 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
252 }
253
254 int
255 PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
256 {
257 #pragma unused(priv, boolean)
258 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
259 }
260
261 uid_t
262 crgetuid(const cred_t *cr)
263 {
264 cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr);
265 }
266
267 /*
268 * "cyclic"
269 */
270
271 typedef struct wrap_timer_call {
272 /* node attributes */
273 cyc_handler_t hdlr;
274 cyc_time_t when;
275 uint64_t deadline;
276 int cpuid;
277 boolean_t suspended;
278 struct timer_call call;
279
280 /* next item in the linked list */
281 LIST_ENTRY(wrap_timer_call) entries;
282 } wrap_timer_call_t;
283
284 #define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
285 #define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
286
287
288 typedef struct cyc_list {
289 cyc_omni_handler_t cyl_omni;
290 wrap_timer_call_t cyl_wrap_by_cpus[];
291 } cyc_list_t;
292
293 /* CPU going online/offline notifications */
294 void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
295 void dtrace_cpu_state_changed(int, boolean_t);
296
297 void
298 dtrace_install_cpu_hooks(void)
299 {
300 dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
301 }
302
303 void
304 dtrace_cpu_state_changed(int cpuid, boolean_t is_running)
305 {
306 wrap_timer_call_t *wrapTC = NULL;
307 boolean_t suspend = (is_running ? FALSE : TRUE);
308 dtrace_icookie_t s;
309
310 /* Ensure that we're not going to leave the CPU */
311 s = dtrace_interrupt_disable();
312
313 LIST_FOREACH(wrapTC, &(cpu_list[cpuid].cpu_cyc_list), entries) {
314 assert3u(wrapTC->cpuid, ==, cpuid);
315 if (suspend) {
316 assert(!wrapTC->suspended);
317 /* If this fails, we'll panic anyway, so let's do this now. */
318 if (!timer_call_cancel(&wrapTC->call)) {
319 panic("timer_call_cancel() failed to cancel a timer call: %p",
320 &wrapTC->call);
321 }
322 wrapTC->suspended = TRUE;
323 } else {
324 /* Rearm the timer, but ensure it was suspended first. */
325 assert(wrapTC->suspended);
326 clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
327 &wrapTC->deadline);
328 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
329 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
330 wrapTC->suspended = FALSE;
331 }
332 }
333
334 /* Restore the previous interrupt state. */
335 dtrace_interrupt_enable(s);
336 }
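/*
 * Rough flow, for illustration: dtrace_install_cpu_hooks() points the
 * kernel's CPU state notification at dtrace_cpu_state_changed(), so when a
 * processor goes offline the loop above cancels every cyclic bound to it
 * (with interrupts disabled), and the matching "running again" notification
 * recomputes each deadline from the current time and re-arms the timer
 * calls.  Bound cyclics therefore survive a CPU being taken down and
 * brought back up.
 */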
337
338 static void
339 _timer_call_apply_cyclic( void *ignore, void *vTChdl )
340 {
341 #pragma unused(ignore)
342 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;
343
344 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
345
346 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline));
347 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
348 }
349
350 static cyclic_id_t
351 timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
352 {
353 uint64_t now;
354 dtrace_icookie_t s;
355
356 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
357 wrapTC->hdlr = *handler;
358 wrapTC->when = *when;
359
360 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );
361
362 now = mach_absolute_time();
363 wrapTC->deadline = now;
364
365 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline));
366
367 /* Insert the timer to the list of the running timers on this CPU, and start it. */
368 s = dtrace_interrupt_disable();
369 wrapTC->cpuid = cpu_number();
370 LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
371 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
372 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
373 wrapTC->suspended = FALSE;
374 dtrace_interrupt_enable(s);
375
376 return (cyclic_id_t)wrapTC;
377 }
378
379 /*
380 * Executed on the CPU the timer is running on.
381 */
382 static void
383 timer_call_remove_cyclic(wrap_timer_call_t *wrapTC)
384 {
385 assert(wrapTC);
386 assert(cpu_number() == wrapTC->cpuid);
387
388 if (!timer_call_cancel(&wrapTC->call)) {
389 panic("timer_call_remove_cyclic() failed to cancel a timer call");
390 }
391
392 LIST_REMOVE(wrapTC, entries);
393 }
394
395 static void *
396 timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC)
397 {
398 return wrapTC ? wrapTC->hdlr.cyh_arg : NULL;
399 }
400
401 cyclic_id_t
402 cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
403 {
404 wrap_timer_call_t *wrapTC = kalloc_type(wrap_timer_call_t, Z_ZERO | Z_WAITOK);
405 if (NULL == wrapTC) {
406 return CYCLIC_NONE;
407 } else {
408 return timer_call_add_cyclic( wrapTC, handler, when );
409 }
410 }
411
412 void
413 cyclic_timer_remove(cyclic_id_t cyclic)
414 {
415 ASSERT( cyclic != CYCLIC_NONE );
416
417 /* Removing a timer call must be done on the CPU the timer is running on. */
418 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
419 dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
420
421 kfree_type(wrap_timer_call_t, wrapTC);
422 }
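/*
 * Example, as a sketch (my_tick, my_state and the interval are
 * hypothetical): arming a cyclic bound to the calling CPU and tearing it
 * down later --
 *
 *	cyc_handler_t hdlr = { .cyh_func = my_tick, .cyh_arg = my_state };
 *	cyc_time_t when = { .cyt_when = 0, .cyt_interval = NANOSEC / 100 };
 *	cyclic_id_t id = cyclic_timer_add(&hdlr, &when);
 *	...
 *	if (id != CYCLIC_NONE)
 *		cyclic_timer_remove(id);
 *
 * cyt_interval is expressed in nanoseconds (timer_call_add_cyclic()
 * converts it to mach absolute time), and cyclic_timer_remove() cross-calls
 * to the CPU the timer was armed on before freeing the wrapper.
 */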
423
424 static void
425 _cyclic_add_omni(cyc_list_t *cyc_list)
426 {
427 cyc_time_t cT;
428 cyc_handler_t cH;
429 cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
430
431 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
432
433 wrap_timer_call_t *wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()];
434 timer_call_add_cyclic(wrapTC, &cH, &cT);
435 }
436
437 cyclic_id_list_t
438 cyclic_add_omni(cyc_omni_handler_t *omni)
439 {
440 cyc_list_t *cyc_list = kalloc_type(cyc_list_t, wrap_timer_call_t, NCPU, Z_WAITOK | Z_ZERO);
441
442 if (NULL == cyc_list) {
443 return NULL;
444 }
445
446 cyc_list->cyl_omni = *omni;
447
448 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
449
450 return (cyclic_id_list_t)cyc_list;
451 }
452
453 static void
454 _cyclic_remove_omni(cyc_list_t *cyc_list)
455 {
456 cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
457 void *oarg;
458 wrap_timer_call_t *wrapTC;
459
460 /*
461 * If the processor was offline when dtrace started, we did not allocate
462 * a cyclic timer for this CPU.
463 */
464 if ((wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()]) != NULL) {
465 oarg = timer_call_get_cyclic_arg(wrapTC);
466 timer_call_remove_cyclic(wrapTC);
467 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
468 }
469 }
470
471 void
472 cyclic_remove_omni(cyclic_id_list_t cyc_list)
473 {
474 ASSERT(cyc_list != NULL);
475
476 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
477 void *cyc_list_p = (void *)cyc_list;
478 kfree_type(cyc_list_t, wrap_timer_call_t, NCPU, cyc_list_p);
479 }
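/*
 * Example, as a sketch (my_online, my_offline and my_state are
 * hypothetical): an omni registration asks cyo_online, on every CPU, to
 * choose that CPU's handler and interval, and later hands cyo_offline
 * whatever argument cyo_online picked --
 *
 *	cyc_omni_handler_t omni = {
 *		.cyo_online  = my_online,
 *		.cyo_offline = my_offline,
 *		.cyo_arg     = my_state,
 *	};
 *	cyclic_id_list_t ids = cyclic_add_omni(&omni);
 *	...
 *	cyclic_remove_omni(ids);
 *
 * Both calls broadcast through dtrace_xcall(DTRACE_CPUALL, ...), so the
 * callbacks run on each CPU in turn.
 */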
480
481 typedef struct wrap_thread_call {
482 thread_call_t TChdl;
483 cyc_handler_t hdlr;
484 cyc_time_t when;
485 uint64_t deadline;
486 } wrap_thread_call_t;
487
488 /*
489 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
490 * cleaner and the deadman, but too distant in time and place for the profile provider.
491 */
492 static void
493 _cyclic_apply( void *ignore, void *vTChdl )
494 {
495 #pragma unused(ignore)
496 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;
497
498 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
499
500 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline));
501 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
502
503 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
504 if (wrapTC->when.cyt_interval == WAKEUP_REAPER) {
505 thread_wakeup((event_t)wrapTC);
506 }
507 }
508
509 cyclic_id_t
510 cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
511 {
512 uint64_t now;
513
514 wrap_thread_call_t *wrapTC = kalloc_type(wrap_thread_call_t, Z_ZERO | Z_WAITOK);
515 if (NULL == wrapTC) {
516 return CYCLIC_NONE;
517 }
518
519 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
520 wrapTC->hdlr = *handler;
521 wrapTC->when = *when;
522
523 ASSERT(when->cyt_when == 0);
524 ASSERT(when->cyt_interval < WAKEUP_REAPER);
525
526 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);
527
528 now = mach_absolute_time();
529 wrapTC->deadline = now;
530
531 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline));
532 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
533
534 return (cyclic_id_t)wrapTC;
535 }
536
537 static void
538 noop_cyh_func(void * ignore)
539 {
540 #pragma unused(ignore)
541 }
542
543 void
544 cyclic_remove(cyclic_id_t cyclic)
545 {
546 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;
547
548 ASSERT(cyclic != CYCLIC_NONE);
549
550 while (!thread_call_cancel(wrapTC->TChdl)) {
551 int ret = assert_wait(wrapTC, THREAD_UNINT);
552 ASSERT(ret == THREAD_WAITING);
553
554 wrapTC->when.cyt_interval = WAKEUP_REAPER;
555
556 ret = thread_block(THREAD_CONTINUE_NULL);
557 ASSERT(ret == THREAD_AWAKENED);
558 }
559
560 if (thread_call_free(wrapTC->TChdl)) {
561 kfree_type(wrap_thread_call_t, wrapTC);
562 } else {
563 /* Gut this cyclic and move on ... */
564 wrapTC->hdlr.cyh_func = noop_cyh_func;
565 wrapTC->when.cyt_interval = NEARLY_FOREVER;
566 }
567 }
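/*
 * The cancel/wakeup handshake above, spelled out (descriptive only):
 * thread_call_cancel() reports failure when _cyclic_apply() is already
 * dispatched or running, so cyclic_remove() asks the in-flight invocation
 * to wake it (by setting cyt_interval to WAKEUP_REAPER), blocks, and
 * retries the cancel once the handler has re-armed and signalled.  If
 * thread_call_free() still cannot release the call, the cyclic is gutted
 * instead: a no-op handler and a NEARLY_FOREVER interval.
 */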
568
569 int
570 ddi_driver_major(dev_info_t *devi)
571 {
572 return (int)major(CAST_DOWN_EXPLICIT(int, devi));
573 }
574
575 int
576 ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
577 minor_t minor_num, const char *node_type, int flag)
578 {
579 #pragma unused(spec_type,node_type,flag)
580 dev_t dev = makedev( ddi_driver_major(dip), minor_num );
581
582 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "%s", name )) {
583 return DDI_FAILURE;
584 } else {
585 return DDI_SUCCESS;
586 }
587 }
588
589 void
590 ddi_remove_minor_node(dev_info_t *dip, char *name)
591 {
592 #pragma unused(dip,name)
593 /* XXX called from dtrace_detach, so NOTREACHED for now. */
594 }
595
596 major_t
597 getemajor( dev_t d )
598 {
599 return (major_t) major(d);
600 }
601
602 minor_t
603 getminor( dev_t d )
604 {
605 return (minor_t) minor(d);
606 }
607
608 extern void Debugger(const char*);
609
610 void
611 debug_enter(char *c)
612 {
613 Debugger(c);
614 }
615
616 /*
617 * kmem
618 */
619
620 // rdar://88962505
621 __typed_allocators_ignore_push
622
623 void *
624 dt_kmem_alloc_tag(size_t size, int kmflag, vm_tag_t tag)
625 {
626 #pragma unused(kmflag)
627
628 /*
629 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
630 * Requests larger than 8K with M_NOWAIT fail in kalloc_ext.
631 */
632 return kheap_alloc_tag(KHEAP_DTRACE, size, Z_WAITOK, tag);
633 }
634
635 void *
636 dt_kmem_zalloc_tag(size_t size, int kmflag, vm_tag_t tag)
637 {
638 #pragma unused(kmflag)
639
640 /*
641 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
642 * Requests larger than 8K with M_NOWAIT fail in kalloc_ext.
643 */
644 return kheap_alloc_tag(KHEAP_DTRACE, size, Z_WAITOK | Z_ZERO, tag);
645 }
646
647 void
648 dt_kmem_free(void *buf, size_t size)
649 {
650 kheap_free(KHEAP_DTRACE, buf, size);
651 }
652
653 __typed_allocators_ignore_pop
654
655
656 /*
657 * aligned dt_kmem allocator
658 * align should be a power of two
659 */
660
661 void*
662 dt_kmem_alloc_aligned_tag(size_t size, size_t align, int kmflag, vm_tag_t tag)
663 {
664 void *mem, **addr_to_free;
665 intptr_t mem_aligned;
666 size_t *size_to_free, hdr_size;
667
668 /* Must be a power of two. */
669 assert(align != 0);
670 assert((align & (align - 1)) == 0);
671
672 /*
673 * We are going to add a header to the allocation. It contains
674 * the address to free and the total size of the buffer.
675 */
676 hdr_size = sizeof(size_t) + sizeof(void*);
677 mem = dt_kmem_alloc_tag(size + align + hdr_size, kmflag, tag);
678 if (mem == NULL) {
679 return NULL;
680 }
681
682 mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
683
684 /* Write the address to free in the header. */
685 addr_to_free = (void**) (mem_aligned - sizeof(void*));
686 *addr_to_free = mem;
687
688 /* Write the size to free in the header. */
689 size_to_free = (size_t*) (mem_aligned - hdr_size);
690 *size_to_free = size + align + hdr_size;
691
692 return (void*) mem_aligned;
693 }
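/*
 * Resulting layout (illustrative, assuming 8-byte size_t and pointers):
 *
 *	mem                                               mem_aligned
 *	|-- padding --|-- size_to_free --|-- addr_to_free --|-- size bytes for the caller --|
 *	                   (size_t)            (void *)
 *
 * The aligned pointer handed to the caller is always immediately preceded
 * by the original allocation address and, before that, the total allocation
 * length, which is how dt_kmem_free_aligned() recovers both.
 */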
694
695 void*
696 dt_kmem_zalloc_aligned_tag(size_t size, size_t align, int kmflag, vm_tag_t tag)
697 {
698 void* buf;
699
700 buf = dt_kmem_alloc_aligned_tag(size, align, kmflag, tag);
701
702 if (!buf) {
703 return NULL;
704 }
705
706 bzero(buf, size);
707
708 return buf;
709 }
710
711 void
712 dt_kmem_free_aligned(void* buf, size_t size)
713 {
714 #pragma unused(size)
715 intptr_t ptr = (intptr_t) buf;
716 void **addr_to_free = (void**) (ptr - sizeof(void*));
717 size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
718
719 if (buf == NULL) {
720 return;
721 }
722
723 dt_kmem_free(*addr_to_free, *size_to_free);
724 }
725
726 /*
727 * vmem (the Solaris resource allocator) used by DTrace solely to hand out resource ids
728 */
729 typedef unsigned int u_daddr_t;
730 #include "blist.h"
731
732 /* By passing around blist *handles*, the underlying blist can be resized as needed. */
733 struct blist_hdl {
734 blist_t blist;
735 };
736
737 vmem_t *
738 vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
739 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
740 {
741 #pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
742 blist_t bl;
743 struct blist_hdl *p = kalloc_type(struct blist_hdl, Z_WAITOK);
744
745 ASSERT(quantum == 1);
746 ASSERT(NULL == ignore5);
747 ASSERT(NULL == ignore6);
748 ASSERT(NULL == source);
749 ASSERT(0 == qcache_max);
750 ASSERT(size <= INT32_MAX);
751 ASSERT(vmflag & VMC_IDENTIFIER);
752
753 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
754
755 p->blist = bl = blist_create((daddr_t)size);
756 blist_free(bl, 0, (daddr_t)size);
757 if (base) {
758 blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
759 }
760 return (vmem_t *)p;
761 }
762
763 void *
764 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
765 {
766 #pragma unused(vmflag)
767 struct blist_hdl *q = (struct blist_hdl *)vmp;
768 blist_t bl = q->blist;
769 daddr_t p;
770
771 p = blist_alloc(bl, (daddr_t)size);
772
773 if (p == SWAPBLK_NONE) {
774 blist_resize(&bl, (bl->bl_blocks) << 1, 1);
775 q->blist = bl;
776 p = blist_alloc(bl, (daddr_t)size);
777 if (p == SWAPBLK_NONE) {
778 panic("vmem_alloc: failure after blist_resize!");
779 }
780 }
781
782 return (void *)(uintptr_t)p;
783 }
784
785 void
786 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
787 {
788 struct blist_hdl *p = (struct blist_hdl *)vmp;
789
790 blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
791 }
792
793 void
794 vmem_destroy(vmem_t *vmp)
795 {
796 struct blist_hdl *p = (struct blist_hdl *)vmp;
797
798 blist_destroy( p->blist );
799 kfree_type(struct blist_hdl, p);
800 }
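/*
 * Example, as a sketch (the arena name, base and flags below are
 * illustrative; DTrace proper uses this shim only as an ID allocator):
 *
 *	vmem_t *arena = vmem_create("my_ids", (void *)1, INT32_MAX, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *	minor_t id = (minor_t)(uintptr_t)vmem_alloc(arena, 1, VM_BESTFIT | VM_SLEEP);
 *	...
 *	vmem_free(arena, (void *)(uintptr_t)id, 1);
 *	vmem_destroy(arena);
 *
 * Passing base = (void *)1 consumes the first ID at create time, and the
 * backing blist is doubled transparently whenever vmem_alloc() runs dry.
 */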
801
802 /*
803 * Timing
804 */
805
806 /*
807 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
808 * January 1, 1970. Because it can be called from probe context, it must take no locks.
809 */
810
811 hrtime_t
812 dtrace_gethrestime(void)
813 {
814 clock_sec_t secs;
815 clock_nsec_t nanosecs;
816 uint64_t secs64, ns64;
817
818 clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
819 secs64 = (uint64_t)secs;
820 ns64 = (uint64_t)nanosecs;
821
822 ns64 = ns64 + (secs64 * 1000000000LL);
823 return ns64;
824 }
825
826 /*
827 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
828 * Hence its primary use is to specify intervals.
829 */
830
831 hrtime_t
832 dtrace_abs_to_nano(uint64_t elapsed)
833 {
834 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
835
836 /*
837 * If this is the first time we've run, get the timebase.
838 * We can use denom == 0 to indicate that sTimebaseInfo is
839 * uninitialised because it makes no sense to have a zero
840 * denominator in a fraction.
841 */
842
843 if (sTimebaseInfo.denom == 0) {
844 (void) clock_timebase_info(&sTimebaseInfo);
845 }
846
847 /*
848 * Convert to nanoseconds.
849 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
850 *
851 * Provided the final result is representable in 64 bits the following maneuver will
852 * deliver that result without intermediate overflow.
853 */
854 if (sTimebaseInfo.denom == sTimebaseInfo.numer) {
855 return elapsed;
856 } else if (sTimebaseInfo.denom == 1) {
857 return elapsed * (uint64_t)sTimebaseInfo.numer;
858 } else {
859 /* Decompose elapsed = eta32 * 2^32 + eps32: */
860 uint64_t eta32 = elapsed >> 32;
861 uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
862
863 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;
864
865 /* Form product of elapsed64 (decomposed) and numer: */
866 uint64_t mu64 = numer * eta32;
867 uint64_t lambda64 = numer * eps32;
868
869 /* Divide the constituents by denom: */
870 uint64_t q32 = mu64 / denom;
871 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
872
873 return (q32 << 32) + ((r32 << 32) + lambda64) / denom;
874 }
875 }
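/*
 * Worked example of the decomposition above (illustrative numbers): with
 * numer = 125, denom = 3 and elapsed = 2^32 + 6,
 *
 *	eta32 = 1, eps32 = 6
 *	mu64 = 125, lambda64 = 750
 *	q32 = 41, r32 = 2
 *	result = (41 << 32) + ((2 << 32) + 750) / 3 = 178956970916
 *
 * which equals (elapsed * numer) / denom exactly, without ever forming the
 * up-to-96-bit intermediate product.
 */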
876
877 hrtime_t
878 dtrace_gethrtime(void)
879 {
880 static uint64_t start = 0;
881
882 if (start == 0) {
883 start = mach_absolute_time();
884 }
885
886 return dtrace_abs_to_nano(mach_absolute_time() - start);
887 }
888
889 /*
890 * Atomicity and synchronization
891 */
892 uint32_t
893 dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
894 {
895 if (OSCompareAndSwap((UInt32)cmp, (UInt32)new, (volatile UInt32 *)target )) {
896 return cmp;
897 } else {
898 return ~cmp; /* Must return something *other* than cmp */
899 }
900 }
901
902 void *
903 dtrace_casptr(void *target, void *cmp, void *new)
904 {
905 if (OSCompareAndSwapPtr( cmp, new, (void**)target )) {
906 return cmp;
907 } else {
908 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
909 }
910 }
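/*
 * Callers detect success by comparing the return value against cmp, in the
 * usual lock-free retry shape (illustrative sketch; counter is a
 * hypothetical uint32_t *):
 *
 *	uint32_t old;
 *	do {
 *		old = *counter;
 *	} while (dtrace_cas32(counter, old, old + 1) != old);
 *
 * which is why the failure paths return a value guaranteed to differ from
 * cmp rather than re-reading the target.
 */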
911
912 /*
913 * Interrupt manipulation
914 */
915 dtrace_icookie_t
916 dtrace_interrupt_disable(void)
917 {
918 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
919 }
920
921 void
922 dtrace_interrupt_enable(dtrace_icookie_t reenable)
923 {
924 (void)ml_set_interrupts_enabled((boolean_t)reenable);
925 }
926
927 /*
928 * MP coordination
929 */
930 static void
931 dtrace_sync_func(void)
932 {
933 }
934
935 /*
936 * dtrace_sync() is not called from probe context.
937 */
938 void
939 dtrace_sync(void)
940 {
941 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
942 }
943
944 /*
945 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
946 */
947
948 extern kern_return_t dtrace_copyio_preflight(addr64_t);
949 extern kern_return_t dtrace_copyio_postflight(addr64_t);
950
951 static int
952 dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
953 {
954 #pragma unused(kaddr)
955
956 ASSERT(kaddr + size >= kaddr);
957
958 if (uaddr + size < uaddr || /* Avoid address wrap. */
959 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) { /* Machine specific setup/constraints. */
960 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
961 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
962 return 0;
963 }
964 return 1;
965 }
966
967 void
968 dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
969 {
970 #pragma unused(flags)
971
972 if (dtrace_copycheck( src, dst, len )) {
973 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
974 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
975 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
976 }
977 dtrace_copyio_postflight(src);
978 }
979 }
980
981 void
982 dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
983 {
984 #pragma unused(flags)
985
986 size_t actual;
987
988 if (dtrace_copycheck( src, dst, len )) {
989 /* copyin as many as 'len' bytes. */
990 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
991
992 /*
993 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
994 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
995 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
996 * to the caller.
997 */
998 if (error && error != ENAMETOOLONG) {
999 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1000 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1001 }
1002 dtrace_copyio_postflight(src);
1003 }
1004 }
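/*
 * Concretely (illustrative values): with len = 8 and the user string
 * "verylongname", copyinstr() fills all eight destination bytes, none of
 * them NUL, and returns ENAMETOOLONG; the code above treats that as a
 * successful truncated copy, so consumers of the buffer must bound their
 * reads by 'len' rather than rely on termination.
 */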
1005
1006 void
1007 dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1008 {
1009 #pragma unused(flags)
1010
1011 if (dtrace_copycheck( dst, src, len )) {
1012 if (copyout((const void *)src, dst, (vm_size_t)len)) {
1013 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1014 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1015 }
1016 dtrace_copyio_postflight(dst);
1017 }
1018 }
1019
1020 void
1021 dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1022 {
1023 #pragma unused(flags)
1024
1025 size_t actual;
1026
1027 if (dtrace_copycheck( dst, src, len )) {
1028 /*
1029 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
1030 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
1031 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1032 * to the caller.
1033 */
1034 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
1035 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1036 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1037 }
1038 dtrace_copyio_postflight(dst);
1039 }
1040 }
1041
1042 extern const int copysize_limit_panic;
1043
1044 int
1045 dtrace_copy_maxsize(void)
1046 {
1047 return copysize_limit_panic;
1048 }
1049
1050
1051 int
1052 dtrace_buffer_copyout(const void *kaddr, user_addr_t uaddr, vm_size_t nbytes)
1053 {
1054 int maxsize = dtrace_copy_maxsize();
1055 /*
1056 * Partition the copyout in copysize_limit_panic-sized chunks
1057 */
1058 while (nbytes >= (vm_size_t)maxsize) {
1059 if (copyout(kaddr, uaddr, maxsize) != 0) {
1060 return EFAULT;
1061 }
1062
1063 nbytes -= maxsize;
1064 uaddr += maxsize;
1065 kaddr = (const void *)((uintptr_t)kaddr + maxsize);
1066 }
1067 if (nbytes > 0) {
1068 if (copyout(kaddr, uaddr, nbytes) != 0) {
1069 return EFAULT;
1070 }
1071 }
1072
1073 return 0;
1074 }
1075
1076 uint8_t
1077 dtrace_fuword8(user_addr_t uaddr)
1078 {
1079 uint8_t ret = 0;
1080
1081 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1082 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1083 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1084 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1085 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1086 }
1087 dtrace_copyio_postflight(uaddr);
1088 }
1089 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1090
1091 return ret;
1092 }
1093
1094 uint16_t
1095 dtrace_fuword16(user_addr_t uaddr)
1096 {
1097 uint16_t ret = 0;
1098
1099 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1100 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1101 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1102 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1103 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1104 }
1105 dtrace_copyio_postflight(uaddr);
1106 }
1107 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1108
1109 return ret;
1110 }
1111
1112 uint32_t
1113 dtrace_fuword32(user_addr_t uaddr)
1114 {
1115 uint32_t ret = 0;
1116
1117 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1118 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1119 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1120 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1121 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1122 }
1123 dtrace_copyio_postflight(uaddr);
1124 }
1125 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1126
1127 return ret;
1128 }
1129
1130 uint64_t
1131 dtrace_fuword64(user_addr_t uaddr)
1132 {
1133 uint64_t ret = 0;
1134
1135 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1136 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1137 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1138 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1139 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1140 }
1141 dtrace_copyio_postflight(uaddr);
1142 }
1143 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1144
1145 return ret;
1146 }
1147
1148 /*
1149 * Emulation of Solaris fuword / suword
1150 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1151 */
1152
1153 int
1154 fuword8(user_addr_t uaddr, uint8_t *value)
1155 {
1156 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
1157 return -1;
1158 }
1159
1160 return 0;
1161 }
1162
1163 int
1164 fuword16(user_addr_t uaddr, uint16_t *value)
1165 {
1166 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
1167 return -1;
1168 }
1169
1170 return 0;
1171 }
1172
1173 int
1174 fuword32(user_addr_t uaddr, uint32_t *value)
1175 {
1176 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
1177 return -1;
1178 }
1179
1180 return 0;
1181 }
1182
1183 int
1184 fuword64(user_addr_t uaddr, uint64_t *value)
1185 {
1186 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
1187 return -1;
1188 }
1189
1190 return 0;
1191 }
1192
1193 void
1194 fuword32_noerr(user_addr_t uaddr, uint32_t *value)
1195 {
1196 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
1197 *value = 0;
1198 }
1199 }
1200
1201 void
1202 fuword64_noerr(user_addr_t uaddr, uint64_t *value)
1203 {
1204 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
1205 *value = 0;
1206 }
1207 }
1208
1209 int
1210 suword64(user_addr_t addr, uint64_t value)
1211 {
1212 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1213 return -1;
1214 }
1215
1216 return 0;
1217 }
1218
1219 int
1220 suword32(user_addr_t addr, uint32_t value)
1221 {
1222 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1223 return -1;
1224 }
1225
1226 return 0;
1227 }
1228
1229 /*
1230 * Miscellaneous
1231 */
1232 extern boolean_t dtrace_tally_fault(user_addr_t);
1233
1234 boolean_t
1235 dtrace_tally_fault(user_addr_t uaddr)
1236 {
1237 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1238 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1239 return DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE;
1240 }
1241
1242 #define TOTTY 0x02
1243 extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
1244
1245 int
1246 vuprintf(const char *format, va_list ap)
1247 {
1248 return prf(format, ap, TOTTY, NULL);
1249 }
1250
1251 /* Not called from probe context */
1252 void
1253 cmn_err( int level, const char *format, ... )
1254 {
1255 #pragma unused(level)
1256 va_list alist;
1257
1258 va_start(alist, format);
1259 vuprintf(format, alist);
1260 va_end(alist);
1261 uprintf("\n");
1262 }
1263
1264 const void*
1265 bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*compar)(const void *, const void *))
1266 {
1267 const char *base = base0;
1268 size_t lim;
1269 int cmp;
1270 const void *p;
1271 for (lim = nmemb; lim != 0; lim >>= 1) {
1272 p = base + (lim >> 1) * size;
1273 cmp = (*compar)(key, p);
1274 if (cmp == 0) {
1275 return p;
1276 }
1277 if (cmp > 0) { /* key > p: move right */
1278 base = (const char *)p + size;
1279 lim--;
1280 } /* else move left */
1281 }
1282 return NULL;
1283 }
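/*
 * Example, as a sketch (the key type, table and comparator are
 * hypothetical):
 *
 *	static int
 *	cmp_u32(const void *key, const void *elem)
 *	{
 *		uint32_t k = *(const uint32_t *)key;
 *		uint32_t e = *(const uint32_t *)elem;
 *		return (k > e) - (k < e);
 *	}
 *
 *	uint32_t table[] = { 3, 7, 9, 42 };
 *	uint32_t key = 9;
 *	const uint32_t *hit = bsearch(&key, table, 4, sizeof(table[0]), cmp_u32);
 *
 * The table must already be sorted in ascending order; a NULL return means
 * the key is absent, otherwise the pointer aims into the caller's array.
 */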
1284
1285 /*
1286 * Runtime and ABI
1287 */
1288 uintptr_t
1289 dtrace_caller(int ignore)
1290 {
1291 #pragma unused(ignore)
1292 return -1; /* Just as in Solaris dtrace_asm.s */
1293 }
1294
1295 int
1296 dtrace_getstackdepth(int aframes)
1297 {
1298 struct frame *fp = (struct frame *)__builtin_frame_address(0);
1299 struct frame *nextfp, *minfp, *stacktop;
1300 int depth = 0;
1301 int on_intr;
1302
1303 if ((on_intr = CPU_ON_INTR(CPU)) != 0) {
1304 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
1305 } else {
1306 stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
1307 }
1308
1309 minfp = fp;
1310
1311 aframes++;
1312
1313 for (;;) {
1314 depth++;
1315
1316 nextfp = *(struct frame **)fp;
1317
1318 if (nextfp <= minfp || nextfp >= stacktop) {
1319 if (on_intr) {
1320 /*
1321 * Hop from interrupt stack to thread stack.
1322 */
1323 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
1324
1325 minfp = (struct frame *)kstack_base;
1326 stacktop = (struct frame *)(kstack_base + kernel_stack_size);
1327
1328 on_intr = 0;
1329 continue;
1330 }
1331 break;
1332 }
1333
1334 fp = nextfp;
1335 minfp = fp;
1336 }
1337
1338 if (depth <= aframes) {
1339 return 0;
1340 }
1341
1342 return depth - aframes;
1343 }
1344
1345 int
1346 dtrace_addr_in_module(const void* addr, const struct modctl *ctl)
1347 {
1348 return OSKextKextForAddress(addr) == (void*)ctl->mod_address;
1349 }
1350
1351 /*
1352 * Unconsidered
1353 */
1354 void
1355 dtrace_vtime_enable(void)
1356 {
1357 }
1358
1359 void
1360 dtrace_vtime_disable(void)
1361 {
1362 }
1363