/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator or [email protected]
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 */
/*
 */
/*
 *	File:	ipc/ipc_object.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC objects.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/message.h>

#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <kern/ipc_kobject.h>
#include <kern/zalloc_internal.h> // zone_id_for_element

#include <ipc/ipc_types.h>
#include <ipc/ipc_importance.h>
#include <ipc/port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>

#include <security/mac_mach_internal.h>

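/*
 * ipc_object_deallocate_queue is drained by the deallocate daemon (see
 * ipc_object_deallocate_register_queue() below). ipc_object_release_safe()
 * uses it to defer the final free of an object when the last reference is
 * dropped while preemption is disabled.
 */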
static struct mpsc_daemon_queue ipc_object_deallocate_queue;
SECURITY_READ_ONLY_LATE(zone_t) ipc_object_zones[IOT_NUMBER];

/*
 * In order to do lockfree lookups in the IPC space, we combine two schemes:
 *
 * - the ipc table pointer is protected with hazard pointers, so it can be
 *   dereferenced while holding only a ref on a task or space;
 *
 * - we use ipc_object_lock_allow_invalid() to speculatively lock objects
 *   and validate that they are the droid we're looking for.
 *
 * The second scheme requires that a virtual address which ever held a port
 * either holds a port, or nothing, forever. To get this property, we simply
 * piggy-back on the zone sequestering security feature, which gives us
 * exactly that.
 *
 * However, sequestering really only "works" on a sufficiently large address
 * space, especially for a resource that userspace can create at will, so we
 * can't do lockless lookups on ILP32.
 *
 * Note: this scheme is incompatible with kasan quarantines (the quarantine
 * stores backtraces inside freed elements, which can make the waitq lock
 * appear "valid" by accident after an element is freed).
 */
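
/*
 * An illustrative sketch of the resulting lookup pattern (not a real
 * caller; the entry is assumed to come from a hazard-protected table):
 *
 *	ipc_object_t io = entry->ie_object;
 *
 *	if (io != IO_NULL && ipc_object_lock_allow_invalid(io)) {
 *		... io is locked, valid, and of a known IPC type ...
 *		io_unlock(io);
 *	}
 */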
#define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM | ZC_SEQUESTER)

ZONE_INIT(&ipc_object_zones[IOT_PORT],
    "ipc ports", sizeof(struct ipc_port),
    IPC_OBJECT_ZC_BASE | ZC_CACHING, ZONE_ID_IPC_PORT, NULL);

ZONE_INIT(&ipc_object_zones[IOT_PORT_SET],
    "ipc port sets", sizeof(struct ipc_pset),
    IPC_OBJECT_ZC_BASE, ZONE_ID_IPC_PORT_SET, NULL);

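/*
 *	Routine:	ipc_object_free
 *	Purpose:
 *		Return an IPC object to its zone, running the type-specific
 *		finalizer first when this is the last reference going away.
 */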
__attribute__((noinline))
static void
ipc_object_free(unsigned int otype, ipc_object_t object, bool last_ref)
{
	if (last_ref) {
		if (otype == IOT_PORT) {
			ipc_port_finalize(ip_object_to_port(object));
		} else {
			ipc_pset_finalize(ips_object_to_pset(object));
		}
	}
	zfree(ipc_object_zones[otype], object);
}

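/*
 *	Routine:	ipc_object_free_safe
 *	Purpose:
 *		Defer the final free of an object to the deallocate daemon by
 *		enqueuing its waitq on ipc_object_deallocate_queue.
 */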
__attribute__((noinline))
static void
ipc_object_free_safe(ipc_object_t object)
{
	struct waitq *wq = io_waitq(object);

	assert(!waitq_is_valid(wq));
	assert(os_atomic_load(&wq->waitq_defer.mpqc_next, relaxed) == NULL);
	mpsc_daemon_enqueue(&ipc_object_deallocate_queue,
	    &wq->waitq_defer, MPSC_QUEUE_NONE);
}

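/*
 *	Routine:	ipc_object_deallocate_queue_invoke
 *	Purpose:
 *		Deallocate daemon callback: performs the deferred final free
 *		of an object queued by ipc_object_free_safe().
 */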
static void
ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct waitq *wq = __container_of(e, struct waitq, waitq_defer);
	ipc_object_t io = io_from_waitq(wq);

	assert(dq == &ipc_object_deallocate_queue);

	os_atomic_store(&wq->waitq_defer.mpqc_next, NULL, relaxed);
	ipc_object_free(io_otype(io), io, true);
}

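/*
 *	Routine:	ipc_object_deallocate_register_queue
 *	Purpose:
 *		Register ipc_object_deallocate_queue with the thread
 *		deallocate daemon so that deferred releases get drained.
 */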
void
ipc_object_deallocate_register_queue(void)
{
	thread_deallocate_daemon_register_queue(&ipc_object_deallocate_queue,
	    ipc_object_deallocate_queue_invoke);
}

/*
 *	Routine:	ipc_object_reference
 *	Purpose:
 *		Take a reference to an object.
 */

void
ipc_object_reference(
	ipc_object_t io)
{
	static_assert(sizeof(os_ref_atomic_t) == sizeof(io->io_references));
	os_ref_retain_raw((os_ref_atomic_t *)&io->io_references, NULL);
}

/*
 *	Routine:	ipc_object_release
 *	Purpose:
 *		Release a reference to an object. The caller must be
 *		preemptible; use ipc_object_release_safe() otherwise.
 */

void
ipc_object_release(
	ipc_object_t io)
{
#if DEBUG
	assert(get_preemption_level() == 0);
#endif

	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		/* Free the object */
		ipc_object_free(io_otype(io), io, true);
	}
}

/*
 *	Routine:	ipc_object_release_safe
 *	Purpose:
 *		Release a reference to an object. Safe to call with preemption
 *		disabled: the final free is then deferred to the deallocate
 *		daemon instead of being performed inline.
 */

void
ipc_object_release_safe(
	ipc_object_t io)
{
	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		if (get_preemption_level() == 0) {
			ipc_object_free(io_otype(io), io, true);
		} else {
			ipc_object_free_safe(io);
		}
	}
}

/*
 *	Routine:	ipc_object_release_live
 *	Purpose:
 *		Release a reference to an object that isn't the last one.
 */

void
ipc_object_release_live(
	ipc_object_t io)
{
	os_ref_release_live_raw((os_ref_atomic_t *)&io->io_references, NULL);
}

/*
 *	Routine:	ipc_object_translate
 *	Purpose:
 *		Look up an object in a space.
 *	Conditions:
 *		Nothing locked before. If successful, the object
 *		is returned active and locked. The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Object returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	The name doesn't denote a right
 *		KERN_INVALID_RIGHT	Name doesn't denote the correct right
 */
kern_return_t
ipc_object_translate(
	ipc_space_t space,
	mach_port_name_t name,
	mach_port_right_t right,
	ipc_object_t *objectp)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	kern_return_t kr;

	if (!MACH_PORT_RIGHT_VALID_TRANSLATE(right)) {
		return KERN_INVALID_RIGHT;
	}

	kr = ipc_right_lookup_read(space, name, &bits, &object);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* object is locked and active */

	if ((bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
		io_unlock(object);
		return KERN_INVALID_RIGHT;
	}

	*objectp = object;
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_translate_two
 *	Purpose:
 *		Look up two objects in a space.
 *	Conditions:
 *		Nothing locked before. If successful, the objects
 *		are returned locked. The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Objects returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	A name doesn't denote a right.
 *		KERN_INVALID_RIGHT	A name doesn't denote the correct right.
 */

kern_return_t
ipc_object_translate_two(
	ipc_space_t space,
	mach_port_name_t name1,
	mach_port_right_t right1,
	ipc_object_t *objectp1,
	mach_port_name_t name2,
	mach_port_right_t right2,
	ipc_object_t *objectp2)
{
	ipc_entry_t entry1;
	ipc_entry_t entry2;
	ipc_object_t object1, object2;
	kern_return_t kr;
	boolean_t doguard = TRUE;

	kr = ipc_right_lookup_two_read(space, name1, &entry1, name2, &entry2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */

	if ((entry1->ie_bits & MACH_PORT_TYPE(right1)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right1 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry1->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	if ((entry2->ie_bits & MACH_PORT_TYPE(right2)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right2 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry2->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	object1 = entry1->ie_object;
	assert(object1 != IO_NULL);
	io_lock(object1);
	if (!io_active(object1)) {
		io_unlock(object1);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	object2 = entry2->ie_object;
	assert(object2 != IO_NULL);
	io_lock(object2);
	if (!io_active(object2)) {
		io_unlock(object1);
		io_unlock(object2);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	*objectp1 = object1;
	*objectp2 = object2;

	is_read_unlock(space);
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_alloc_dead
 *	Purpose:
 *		Allocate a dead-name entry.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		The dead name is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 */

kern_return_t
ipc_object_alloc_dead(
	ipc_space_t space,
	mach_port_name_t *namep)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc(space, IO_NULL, namep, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked */

	/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */

	entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
	ipc_entry_modified(space, *namep, entry);
	is_write_unlock(space);
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_alloc
 *	Purpose:
 *		Allocate an object.
 *	Conditions:
 *		Nothing locked.
 *		The space is write locked on successful return.
 *		The caller doesn't get a reference for the object.
 *	Returns:
 *		KERN_SUCCESS		The object is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 */

kern_return_t
ipc_object_alloc(
	ipc_space_t space,
	ipc_object_type_t otype,
	mach_port_type_t type,
	mach_port_urefs_t urefs,
	mach_port_name_t *namep,
	ipc_object_t *objectp)
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(otype));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	*namep = CAST_MACH_PORT_TO_NAME(object);
	kr = ipc_entry_alloc(space, object, namep, &entry);
	if (kr != KERN_SUCCESS) {
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	entry->ie_bits |= type | urefs;
	ipc_entry_modified(space, *namep, entry);

	*objectp = object;
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_alloc_name
 *	Purpose:
 *		Allocate an object, with a specific name.
 *	Conditions:
 *		Nothing locked. If successful, the object is returned locked.
 *		The caller doesn't get a reference for the object.
 *
 *		finish_init() must call an ipc_*_init function
 *		that will return the object locked (using IPC_PORT_INIT_LOCKED,
 *		or SYNC_POLICY_INIT_LOCKED, or equivalent).
 *
 *	Returns:
 *		KERN_SUCCESS		The object is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 */

kern_return_t
ipc_object_alloc_name(
	ipc_space_t space,
	ipc_object_type_t otype,
	mach_port_type_t type,
	mach_port_urefs_t urefs,
	mach_port_name_t name,
	ipc_object_t *objectp,
	void (^finish_init)(ipc_object_t))
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(otype));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	if (ipc_right_inuse(entry)) {
		is_write_unlock(space);
		ipc_object_free(otype, object, false);
		return KERN_NAME_EXISTS;
	}

	entry->ie_bits |= type | urefs;
	entry->ie_object = object;

	finish_init(object);
	/* object is locked */
	io_lock_held(object);

	ipc_entry_modified(space, name, entry);
	is_write_unlock(space);

	*objectp = object;
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_object_validate
 *	Purpose:
 *		Validates an ipc port or port set as belonging to the correct
 *		zone.
 */

void
ipc_object_validate(
	ipc_object_t object,
	ipc_object_type_t type)
{
	if (type != IOT_PORT_SET) {
		ip_validate(object);
	} else {
		ips_validate(object);
	}
}

void
ipc_object_validate_aligned(
	ipc_object_t object,
	ipc_object_type_t type)
{
	if (type != IOT_PORT_SET) {
		ip_validate_aligned(object);
	} else {
		ips_validate_aligned(object);
	}
}

/*
 *	Routine:	ipc_object_copyin_type
 *	Purpose:
 *		Convert a send type name to a received type name.
 */

mach_msg_type_name_t
ipc_object_copyin_type(
	mach_msg_type_name_t msgt_name)
{
	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE:
		return MACH_MSG_TYPE_PORT_RECEIVE;

	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		return MACH_MSG_TYPE_PORT_SEND_ONCE;

	case MACH_MSG_TYPE_MOVE_SEND:
	case MACH_MSG_TYPE_MAKE_SEND:
	case MACH_MSG_TYPE_COPY_SEND:
		return MACH_MSG_TYPE_PORT_SEND;

	default:
		return MACH_MSG_TYPE_PORT_NONE;
	}
}

/*
 *	Routine:	ipc_object_copyin
 *	Purpose:
 *		Copyin a capability from a space.
 *		If successful, the caller gets a ref
 *		for the resulting object, unless it is IO_DEAD.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
 */

kern_return_t
ipc_object_copyin(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name,
	ipc_object_t *objectp,
	mach_port_context_t context,
	mach_msg_guard_flags_t *guard_flags,
	ipc_object_copyin_flags_t copyin_flags)
{
	ipc_entry_t entry;
	ipc_port_t soright;
	ipc_port_t release_port;
	kern_return_t kr;
	int assertcnt = 0;

	ipc_object_copyin_flags_t copyin_mask = IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND
	    | IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE;
	copyin_mask = (copyin_flags & copyin_mask) | IPC_OBJECT_COPYIN_FLAGS_DEADOK;

	/*
	 * We allow moving the immovable receive right of a service port
	 * when the request comes from launchd (the init process).
	 */
	task_t task = current_task_early();
#ifdef MACH_BSD
	if (task && proc_isinitproc(get_bsdtask_info(task))) {
		copyin_mask |= IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE;
	}
#endif

	/*
	 * Could first try a read lock when doing
	 * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
	 * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
	 */

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	release_port = IP_NULL;
	kr = ipc_right_copyin(space, name, entry,
	    msgt_name, copyin_mask,
	    objectp, &soright,
	    &release_port,
	    &assertcnt,
	    context,
	    guard_flags);
	is_write_unlock(space);

	if (moved_provisional_reply_port(msgt_name, soright)) {
		send_prp_telemetry(-1);
	}


#if IMPORTANCE_INHERITANCE
	if (0 < assertcnt && ipc_importance_task_is_any_receiver_type(current_task()->task_imp_base)) {
		ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base, assertcnt);
	}
#endif /* IMPORTANCE_INHERITANCE */

	if (release_port != IP_NULL) {
		ip_release(release_port);
	}

	if ((kr == KERN_SUCCESS) && (soright != IP_NULL)) {
		ipc_notify_port_deleted(soright, name);
	}

	return kr;
}

/*
 *	Routine:	ipc_object_copyin_from_kernel
 *	Purpose:
 *		Copyin a naked capability from the kernel.
 *
 *		MACH_MSG_TYPE_MOVE_RECEIVE
 *			The receiver must be ipc_space_kernel
 *			or the receive right must already be in limbo.
 *			Consumes the naked receive right.
 *		MACH_MSG_TYPE_COPY_SEND
 *			A naked send right must be supplied.
 *			The port gains a reference, and a send right
 *			if the port is still active.
 *		MACH_MSG_TYPE_MAKE_SEND
 *			The receiver must be ipc_space_kernel.
 *			The port gains a reference and a send right.
 *		MACH_MSG_TYPE_MOVE_SEND
 *			Consumes a naked send right.
 *		MACH_MSG_TYPE_MAKE_SEND_ONCE
 *			The port gains a reference and a send-once right.
 *			The receiver may also be the caller of the device
 *			subsystem, so no assertion is made.
 *		MACH_MSG_TYPE_MOVE_SEND_ONCE
 *			Consumes a naked send-once right.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_copyin_from_kernel(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	assert(IO_VALID(object));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		require_ip_active(port);
		if (ip_in_a_space(port)) {
			assert(ip_in_space(port, ipc_space_kernel));
			assert(port->ip_immovable_receive == 0);

			/* relevant part of ipc_port_clear_receiver */
			port->ip_mscount = 0;

			/* port transitions to IN-LIMBO state */
			port->ip_receiver_name = MACH_PORT_NULL;
			port->ip_destination = IP_NULL;
		}
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(port->ip_srights > 0);
		}
		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
			assert((ip_in_space(port, ipc_space_kernel)) ||
			    (port->ip_receiver->is_node_id != HOST_LOCAL_NODE));
			port->ip_mscount++;
		}

		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		/* move naked send right into the message */
		assert(ip_object_to_port(object)->ip_srights);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
		}
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* move naked send-once right into the message */
		assert(ip_object_to_port(object)->ip_sorights);
		break;
	}

	default:
		panic("ipc_object_copyin_from_kernel: strange rights");
	}
}

/*
 *	Routine:	ipc_object_destroy
 *	Purpose:
 *		Destroys a naked capability.
 *		Consumes a ref for the object.
 *
 *		A receive right should be in limbo or in transit.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_destroy(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	ipc_port_t port = ip_object_to_port(object);

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ip_mq_lock(port);
		ipc_notify_send_once_and_unlock(port);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE:
		ipc_port_release_receive(port);
		break;

	default:
		panic("ipc_object_destroy: strange rights");
	}
}

/*
 *	Routine:	ipc_object_destroy_dest
 *	Purpose:
 *		Destroys a naked capability for the destination
 *		of a message. Consumes a ref for the object.
 *
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_destroy_dest(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	ipc_port_t port = ip_object_to_port(object);

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ip_mq_lock(port);
		ipc_notify_send_once_and_unlock(port);
		break;

	default:
		panic("ipc_object_destroy_dest: strange rights");
	}
}

/*
 *	Routine:	ipc_object_insert_send_right
 *	Purpose:
 *		Insert a send right into an object already in the space.
 *		The specified name must already point to a valid object.
 *
 *		Note: This really is a combined copyin()/copyout(),
 *		that avoids most of the overhead of being implemented that way.
 *
 *		This is the fastpath for mach_port_insert_right.
 *
 *	Conditions:
 *		Nothing locked.
 *
 *		msgt_name must be MACH_MSG_TYPE_MAKE_SEND or
 *		MACH_MSG_TYPE_COPY_SEND.
 *
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_RIGHT_EXISTS	Space has rights under another name.
 */
kern_return_t
ipc_object_insert_send_right(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;

	assert(msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	bits = entry->ie_bits;
	object = entry->ie_object;

	if (!IO_VALID(object)) {
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}
	if ((bits & MACH_PORT_TYPE_PORT_RIGHTS) == 0) {
		is_write_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = ip_object_to_port(object);

	ip_mq_lock(port);
	if (!ip_active(port)) {
		kr = KERN_INVALID_CAPABILITY;
	} else if (msgt_name == MACH_MSG_TYPE_MAKE_SEND) {
		if (bits & MACH_PORT_TYPE_RECEIVE) {
			port->ip_mscount++;
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				ip_srights_inc(port);
				bits |= MACH_PORT_TYPE_SEND;
			}
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits += 1; /* increment urefs */
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	} else { // MACH_MSG_TYPE_COPY_SEND
		if (bits & MACH_PORT_TYPE_SEND) {
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits + 1; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	}

	ip_mq_unlock(port);
	is_write_unlock(space);

	return kr;
}

/*
 *	Routine:	ipc_object_copyout
 *	Purpose:
 *		Copyout a capability, placing it into a space.
 *		Always consumes a ref for the object.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_NO_SPACE		No room in space for another right.
 *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
 *			and overflow wasn't specified.
 */

kern_return_t
ipc_object_copyout(
	ipc_space_t space,
	ipc_object_t object,
	mach_msg_type_name_t msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t *context,
	mach_msg_guard_flags_t *guard_flags,
	mach_port_name_t *namep)
{
	struct knote *kn = current_thread()->ith_knote;
	mach_port_name_t name;
	ipc_port_t port = ip_object_to_port(object);
	ipc_entry_t entry;
	kern_return_t kr;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	if (ITH_KNOTE_VALID(kn, msgt_name)) {
		filt_machport_turnstile_prepare_lazily(kn, msgt_name, port);
	}

	is_write_lock(space);

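	/*
	 * The loop below restarts whenever the space had to be unlocked:
	 * either because the entry table had to grow, or because the kobject
	 * label check substituted another port for the one being copied out.
	 */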
	for (;;) {
		ipc_port_t port_subst = IP_NULL;

		if (!is_active(space)) {
			is_write_unlock(space);
			kr = KERN_INVALID_TASK;
			goto out;
		}

		kr = ipc_entries_hold(space, 1);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				goto out;
			}
			continue;
		}

		ip_mq_lock_check_aligned(port);
		if (!ip_active(port)) {
			ip_mq_unlock(port);
			is_write_unlock(space);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* Don't actually copyout rights we aren't allowed to */
		if (!ip_label_check(space, port, msgt_name, &flags, &port_subst)) {
			ip_mq_unlock(port);
			is_write_unlock(space);
			assert(port_subst == IP_NULL);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* is the kolabel requesting a substitution */
		if (port_subst != IP_NULL) {
			/*
			 * port is unlocked, its right consumed
			 * space is unlocked
			 */
			assert(msgt_name == MACH_MSG_TYPE_PORT_SEND);
			port = port_subst;
			if (!IP_VALID(port)) {
				object = IO_DEAD;
				kr = KERN_INVALID_CAPABILITY;
				goto out;
			}

			object = ip_to_object(port);
			is_write_lock(space);
			continue;
		}

		break;
	}

	/* space is write-locked and active, object is locked and active */

	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &name, &entry)) {
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else {
		ipc_entry_claim(space, object, &name, &entry);
	}

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, flags, context, guard_flags, object);

	/* object is unlocked */
	is_write_unlock(space);

out:
	if (kr == KERN_SUCCESS) {
		*namep = name;
	} else if (IO_VALID(object)) {
		ipc_object_destroy(object, msgt_name);
	}

	return kr;
}

/*
 *	Routine:	ipc_object_copyout_name
 *	Purpose:
 *		Copyout a capability, placing it into a space.
 *		The specified name is used for the capability.
 *		If successful, consumes a ref for the object.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
 *			and overflow wasn't specified.
 *		KERN_RIGHT_EXISTS	Space has rights under another name.
 *		KERN_NAME_EXISTS	Name is already used.
 *		KERN_INVALID_VALUE	Supplied port name is invalid.
 */

kern_return_t
ipc_object_copyout_name(
	ipc_space_t space,
	ipc_object_t object,
	mach_msg_type_name_t msgt_name,
	mach_port_name_t name)
{
	ipc_port_t port = ip_object_to_port(object);
	mach_port_name_t oname;
	ipc_entry_t oentry;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	int assertcnt = 0;
	ipc_importance_task_t task_imp = IIT_NULL;
#endif /* IMPORTANCE_INHERITANCE */

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	ip_mq_lock_check_aligned(port);

	/*
	 * Don't actually copyout rights we aren't allowed to
	 *
	 * In particular, kolabel-ed objects do not allow callers
	 * to pick the name they end up with.
	 */
	if (!ip_active(port) || ip_is_kolabeled(port)) {
		ip_mq_unlock(port);
		if (!ipc_right_inuse(entry)) {
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		}
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	/* space is write-locked and active, object is locked and active */

	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &oname, &oentry)) {
		if (name != oname) {
			ip_mq_unlock(port);
			if (!ipc_right_inuse(entry)) {
				ipc_entry_dealloc(space, IO_NULL, name, entry);
			}
			is_write_unlock(space);
			return KERN_RIGHT_EXISTS;
		}

		assert(entry == oentry);
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else if (ipc_right_inuse(entry)) {
		ip_mq_unlock(port);
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	} else {
		assert(entry->ie_object == IO_NULL);

		entry->ie_object = object;
	}

#if IMPORTANCE_INHERITANCE
	/*
	 * We are slamming a receive right into the space, without
	 * first having been enqueued on a port destined there. So,
	 * we have to arrange to boost the task appropriately if this
	 * port has assertions (and the task wants them).
	 */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		if (space->is_task != TASK_NULL) {
			task_imp = space->is_task->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(task_imp)) {
				assertcnt = port->ip_impcount;
				ipc_importance_task_reference(task_imp);
			} else {
				task_imp = IIT_NULL;
			}
		}

		/* take port out of limbo */
		port->ip_tempowner = 0;
	}

#endif /* IMPORTANCE_INHERITANCE */

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, object);

	/* object is unlocked */
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/*
	 * Add the assertions to the task that we captured before
	 */
	if (task_imp != IIT_NULL) {
		ipc_importance_task_hold_internal_assertion(task_imp, assertcnt);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */

	return kr;
}

/*
 *	Routine:	ipc_object_copyout_dest
 *	Purpose:
 *		Translates/consumes the destination right of a message.
 *		This is unlike normal copyout because the right is consumed
 *		in a funny way instead of being given to the receiving space.
 *		The receiver gets his name for the port, if he has receive
 *		rights, otherwise MACH_PORT_NULL.
 *	Conditions:
 *		The object is locked and active. Nothing else locked.
 *		The object is unlocked and loses a reference.
 */

void
ipc_object_copyout_dest(
	ipc_space_t space,
	ipc_object_t object,
	mach_msg_type_name_t msgt_name,
	mach_port_name_t *namep)
{
	mach_port_name_t name;

	assert(IO_VALID(object));
	assert(io_active(object));

	/*
	 * If the space is the receiver/owner of the object,
	 * then we quietly consume the right and return
	 * the space's name for the object. Otherwise
	 * we destroy the right and return MACH_PORT_NULL.
	 */

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_notify_nsenders_t nsrequest = { };

		if (ip_in_space(port, space)) {
			name = ip_get_receiver_name(port);
		} else {
			name = MACH_PORT_NULL;
		}
		ip_srights_dec(port);
		if (port->ip_srights == 0) {
			nsrequest = ipc_notify_no_senders_prepare(port);
		}
		ipc_port_clear_sync_rcv_thread_boost_locked(port);
		/* port unlocked */

		ipc_notify_no_senders_emit(nsrequest);

		ip_release(port);
		break;
	}

	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		assert(port->ip_sorights > 0);

		if (ip_in_space(port, space)) {
			/* quietly consume the send-once right */
			ip_sorights_dec(port);
			name = ip_get_receiver_name(port);
			ipc_port_clear_sync_rcv_thread_boost_locked(port);
			/* port unlocked */
			ip_release(port);
		} else {
			/*
			 * A very bizarre case. The message
			 * was received, but before this copyout
			 * happened the space lost receive rights.
			 * We can't quietly consume the soright
			 * out from underneath some other task,
			 * so generate a send-once notification.
			 */

			ipc_notify_send_once_and_unlock(port);
			name = MACH_PORT_NULL;
		}

		break;
	}

	default:
		panic("ipc_object_copyout_dest: strange rights");
		name = MACH_PORT_DEAD;
	}

	*namep = name;
}

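/*
 * Both ipc_port and ipc_pset embed their waitq at the same offset as
 * struct ipc_object_waitq, so the waitq of an ipc_object_t can be found
 * without knowing which of the two types it actually is.
 */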
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_port, ip_waitq));
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_pset, ips_wqset));

/*
 *	Routine:	ipc_object_lock
 *	Purpose:
 *		Validate, then acquire a lock on an ipc object
 */
void
ipc_object_lock(ipc_object_t io, ipc_object_type_t type)
{
	ipc_object_validate(io, type);
	waitq_lock(io_waitq(io));
}

void
ipc_object_lock_check_aligned(ipc_object_t io, ipc_object_type_t type)
{
	ipc_object_validate_aligned(io, type);
	waitq_lock(io_waitq(io));
}

__abortlike
static void
ipc_object_validate_preflight_panic(ipc_object_t io)
{
	panic("ipc object %p is neither a port nor a port-set", io);
}

/*
 *	Routine:	ipc_object_lock_allow_invalid
 *	Purpose:
 *		Speculatively try to lock an object in an undefined state.
 *
 *		This relies on the fact that IPC object memory is allocated
 *		from sequestered zones, so at a given address, one can find:
 *		1. a valid object,
 *		2. a freed or invalid (uninitialized) object,
 *		3. unmapped memory.
 *
 *		(2) is possible because the zone is made with ZC_ZFREE_CLEARMEM
 *		which ensures freed elements are always zeroed.
 *
 *		(3) is a direct courtesy of waitq_lock_allow_invalid().
 *
 *		In order to disambiguate (1) from (2), we use the "waitq valid"
 *		bit which is part of the lock. When that bit is absent,
 *		waitq_lock() will function as expected, but
 *		waitq_lock_allow_invalid() will not.
 *
 *		Objects are then initialized and destroyed carefully so that
 *		this "valid bit" is only set when the object invariants are
 *		respected.
 *
 *	Returns:
 *		true:	the lock was acquired
 *		false:	the object was freed or not initialized.
 */
bool
ipc_object_lock_allow_invalid(ipc_object_t orig_io)
{
	struct waitq *orig_wq = io_waitq(orig_io);
	struct waitq *wq = pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY);

	switch (zone_id_for_element(wq, sizeof(*wq))) {
	case ZONE_ID_IPC_PORT:
	case ZONE_ID_IPC_PORT_SET:
		break;
	default:
#if CONFIG_PROB_GZALLOC
		if (orig_wq != wq) {
			/*
			 * The element was PGZ protected, and the translation
			 * returned a type other than port or port-set, or
			 * ZONE_ID_INVALID (wq is NULL).
			 *
			 * We have to allow this skew, and assume the slot
			 * held a now-freed port/port-set.
			 */
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		ipc_object_validate_preflight_panic(orig_io);
	}

	if (__probable(waitq_lock_allow_invalid(wq))) {
		ipc_object_t io = io_from_waitq(wq);

		ipc_object_validate(io, io_otype(io));
#if CONFIG_PROB_GZALLOC
		if (__improbable(wq != orig_wq &&
		    wq != pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY))) {
			/*
			 * This object is no longer held in the slot,
			 * whatever this object is, it's not the droid
			 * we're looking for. Pretend we failed the lock.
			 */
			waitq_unlock(wq);
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		return true;
	}
	return false;
}

/*
 *	Routine:	ipc_object_lock_try
 *	Purpose:
 *		Validate, then try to acquire a lock on an object,
 *		fail if there is an existing busy lock
 */
bool
ipc_object_lock_try(ipc_object_t io, ipc_object_type_t type)
{
	ipc_object_validate(io, type);
	return waitq_lock_try(io_waitq(io));
}

/*
 *	Routine:	ipc_object_unlock
 *	Purpose:
 *		Unlocks the given object.
 */
void
ipc_object_unlock(ipc_object_t io)
{
	waitq_unlock(io_waitq(io));
}