1 /*
2 * Copyright (c) 1998-2022, 2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1988, 1990, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/filedesc.h>
73 #include <sys/proc.h>
74 #include <sys/proc_internal.h>
75 #include <sys/kauth.h>
76 #include <sys/file_internal.h>
77 #include <sys/fcntl.h>
78 #include <sys/malloc.h>
79 #include <sys/mbuf.h>
80 #include <sys/domain.h>
81 #include <sys/kernel.h>
82 #include <sys/event.h>
83 #include <sys/poll.h>
84 #include <sys/protosw.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/resourcevar.h>
88 #include <sys/signalvar.h>
89 #include <sys/sysctl.h>
90 #include <sys/syslog.h>
91 #include <sys/uio.h>
92 #include <sys/uio_internal.h>
93 #include <sys/ev.h>
94 #include <sys/kdebug.h>
95 #include <sys/un.h>
96 #include <sys/user.h>
97 #include <sys/priv.h>
98 #include <sys/kern_event.h>
99 #include <sys/persona.h>
100 #include <net/route.h>
101 #include <net/init.h>
102 #include <net/net_api_stats.h>
103 #include <net/ntstat.h>
104 #include <net/content_filter.h>
105 #include <net/sockaddr_utils.h>
106 #include <netinet/in.h>
107 #include <netinet/in_pcb.h>
108 #include <netinet/in_tclass.h>
109 #include <netinet/in_var.h>
110 #include <netinet/tcp_var.h>
111 #include <netinet/ip6.h>
112 #include <netinet6/ip6_var.h>
113 #include <netinet/flow_divert.h>
114 #include <kern/zalloc.h>
115 #include <kern/locks.h>
116 #include <machine/limits.h>
117 #include <libkern/OSAtomic.h>
118 #include <pexpert/pexpert.h>
119 #include <kern/assert.h>
120 #include <kern/task.h>
121 #include <kern/policy_internal.h>
122
123 #include <sys/kpi_mbuf.h>
124 #include <sys/mcache.h>
125 #include <sys/unpcb.h>
126 #include <libkern/section_keywords.h>
127
128 #include <os/log.h>
129
130 #if CONFIG_MACF
131 #include <security/mac_framework.h>
132 #endif /* CONFIG_MACF */
133
134 #if MULTIPATH
135 #include <netinet/mp_pcb.h>
136 #include <netinet/mptcp_var.h>
137 #endif /* MULTIPATH */
138
139 #define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
140
141 #if DEBUG || DEVELOPMENT
142 #define DEBUG_KERNEL_ADDRPERM(_v) (_v)
143 #else
144 #define DEBUG_KERNEL_ADDRPERM(_v) VM_KERNEL_ADDRPERM(_v)
145 #endif
146
147 /* TODO: this should be in a header file somewhere */
148 extern char *proc_name_address(void *p);
149
150 static u_int32_t so_cache_hw; /* High water mark for socache */
151 static u_int32_t so_cache_timeouts; /* number of timeouts */
152 static u_int32_t so_cache_max_freed; /* max freed per timeout */
153 static u_int32_t cached_sock_count = 0;
154 STAILQ_HEAD(, socket) so_cache_head;
155 int max_cached_sock_count = MAX_CACHED_SOCKETS;
156 static uint64_t so_cache_time;
157 static int socketinit_done;
158 static struct zone *so_cache_zone;
160
161 static LCK_GRP_DECLARE(so_cache_mtx_grp, "so_cache");
162 static LCK_MTX_DECLARE(so_cache_mtx, &so_cache_mtx_grp);
163
165
166 static int filt_sorattach(struct knote *kn, struct kevent_qos_s *kev);
167 static void filt_sordetach(struct knote *kn);
168 static int filt_soread(struct knote *kn, long hint);
169 static int filt_sortouch(struct knote *kn, struct kevent_qos_s *kev);
170 static int filt_sorprocess(struct knote *kn, struct kevent_qos_s *kev);
171
172 static int filt_sowattach(struct knote *kn, struct kevent_qos_s *kev);
173 static void filt_sowdetach(struct knote *kn);
174 static int filt_sowrite(struct knote *kn, long hint);
175 static int filt_sowtouch(struct knote *kn, struct kevent_qos_s *kev);
176 static int filt_sowprocess(struct knote *kn, struct kevent_qos_s *kev);
177
178 static int filt_sockattach(struct knote *kn, struct kevent_qos_s *kev);
179 static void filt_sockdetach(struct knote *kn);
180 static int filt_sockev(struct knote *kn, long hint);
181 static int filt_socktouch(struct knote *kn, struct kevent_qos_s *kev);
182 static int filt_sockprocess(struct knote *kn, struct kevent_qos_s *kev);
183
184 static int sooptcopyin_timeval(struct sockopt *, struct timeval *);
185 static int sooptcopyout_timeval(struct sockopt *, const struct timeval *);
186
187 SECURITY_READ_ONLY_EARLY(struct filterops) soread_filtops = {
188 .f_isfd = 1,
189 .f_attach = filt_sorattach,
190 .f_detach = filt_sordetach,
191 .f_event = filt_soread,
192 .f_touch = filt_sortouch,
193 .f_process = filt_sorprocess,
194 };
195
196 SECURITY_READ_ONLY_EARLY(struct filterops) sowrite_filtops = {
197 .f_isfd = 1,
198 .f_attach = filt_sowattach,
199 .f_detach = filt_sowdetach,
200 .f_event = filt_sowrite,
201 .f_touch = filt_sowtouch,
202 .f_process = filt_sowprocess,
203 };
204
205 SECURITY_READ_ONLY_EARLY(struct filterops) sock_filtops = {
206 .f_isfd = 1,
207 .f_attach = filt_sockattach,
208 .f_detach = filt_sockdetach,
209 .f_event = filt_sockev,
210 .f_touch = filt_socktouch,
211 .f_process = filt_sockprocess,
212 };
213
214 SECURITY_READ_ONLY_EARLY(struct filterops) soexcept_filtops = {
215 .f_isfd = 1,
216 .f_attach = filt_sorattach,
217 .f_detach = filt_sordetach,
218 .f_event = filt_soread,
219 .f_touch = filt_sortouch,
220 .f_process = filt_sorprocess,
221 };
222
223 SYSCTL_DECL(_kern_ipc);
224
225 #define EVEN_MORE_LOCKING_DEBUG 0
226
227 int socket_debug = 0;
228 SYSCTL_INT(_kern_ipc, OID_AUTO, socket_debug,
229 CTLFLAG_RW | CTLFLAG_LOCKED, &socket_debug, 0, "");
230
231 #if (DEBUG || DEVELOPMENT)
232 #define DEFAULT_SOSEND_ASSERT_PANIC 1
233 #else
234 #define DEFAULT_SOSEND_ASSERT_PANIC 0
235 #endif /* (DEBUG || DEVELOPMENT) */
236
237 int sosend_assert_panic = DEFAULT_SOSEND_ASSERT_PANIC;
238 SYSCTL_INT(_kern_ipc, OID_AUTO, sosend_assert_panic,
239 CTLFLAG_RW | CTLFLAG_LOCKED, &sosend_assert_panic, DEFAULT_SOSEND_ASSERT_PANIC, "");
240
241 static unsigned long sodefunct_calls = 0;
242 SYSCTL_LONG(_kern_ipc, OID_AUTO, sodefunct_calls, CTLFLAG_LOCKED,
243 &sodefunct_calls, "");
244
245 ZONE_DEFINE_TYPE(socket_zone, "socket", struct socket, ZC_ZFREE_CLEARMEM);
246 so_gen_t so_gencnt; /* generation count for sockets */
247
248 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
249
250 #define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0)
251 #define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2)
252 #define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETSOCK, 1)
253 #define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETSOCK, 3)
254 #define DBG_FNC_SOSEND NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1)
255 #define DBG_FNC_SOSEND_LIST NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 3)
256 #define DBG_FNC_SORECEIVE NETDBG_CODE(DBG_NETSOCK, (8 << 8))
257 #define DBG_FNC_SORECEIVE_LIST NETDBG_CODE(DBG_NETSOCK, (8 << 8) | 3)
258 #define DBG_FNC_SOSHUTDOWN NETDBG_CODE(DBG_NETSOCK, (9 << 8))
259
260 #define MAX_SOOPTGETM_SIZE (128 * MCLBYTES)
261
262 int somaxconn = SOMAXCONN;
263 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
264 CTLFLAG_RW | CTLFLAG_LOCKED, &somaxconn, 0, "");
265
266 /* Should we get a maximum also ??? */
267 static int sosendmaxchain = 65536;
268 static int sosendminchain = 16384;
269 static int sorecvmincopy = 16384;
270 SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain,
271 CTLFLAG_RW | CTLFLAG_LOCKED, &sosendminchain, 0, "");
272 SYSCTL_INT(_kern_ipc, OID_AUTO, sorecvmincopy,
273 CTLFLAG_RW | CTLFLAG_LOCKED, &sorecvmincopy, 0, "");
274
275 /*
276 * Set to enable jumbo clusters (if available) for large writes when
277 * the socket is marked with SOF_MULTIPAGES; see below.
278 */
279 int sosendjcl = 1;
280 SYSCTL_INT(_kern_ipc, OID_AUTO, sosendjcl,
281 CTLFLAG_RW | CTLFLAG_LOCKED, &sosendjcl, 0, "");
282
283 /*
284 * Set this to ignore SOF_MULTIPAGES and use jumbo clusters for large
285 * writes on the socket for all protocols on any network interfaces,
286 * depending upon sosendjcl above. Be extra careful when setting this
287 * to 1, because sending packets that cross physical pages down to
288 * broken drivers (those that falsely assume that the physical pages
289 * are contiguous) might lead to system panics or silent data corruption.
290 * When set to 0, the system will respect SOF_MULTIPAGES, which is set
291 * only for TCP sockets whose outgoing interface is IFNET_MULTIPAGES
292 * capable. Set this to 1 only for testing/debugging purposes.
293 */
294 int sosendjcl_ignore_capab = 0;
295 SYSCTL_INT(_kern_ipc, OID_AUTO, sosendjcl_ignore_capab,
296 CTLFLAG_RW | CTLFLAG_LOCKED, &sosendjcl_ignore_capab, 0, "");
297
298 /*
299 * Set this to ignore SOF1_IF_2KCL and use big clusters for large
300 * writes on the socket for all protocols on any network interfaces.
301 * Be extra careful when setting this to 1, because sending down packets with
302 * clusters larger than 2 KB might lead to system panics or data corruption.
303 * When set to 0, the system will respect SOF1_IF_2KCL, which is set
304 * on the outgoing interface.
305 * Set this to 1 for testing/debugging purposes only.
306 */
307 int sosendbigcl_ignore_capab = 0;
308 SYSCTL_INT(_kern_ipc, OID_AUTO, sosendbigcl_ignore_capab,
309 CTLFLAG_RW | CTLFLAG_LOCKED, &sosendbigcl_ignore_capab, 0, "");
310
311 int sodefunctlog = 0;
312 SYSCTL_INT(_kern_ipc, OID_AUTO, sodefunctlog, CTLFLAG_RW | CTLFLAG_LOCKED,
313 &sodefunctlog, 0, "");
314
315 int sothrottlelog = 0;
316 SYSCTL_INT(_kern_ipc, OID_AUTO, sothrottlelog, CTLFLAG_RW | CTLFLAG_LOCKED,
317 &sothrottlelog, 0, "");
318
319 int sorestrictrecv = 1;
320 SYSCTL_INT(_kern_ipc, OID_AUTO, sorestrictrecv, CTLFLAG_RW | CTLFLAG_LOCKED,
321 &sorestrictrecv, 0, "Enable inbound interface restrictions");
322
323 int sorestrictsend = 1;
324 SYSCTL_INT(_kern_ipc, OID_AUTO, sorestrictsend, CTLFLAG_RW | CTLFLAG_LOCKED,
325 &sorestrictsend, 0, "Enable outbound interface restrictions");
326
327 int soreserveheadroom = 1;
328 SYSCTL_INT(_kern_ipc, OID_AUTO, soreserveheadroom, CTLFLAG_RW | CTLFLAG_LOCKED,
329 &soreserveheadroom, 0, "To allocate contiguous datagram buffers");
330
331 #if (DEBUG || DEVELOPMENT)
332 int so_notsent_lowat_check = 1;
333 SYSCTL_INT(_kern_ipc, OID_AUTO, notsent_lowat, CTLFLAG_RW | CTLFLAG_LOCKED,
334 &so_notsent_lowat_check, 0, "enable/disable notsent lowat check");
335 #endif /* DEBUG || DEVELOPMENT */
336
337 int so_accept_list_waits = 0;
338 #if (DEBUG || DEVELOPMENT)
339 SYSCTL_INT(_kern_ipc, OID_AUTO, accept_list_waits, CTLFLAG_RW | CTLFLAG_LOCKED,
340 &so_accept_list_waits, 0, "number of waits for listener incomp list");
341 #endif /* DEBUG || DEVELOPMENT */
342
343 extern struct inpcbinfo tcbinfo;
344
345 /* TODO: these should be in a header file */
346 extern int get_inpcb_str_size(void);
347 extern int get_tcp_str_size(void);
348
349 vm_size_t so_cache_zone_element_size;
350
351 static int sodelayed_copy(struct socket *, struct uio *, struct mbuf **,
352 user_ssize_t *);
353 static void cached_sock_alloc(struct socket **, zalloc_flags_t);
354 static void cached_sock_free(struct socket *);
355
356 /*
357 * Maximum number of extended background idle sockets per process.
358 * Set to zero to disable further setting of the option.
359 */
360
361 #define SO_IDLE_BK_IDLE_MAX_PER_PROC 1
362 #define SO_IDLE_BK_IDLE_TIME 600
363 #define SO_IDLE_BK_IDLE_RCV_HIWAT 131072
364
365 struct soextbkidlestat soextbkidlestat;
366
367 SYSCTL_UINT(_kern_ipc, OID_AUTO, maxextbkidleperproc,
368 CTLFLAG_RW | CTLFLAG_LOCKED, &soextbkidlestat.so_xbkidle_maxperproc, 0,
369 "Maximum of extended background idle sockets per process");
370
371 SYSCTL_UINT(_kern_ipc, OID_AUTO, extbkidletime, CTLFLAG_RW | CTLFLAG_LOCKED,
372 &soextbkidlestat.so_xbkidle_time, 0,
373 "Time in seconds to keep extended background idle sockets");
374
375 SYSCTL_UINT(_kern_ipc, OID_AUTO, extbkidlercvhiwat, CTLFLAG_RW | CTLFLAG_LOCKED,
376 &soextbkidlestat.so_xbkidle_rcvhiwat, 0,
377 "High water mark for extended background idle sockets");
378
379 SYSCTL_STRUCT(_kern_ipc, OID_AUTO, extbkidlestat, CTLFLAG_RD | CTLFLAG_LOCKED,
380 &soextbkidlestat, soextbkidlestat, "");
381
382 int so_set_extended_bk_idle(struct socket *, int);
383
384 #define SO_MAX_MSG_X 1024
385
386 /*
387 * SOTCDB_NO_DSCP is set by default, to prevent the networking stack from
388 * setting the DSCP code on the packet based on the service class; see
389 * <rdar://problem/11277343> for details.
390 */
391 __private_extern__ u_int32_t sotcdb = 0;
392 SYSCTL_INT(_kern_ipc, OID_AUTO, sotcdb, CTLFLAG_RW | CTLFLAG_LOCKED,
393 &sotcdb, 0, "");
394
395 void
396 socketinit(void)
397 {
398 _CASSERT(sizeof(so_gencnt) == sizeof(uint64_t));
399 VERIFY(IS_P2ALIGNED(&so_gencnt, sizeof(uint32_t)));
400
401 #ifdef __LP64__
402 _CASSERT(sizeof(struct sa_endpoints) == sizeof(struct user64_sa_endpoints));
403 _CASSERT(offsetof(struct sa_endpoints, sae_srcif) == offsetof(struct user64_sa_endpoints, sae_srcif));
404 _CASSERT(offsetof(struct sa_endpoints, sae_srcaddr) == offsetof(struct user64_sa_endpoints, sae_srcaddr));
405 _CASSERT(offsetof(struct sa_endpoints, sae_srcaddrlen) == offsetof(struct user64_sa_endpoints, sae_srcaddrlen));
406 _CASSERT(offsetof(struct sa_endpoints, sae_dstaddr) == offsetof(struct user64_sa_endpoints, sae_dstaddr));
407 _CASSERT(offsetof(struct sa_endpoints, sae_dstaddrlen) == offsetof(struct user64_sa_endpoints, sae_dstaddrlen));
408 #else
409 _CASSERT(sizeof(struct sa_endpoints) == sizeof(struct user32_sa_endpoints));
410 _CASSERT(offsetof(struct sa_endpoints, sae_srcif) == offsetof(struct user32_sa_endpoints, sae_srcif));
411 _CASSERT(offsetof(struct sa_endpoints, sae_srcaddr) == offsetof(struct user32_sa_endpoints, sae_srcaddr));
412 _CASSERT(offsetof(struct sa_endpoints, sae_srcaddrlen) == offsetof(struct user32_sa_endpoints, sae_srcaddrlen));
413 _CASSERT(offsetof(struct sa_endpoints, sae_dstaddr) == offsetof(struct user32_sa_endpoints, sae_dstaddr));
414 _CASSERT(offsetof(struct sa_endpoints, sae_dstaddrlen) == offsetof(struct user32_sa_endpoints, sae_dstaddrlen));
415 #endif
416
417 if (socketinit_done) {
418 printf("socketinit: already called...\n");
419 return;
420 }
421 socketinit_done = 1;
422
423 PE_parse_boot_argn("socket_debug", &socket_debug,
424 sizeof(socket_debug));
425
426 PE_parse_boot_argn("sosend_assert_panic", &sosend_assert_panic,
427 sizeof(sosend_assert_panic));
428
429 STAILQ_INIT(&so_cache_head);
430
431 so_cache_zone_element_size = (vm_size_t)(sizeof(struct socket) + 4
432 + get_inpcb_str_size() + 4 + get_tcp_str_size());
433
434 so_cache_zone = zone_create("socache zone", so_cache_zone_element_size,
435 ZC_PGZ_USE_GUARDS | ZC_ZFREE_CLEARMEM);
436
437 bzero(&soextbkidlestat, sizeof(struct soextbkidlestat));
438 soextbkidlestat.so_xbkidle_maxperproc = SO_IDLE_BK_IDLE_MAX_PER_PROC;
439 soextbkidlestat.so_xbkidle_time = SO_IDLE_BK_IDLE_TIME;
440 soextbkidlestat.so_xbkidle_rcvhiwat = SO_IDLE_BK_IDLE_RCV_HIWAT;
441
442 in_pcbinit();
443 }
444
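/*
 * Socket cache helpers: PF_INET/SOCK_STREAM sockets are carved out of a
 * single so_cache_zone element that also holds space for the saved inpcb
 * and tcpcb (see the element size computed in socketinit). cached_sock_alloc
 * reuses an entry from so_cache_head when one is available and otherwise
 * allocates a fresh zone element; cached_sock_free returns entries to the
 * cache up to max_cached_sock_count.
 */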
445 static void
446 cached_sock_alloc(struct socket **so, zalloc_flags_t how)
447 {
448 caddr_t temp;
449 uintptr_t offset;
450
451 lck_mtx_lock(&so_cache_mtx);
452
453 if (!STAILQ_EMPTY(&so_cache_head)) {
454 VERIFY(cached_sock_count > 0);
455
456 *so = STAILQ_FIRST(&so_cache_head);
457 STAILQ_REMOVE_HEAD(&so_cache_head, so_cache_ent);
458 STAILQ_NEXT((*so), so_cache_ent) = NULL;
459
460 cached_sock_count--;
461 lck_mtx_unlock(&so_cache_mtx);
462
463 temp = (*so)->so_saved_pcb;
464 bzero(*so, sizeof(struct socket));
465
466 (*so)->so_saved_pcb = temp;
467 } else {
468 lck_mtx_unlock(&so_cache_mtx);
469
470 uint8_t *so_mem = zalloc_flags_buf(so_cache_zone, how | Z_ZERO);
471 #pragma clang diagnostic push
472 #pragma clang diagnostic ignored "-Wcast-align"
473 *so = (struct socket *)so_mem;
474
475 /*
476 * Define offsets for extra structures into our
477 * single block of memory. Align extra structures
478 * on longword boundaries.
479 */
480
481 offset = (uintptr_t)so_mem;
482 offset += sizeof(struct socket);
483 offset = ALIGN(offset);
484 struct inpcb *pcb = (struct inpcb *)(so_mem + (offset - (uintptr_t)so_mem));
485 #pragma clang diagnostic pop
486 (*so)->so_saved_pcb = (caddr_t)pcb;
487
488 offset += get_inpcb_str_size();
489 offset = ALIGN(offset);
490 pcb->inp_saved_ppcb = (caddr_t)(so_mem + (offset - (uintptr_t)so_mem));
491 }
492
493 OSBitOrAtomic(SOF1_CACHED_IN_SOCK_LAYER, &(*so)->so_flags1);
494 }
495
496 static void
497 cached_sock_free(struct socket *so)
498 {
499 lck_mtx_lock(&so_cache_mtx);
500
501 so_cache_time = net_uptime();
502 if (++cached_sock_count > max_cached_sock_count) {
503 --cached_sock_count;
504 lck_mtx_unlock(&so_cache_mtx);
505 zfree(so_cache_zone, so);
506 } else {
507 if (so_cache_hw < cached_sock_count) {
508 so_cache_hw = cached_sock_count;
509 }
510
511 STAILQ_INSERT_TAIL(&so_cache_head, so, so_cache_ent);
512
513 so->cache_timestamp = so_cache_time;
514 lck_mtx_unlock(&so_cache_mtx);
515 }
516 }
517
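/*
 * Record the most recent process to use the socket (pid, unique pid and
 * executable UUID) for ownership attribution. Sockets created via
 * sock_socket keep last_pid == 0 and are intentionally left untouched.
 */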
518 void
519 so_update_last_owner_locked(struct socket *so, proc_t self)
520 {
521 if (so->last_pid != 0) {
522 /*
523 * last_pid and last_upid should remain zero for sockets
524 * created using sock_socket. The check above achieves that
525 */
526 if (self == PROC_NULL) {
527 self = current_proc();
528 }
529
530 if (so->last_upid != proc_uniqueid(self) ||
531 so->last_pid != proc_pid(self)) {
532 so->last_upid = proc_uniqueid(self);
533 so->last_pid = proc_pid(self);
534 proc_getexecutableuuid(self, so->last_uuid,
535 sizeof(so->last_uuid));
536 if (so->so_proto != NULL && so->so_proto->pr_update_last_owner != NULL) {
537 (*so->so_proto->pr_update_last_owner)(so, self, NULL);
538 }
539 }
540 proc_pidoriginatoruuid(so->so_vuuid, sizeof(so->so_vuuid));
541 }
542 }
543
544 void
545 so_update_policy(struct socket *so)
546 {
547 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
548 (void) inp_update_policy(sotoinpcb(so));
549 }
550 }
551
552 #if NECP
553 static void
554 so_update_necp_policy(struct socket *so, struct sockaddr *override_local_addr,
555 struct sockaddr *override_remote_addr)
556 {
557 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
558 inp_update_necp_policy(sotoinpcb(so), override_local_addr,
559 override_remote_addr, 0);
560 }
561 }
562 #endif /* NECP */
563
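/*
 * Reclaim cached sockets that have been on so_cache_head longer than
 * SO_CACHE_TIME_LIMIT, freeing at most SO_CACHE_MAX_FREE_BATCH entries per
 * call; returns TRUE when entries remain so the timer can be rescheduled.
 */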
564 boolean_t
565 so_cache_timer(void)
566 {
567 struct socket *p;
568 int n_freed = 0;
569 boolean_t rc = FALSE;
570
571 lck_mtx_lock(&so_cache_mtx);
572 so_cache_timeouts++;
573 so_cache_time = net_uptime();
574
575 while (!STAILQ_EMPTY(&so_cache_head)) {
576 VERIFY(cached_sock_count > 0);
577 p = STAILQ_FIRST(&so_cache_head);
578 if ((so_cache_time - p->cache_timestamp) <
579 SO_CACHE_TIME_LIMIT) {
580 break;
581 }
582
583 STAILQ_REMOVE_HEAD(&so_cache_head, so_cache_ent);
584 --cached_sock_count;
585
586 zfree(so_cache_zone, p);
587
588 if (++n_freed >= SO_CACHE_MAX_FREE_BATCH) {
589 so_cache_max_freed++;
590 break;
591 }
592 }
593
594 /* Schedule again if there is more to clean up */
595 if (!STAILQ_EMPTY(&so_cache_head)) {
596 rc = TRUE;
597 }
598
599 lck_mtx_unlock(&so_cache_mtx);
600 return rc;
601 }
602
603 /*
604 * Get a socket structure from our zone, and initialize it.
605 * We don't implement `waitok' yet (see comments in uipc_domain.c).
606 * Note that it would probably be better to allocate socket
607 * and PCB at the same time, but I'm not convinced that all
608 * the protocols can be easily modified to do this.
609 */
610 struct socket *
611 soalloc(int waitok, int dom, int type)
612 {
613 zalloc_flags_t how = waitok ? Z_WAITOK : Z_NOWAIT;
614 struct socket *__single so;
615
616 if ((dom == PF_INET) && (type == SOCK_STREAM)) {
617 cached_sock_alloc(&so, how);
618 } else {
619 so = zalloc_flags(socket_zone, how | Z_ZERO);
620 }
621 if (so != NULL) {
622 so->so_gencnt = OSIncrementAtomic64((SInt64 *)&so_gencnt);
623
624 /*
625 * Increment the socket allocation statistics
626 */
627 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_alloc_total);
628 }
629
630 return so;
631 }
632
633 int
634 socreate_internal(int dom, struct socket **aso, int type, int proto,
635 struct proc *p, uint32_t flags, struct proc *ep)
636 {
637 struct protosw *prp;
638 struct socket *so;
639 int error = 0;
640 pid_t rpid = -1;
641
642 VERIFY(aso != NULL);
643 *aso = NULL;
644
645 if (proto != 0) {
646 prp = pffindproto(dom, proto, type);
647 } else {
648 prp = pffindtype(dom, type);
649 }
650
651 if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL) {
652 if (pffinddomain(dom) == NULL) {
653 return EAFNOSUPPORT;
654 }
655 if (proto != 0) {
656 if (pffindprotonotype(dom, proto) != NULL) {
657 return EPROTOTYPE;
658 }
659 }
660 return EPROTONOSUPPORT;
661 }
662 if (prp->pr_type != type) {
663 return EPROTOTYPE;
664 }
665 so = soalloc(1, dom, type);
666 if (so == NULL) {
667 return ENOBUFS;
668 }
669
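/* Account for the new socket in the per-domain creation statistics */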
670 switch (dom) {
671 case PF_LOCAL:
672 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_local_total);
673 break;
674 case PF_INET:
675 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_inet_total);
676 if (type == SOCK_STREAM) {
677 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_stream_total);
678 } else {
679 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_total);
680 }
681 break;
682 case PF_ROUTE:
683 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_route_total);
684 break;
685 case PF_NDRV:
686 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_ndrv_total);
687 break;
688 case PF_KEY:
689 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_key_total);
690 break;
691 case PF_INET6:
692 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_inet6_total);
693 if (type == SOCK_STREAM) {
694 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet6_stream_total);
695 } else {
696 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet6_dgram_total);
697 }
698 break;
699 case PF_SYSTEM:
700 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_system_total);
701 break;
702 case PF_MULTIPATH:
703 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_multipath_total);
704 break;
705 default:
706 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_other_total);
707 break;
708 }
709
710 if (flags & SOCF_MPTCP) {
711 so->so_state |= SS_NBIO;
712 }
713
714 TAILQ_INIT(&so->so_incomp);
715 TAILQ_INIT(&so->so_comp);
716 so->so_type = (short)type;
717 so->so_family = prp->pr_domain->dom_family;
718 so->so_protocol = prp->pr_protocol;
719 so->last_upid = proc_uniqueid(p);
720 so->last_pid = proc_pid(p);
721 proc_getexecutableuuid(p, so->last_uuid, sizeof(so->last_uuid));
722 proc_pidoriginatoruuid(so->so_vuuid, sizeof(so->so_vuuid));
723
724 so->so_rpid = -1;
725 uuid_clear(so->so_ruuid);
726
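/*
 * For delegated sockets, record the effective process (and, if different,
 * the responsible process) so attribution follows the application the
 * socket is created on behalf of.
 */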
727 if (ep != PROC_NULL && ep != p) {
728 so->e_upid = proc_uniqueid(ep);
729 so->e_pid = proc_pid(ep);
730 proc_getexecutableuuid(ep, so->e_uuid, sizeof(so->e_uuid));
731 so->so_flags |= SOF_DELEGATED;
732 if (ep->p_responsible_pid != so->e_pid) {
733 rpid = ep->p_responsible_pid;
734 so->so_rpid = rpid;
735 proc_getresponsibleuuid(ep, so->so_ruuid, sizeof(so->so_ruuid));
736 }
737 }
738
739 if (rpid < 0 && p->p_responsible_pid != so->last_pid) {
740 rpid = p->p_responsible_pid;
741 so->so_rpid = rpid;
742 proc_getresponsibleuuid(p, so->so_ruuid, sizeof(so->so_ruuid));
743 }
744
745 so->so_cred = kauth_cred_proc_ref(p);
746 if (!suser(kauth_cred_get(), NULL)) {
747 so->so_state |= SS_PRIV;
748 }
749
750 so->so_persona_id = current_persona_get_id();
751 so->so_proto = prp;
752 so->so_rcv.sb_flags |= SB_RECV;
753 so->so_rcv.sb_so = so->so_snd.sb_so = so;
754 so->next_lock_lr = 0;
755 so->next_unlock_lr = 0;
756
757 /*
758 * Attachment will create the per-pcb lock if necessary and
759 * increase the refcount for creation; make sure this is done before
760 * the socket is inserted in any lists.
761 */
762 so->so_usecount++;
763
764 error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
765 if (error != 0) {
766 /*
767 * Warning:
768 * If so_pcb is not zero, the socket will be leaked,
769 * so the protocol attachment handler must be coded carefully.
770 */
771 if (so->so_pcb != NULL) {
772 os_log_error(OS_LOG_DEFAULT,
773 "so_pcb not NULL after pru_attach error %d for dom %d, proto %d, type %d",
774 error, dom, proto, type);
775 }
776 /*
777 * Both SS_NOFDREF and SOF_PCBCLEARING should be set to free the socket
778 */
779 so->so_state |= SS_NOFDREF;
780 so->so_flags |= SOF_PCBCLEARING;
781 VERIFY(so->so_usecount > 0);
782 so->so_usecount--;
783 sofreelastref(so, 1); /* will deallocate the socket */
784 return error;
785 }
786
787 /*
788 * Note: needs so_pcb to be set after pru_attach
789 */
790 if (prp->pr_update_last_owner != NULL) {
791 (*prp->pr_update_last_owner)(so, p, ep);
792 }
793
794 os_atomic_inc(&prp->pr_domain->dom_refs, relaxed);
795
796 /* Attach socket filters for this protocol */
797 sflt_initsock(so);
798 so_set_default_traffic_class(so);
799
800 /*
801 * If this thread or task is marked to create backgrounded sockets,
802 * mark the socket as background.
803 */
804 if (!(flags & SOCF_MPTCP) &&
805 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_NEW_SOCKETS_BG)) {
806 socket_set_traffic_mgt_flags(so, TRAFFIC_MGT_SO_BACKGROUND);
807 so->so_background_thread = current_thread();
808 }
809
810 switch (dom) {
811 /*
812 * Don't mark Unix domain or system
813 * sockets eligible for defunct by default.
814 */
815 case PF_LOCAL:
816 case PF_SYSTEM:
817 so->so_flags |= SOF_NODEFUNCT;
818 break;
819 default:
820 break;
821 }
822
823 /*
824 * Entitlements can't be checked at socket creation time except if the
825 * application requested a feature guarded by a privilege (c.f., socket
826 * delegation).
827 * The priv(9) and the Sandboxing APIs are designed with the idea that
828 * a privilege check should only be triggered by a userland request.
829 * A privilege check at socket creation time is time consuming and
830 * could trigger many authorisation error messages from the security
831 * APIs.
832 */
833
834 *aso = so;
835
836 return 0;
837 }
838
839 /*
840 * Returns: 0 Success
841 * EAFNOSUPPORT
842 * EPROTOTYPE
843 * EPROTONOSUPPORT
844 * ENOBUFS
845 * <pru_attach>:ENOBUFS[AF_UNIX]
846 * <pru_attach>:ENOBUFS[TCP]
847 * <pru_attach>:ENOMEM[TCP]
848 * <pru_attach>:??? [other protocol families, IPSEC]
849 */
850 int
851 socreate(int dom, struct socket **aso, int type, int proto)
852 {
853 return socreate_internal(dom, aso, type, proto, current_proc(), 0,
854 PROC_NULL);
855 }
856
857 int
858 socreate_delegate(int dom, struct socket **aso, int type, int proto, pid_t epid)
859 {
860 int error = 0;
861 struct proc *ep = PROC_NULL;
862
863 if ((proc_selfpid() != epid) && ((ep = proc_find(epid)) == PROC_NULL)) {
864 error = ESRCH;
865 goto done;
866 }
867
868 error = socreate_internal(dom, aso, type, proto, current_proc(), 0, ep);
869
870 /*
871 * It might not be wise to hold the proc reference when calling
872 * socreate_internal since it calls soalloc with M_WAITOK
873 */
874 done:
875 if (ep != PROC_NULL) {
876 proc_rele(ep);
877 }
878
879 return error;
880 }
881
882 /*
883 * Returns: 0 Success
884 * <pru_bind>:EINVAL Invalid argument [COMMON_START]
885 * <pru_bind>:EAFNOSUPPORT Address family not supported
886 * <pru_bind>:EADDRNOTAVAIL Address not available.
887 * <pru_bind>:EINVAL Invalid argument
888 * <pru_bind>:EAFNOSUPPORT Address family not supported [notdef]
889 * <pru_bind>:EACCES Permission denied
890 * <pru_bind>:EADDRINUSE Address in use
891 * <pru_bind>:EAGAIN Resource unavailable, try again
892 * <pru_bind>:EPERM Operation not permitted
893 * <pru_bind>:???
894 * <sf_bind>:???
895 *
896 * Notes: It's not possible to fully enumerate the return codes above,
897 * since socket filter authors and protocol family authors may
898 * not choose to limit their error returns to those listed, even
899 * though this may result in some software operating incorrectly.
900 *
901 * The error codes which are enumerated above are those known to
902 * be returned by the tcp_usr_bind function supplied.
903 */
904 int
905 sobindlock(struct socket *so, struct sockaddr *nam, int dolock)
906 {
907 struct proc *p = current_proc();
908 int error = 0;
909
910 if (dolock) {
911 socket_lock(so, 1);
912 }
913
914 so_update_last_owner_locked(so, p);
915 so_update_policy(so);
916
917 #if NECP
918 so_update_necp_policy(so, nam, NULL);
919 #endif /* NECP */
920
921 /*
922 * If this is a bind request on a socket that has been marked
923 * as inactive, reject it now before we go any further.
924 */
925 if (so->so_flags & SOF_DEFUNCT) {
926 error = EINVAL;
927 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llu [%d,%d] (%d)\n",
928 __func__, proc_pid(p), proc_best_name(p),
929 so->so_gencnt,
930 SOCK_DOM(so), SOCK_TYPE(so), error);
931 goto out;
932 }
933
934 /* Socket filter */
935 error = sflt_bind(so, nam);
936
937 if (error == 0) {
938 error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
939 }
940 out:
941 if (dolock) {
942 socket_unlock(so, 1);
943 }
944
945 if (error == EJUSTRETURN) {
946 error = 0;
947 }
948
949 return error;
950 }
951
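/*
 * Release the credential reference, detach any socket filters, and return
 * the memory either to the socket cache or to socket_zone depending on how
 * the socket was allocated.
 */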
952 void
953 sodealloc(struct socket *so)
954 {
955 kauth_cred_unref(&so->so_cred);
956
957 /* Remove any filters */
958 sflt_termsock(so);
959
960 so->so_gencnt = OSIncrementAtomic64((SInt64 *)&so_gencnt);
961
962 if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) {
963 cached_sock_free(so);
964 } else {
965 zfree(socket_zone, so);
966 }
967 }
968
969 /*
970 * Returns: 0 Success
971 * EINVAL
972 * EOPNOTSUPP
973 * <pru_listen>:EINVAL[AF_UNIX]
974 * <pru_listen>:EINVAL[TCP]
975 * <pru_listen>:EADDRNOTAVAIL[TCP] Address not available.
976 * <pru_listen>:EINVAL[TCP] Invalid argument
977 * <pru_listen>:EAFNOSUPPORT[TCP] Address family not supported [notdef]
978 * <pru_listen>:EACCES[TCP] Permission denied
979 * <pru_listen>:EADDRINUSE[TCP] Address in use
980 * <pru_listen>:EAGAIN[TCP] Resource unavailable, try again
981 * <pru_listen>:EPERM[TCP] Operation not permitted
982 * <sf_listen>:???
983 *
984 * Notes: Other <pru_listen> returns depend on the protocol family; all
985 * <sf_listen> returns depend on what the filter author causes
986 * their filter to return.
987 */
988 int
989 solisten(struct socket *so, int backlog)
990 {
991 struct proc *p = current_proc();
992 int error = 0;
993
994 socket_lock(so, 1);
995
996 so_update_last_owner_locked(so, p);
997 so_update_policy(so);
998
999 if (TAILQ_EMPTY(&so->so_comp)) {
1000 so->so_options |= SO_ACCEPTCONN;
1001 }
1002
1003 #if NECP
1004 so_update_necp_policy(so, NULL, NULL);
1005 #endif /* NECP */
1006
1007 if (so->so_proto == NULL) {
1008 error = EINVAL;
1009 so->so_options &= ~SO_ACCEPTCONN;
1010 goto out;
1011 }
1012 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0) {
1013 error = EOPNOTSUPP;
1014 so->so_options &= ~SO_ACCEPTCONN;
1015 goto out;
1016 }
1017
1018 /*
1019 * If the listen request is made on a socket that is not fully
1020 * disconnected, or on a socket that has been marked as inactive,
1021 * reject the request now.
1022 */
1023 if ((so->so_state &
1024 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) ||
1025 (so->so_flags & SOF_DEFUNCT)) {
1026 error = EINVAL;
1027 if (so->so_flags & SOF_DEFUNCT) {
1028 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llu [%d,%d] "
1029 "(%d)\n", __func__, proc_pid(p),
1030 proc_best_name(p),
1031 so->so_gencnt,
1032 SOCK_DOM(so), SOCK_TYPE(so), error);
1033 }
1034 so->so_options &= ~SO_ACCEPTCONN;
1035 goto out;
1036 }
1037
1038 if ((so->so_restrictions & SO_RESTRICT_DENY_IN) != 0) {
1039 error = EPERM;
1040 so->so_options &= ~SO_ACCEPTCONN;
1041 goto out;
1042 }
1043
1044 error = sflt_listen(so);
1045 if (error == 0) {
1046 error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
1047 }
1048
1049 if (error) {
1050 if (error == EJUSTRETURN) {
1051 error = 0;
1052 }
1053 so->so_options &= ~SO_ACCEPTCONN;
1054 goto out;
1055 }
1056
1057 /*
1058 * POSIX: The implementation may have an upper limit on the length of
1059 * the listen queue-either global or per accepting socket. If backlog
1060 * exceeds this limit, the length of the listen queue is set to the
1061 * limit.
1062 *
1063 * If listen() is called with a backlog argument value that is less
1064 * than 0, the function behaves as if it had been called with a backlog
1065 * argument value of 0.
1066 *
1067 * A backlog argument of 0 may allow the socket to accept connections,
1068 * in which case the length of the listen queue may be set to an
1069 * implementation-defined minimum value.
1070 */
1071 if (backlog <= 0 || backlog > somaxconn) {
1072 backlog = somaxconn;
1073 }
1074
1075 so->so_qlimit = (short)backlog;
1076 out:
1077 socket_unlock(so, 1);
1078 return error;
1079 }
1080
1081 /*
1082 * The "accept list lock" protects the fields related to the listener queues
1083 * because we can unlock a socket to respect the lock ordering between
1084 * the listener socket and its client sockets. The lock ordering is first to
1085 * acquire the client socket before the listener socket.
1086 *
1087 * The accept list lock serializes access to the following fields:
1088 * - of the listener socket:
1089 * - so_comp
1090 * - so_incomp
1091 * - so_qlen
1092 * - so_inqlen
1093 * - of client sockets that are in so_comp or so_incomp:
1094 * - so_head
1095 * - so_list
1096 *
1097 * As one can see, the accept list lock protects the consistency of the
1098 * linkage of the client sockets.
1099 *
1100 * Note that those fields may be read without holding the accept list lock
1101 * for a preflight provided the accept list lock is taken when committing
1102 * to take an action based on the result of the preflight. The preflight
1103 * saves the cost of doing the unlock/lock dance.
1104 */
1105 void
1106 so_acquire_accept_list(struct socket *head, struct socket *so)
1107 {
1108 lck_mtx_t *mutex_held;
1109
1110 if (head->so_proto->pr_getlock == NULL) {
1111 return;
1112 }
1113 mutex_held = (*head->so_proto->pr_getlock)(head, PR_F_WILLUNLOCK);
1114 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
1115
1116 if (!(head->so_flags1 & SOF1_ACCEPT_LIST_HELD)) {
1117 head->so_flags1 |= SOF1_ACCEPT_LIST_HELD;
1118 return;
1119 }
1120 if (so != NULL) {
1121 socket_unlock(so, 0);
1122 }
1123 while (head->so_flags1 & SOF1_ACCEPT_LIST_HELD) {
1124 so_accept_list_waits += 1;
1125 msleep((caddr_t)&head->so_incomp, mutex_held,
1126 PSOCK | PCATCH, __func__, NULL);
1127 }
1128 head->so_flags1 |= SOF1_ACCEPT_LIST_HELD;
1129 if (so != NULL) {
1130 socket_unlock(head, 0);
1131 socket_lock(so, 0);
1132 socket_lock(head, 0);
1133 }
1134 }
1135
1136 void
1137 so_release_accept_list(struct socket *head)
1138 {
1139 if (head->so_proto->pr_getlock != NULL) {
1140 lck_mtx_t *mutex_held;
1141
1142 mutex_held = (*head->so_proto->pr_getlock)(head, 0);
1143 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
1144
1145 head->so_flags1 &= ~SOF1_ACCEPT_LIST_HELD;
1146 wakeup((caddr_t)&head->so_incomp);
1147 }
1148 }
1149
1150 void
1151 sofreelastref(struct socket *so, int dealloc)
1152 {
1153 struct socket *head = so->so_head;
1154
1155 /* Assume socket is locked */
1156
1157 #if FLOW_DIVERT
1158 if (so->so_flags & SOF_FLOW_DIVERT) {
1159 flow_divert_detach(so);
1160 }
1161 #endif /* FLOW_DIVERT */
1162
1163 #if CONTENT_FILTER
1164 if ((so->so_flags & SOF_CONTENT_FILTER) != 0) {
1165 cfil_sock_detach(so);
1166 }
1167 #endif /* CONTENT_FILTER */
1168
1169 if (NEED_DGRAM_FLOW_TRACKING(so)) {
1170 soflow_detach(so);
1171 }
1172
1173 if (!(so->so_flags & SOF_PCBCLEARING) || !(so->so_state & SS_NOFDREF)) {
1174 selthreadclear(&so->so_snd.sb_sel);
1175 selthreadclear(&so->so_rcv.sb_sel);
1176 so->so_rcv.sb_flags &= ~(SB_SEL | SB_UPCALL);
1177 so->so_snd.sb_flags &= ~(SB_SEL | SB_UPCALL);
1178 so->so_event = sonullevent;
1179 return;
1180 }
1181 if (head != NULL) {
1182 /*
1183 * Need to lock the listener when the protocol has
1184 * per socket locks
1185 */
1186 if (head->so_proto->pr_getlock != NULL) {
1187 socket_lock(head, 1);
1188 so_acquire_accept_list(head, so);
1189 }
1190 if (so->so_state & SS_INCOMP) {
1191 so->so_state &= ~SS_INCOMP;
1192 TAILQ_REMOVE(&head->so_incomp, so, so_list);
1193 head->so_incqlen--;
1194 head->so_qlen--;
1195 so->so_head = NULL;
1196
1197 if (head->so_proto->pr_getlock != NULL) {
1198 so_release_accept_list(head);
1199 socket_unlock(head, 1);
1200 }
1201 } else if (so->so_state & SS_COMP) {
1202 if (head->so_proto->pr_getlock != NULL) {
1203 so_release_accept_list(head);
1204 socket_unlock(head, 1);
1205 }
1206 /*
1207 * We must not decommission a socket that's
1208 * on the accept(2) queue. If we do, then
1209 * accept(2) may hang after select(2) indicated
1210 * that the listening socket was ready.
1211 */
1212 selthreadclear(&so->so_snd.sb_sel);
1213 selthreadclear(&so->so_rcv.sb_sel);
1214 so->so_rcv.sb_flags &= ~(SB_SEL | SB_UPCALL);
1215 so->so_snd.sb_flags &= ~(SB_SEL | SB_UPCALL);
1216 so->so_event = sonullevent;
1217 return;
1218 } else {
1219 if (head->so_proto->pr_getlock != NULL) {
1220 so_release_accept_list(head);
1221 socket_unlock(head, 1);
1222 }
1223 printf("sofree: not queued\n");
1224 }
1225 }
1226 sowflush(so);
1227 sorflush(so);
1228
1229 /* 3932268: disable upcall */
1230 so->so_rcv.sb_flags &= ~SB_UPCALL;
1231 so->so_snd.sb_flags &= ~(SB_UPCALL | SB_SNDBYTE_CNT);
1232 so->so_event = sonullevent;
1233
1234 if (dealloc) {
1235 sodealloc(so);
1236 }
1237 }
1238
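/*
 * Block the closing thread until pending socket upcalls have drained; this
 * only waits when there is an outstanding upcall use count and the socket
 * has SOF_UPCALLCLOSEWAIT set.
 */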
1239 void
1240 soclose_wait_locked(struct socket *so)
1241 {
1242 lck_mtx_t *mutex_held;
1243
1244 if (so->so_proto->pr_getlock != NULL) {
1245 mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
1246 } else {
1247 mutex_held = so->so_proto->pr_domain->dom_mtx;
1248 }
1249 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
1250
1251 /*
1252 * Double check here and return if there's no outstanding upcall;
1253 * otherwise proceed further only if SOF_UPCALLCLOSEWAIT is set.
1254 */
1255 if (!so->so_upcallusecount || !(so->so_flags & SOF_UPCALLCLOSEWAIT)) {
1256 return;
1257 }
1258 so->so_rcv.sb_flags &= ~SB_UPCALL;
1259 so->so_snd.sb_flags &= ~SB_UPCALL;
1260 so->so_flags |= SOF_CLOSEWAIT;
1261
1262 (void) msleep((caddr_t)&so->so_upcallusecount, mutex_held, (PZERO - 1),
1263 "soclose_wait_locked", NULL);
1264 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
1265 so->so_flags &= ~SOF_CLOSEWAIT;
1266 }
1267
1268 /*
1269 * Close a socket on last file table reference removal.
1270 * Initiate disconnect if connected.
1271 * Free socket when disconnect complete.
1272 */
1273 int
1274 soclose_locked(struct socket *so)
1275 {
1276 int error = 0;
1277 struct timespec ts;
1278
1279 if (so->so_usecount == 0) {
1280 panic("soclose: so=%p refcount=0", so);
1281 /* NOTREACHED */
1282 }
1283
1284 sflt_notify(so, sock_evt_closing, NULL);
1285
1286 if (so->so_upcallusecount) {
1287 soclose_wait_locked(so);
1288 }
1289
1290 #if CONTENT_FILTER
1291 /*
1292 * We have to wait until the content filters are done
1293 */
1294 if ((so->so_flags & SOF_CONTENT_FILTER) != 0) {
1295 cfil_sock_close_wait(so);
1296 cfil_sock_is_closed(so);
1297 cfil_sock_detach(so);
1298 }
1299 #endif /* CONTENT_FILTER */
1300
1301 if (NEED_DGRAM_FLOW_TRACKING(so)) {
1302 soflow_detach(so);
1303 }
1304
1305 if (so->so_flags1 & SOF1_EXTEND_BK_IDLE_INPROG) {
1306 soresume(current_proc(), so, 1);
1307 so->so_flags1 &= ~SOF1_EXTEND_BK_IDLE_WANTED;
1308 }
1309
1310 if ((so->so_options & SO_ACCEPTCONN)) {
1311 struct socket *sp, *sonext;
1312 int persocklock = 0;
1313 int incomp_overflow_only;
1314
1315 /*
1316 * We do not want new connections to be added
1317 * to the connection queues
1318 */
1319 so->so_options &= ~SO_ACCEPTCONN;
1320
1321 /*
1322 * We can drop the lock on the listener once
1323 * we've acquired the incoming list
1324 */
1325 if (so->so_proto->pr_getlock != NULL) {
1326 persocklock = 1;
1327 so_acquire_accept_list(so, NULL);
1328 socket_unlock(so, 0);
1329 }
1330 again:
1331 incomp_overflow_only = 1;
1332
1333 TAILQ_FOREACH_SAFE(sp, &so->so_incomp, so_list, sonext) {
1334 /*
1335 * Radar 5350314
1336 * Skip sockets thrown away by tcp_dropdropablreq;
1337 * they will get cleaned up by the garbage collection.
1338 * Otherwise, remove the incomp socket from the queue
1339 * and let soabort trigger the appropriate cleanup.
1340 */
1341 if (sp->so_flags & SOF_OVERFLOW) {
1342 continue;
1343 }
1344
1345 if (persocklock != 0) {
1346 socket_lock(sp, 1);
1347 }
1348
1349 /*
1350 * Radar 27945981
1351 * The extra reference held for the list ensures the
1352 * validity of the socket pointer when we perform the
1353 * unlock of the head above.
1354 */
1355 if (sp->so_state & SS_INCOMP) {
1356 sp->so_state &= ~SS_INCOMP;
1357 sp->so_head = NULL;
1358 TAILQ_REMOVE(&so->so_incomp, sp, so_list);
1359 so->so_incqlen--;
1360 so->so_qlen--;
1361
1362 (void) soabort(sp);
1363 } else {
1364 panic("%s sp %p in so_incomp but !SS_INCOMP",
1365 __func__, sp);
1366 }
1367
1368 if (persocklock != 0) {
1369 socket_unlock(sp, 1);
1370 }
1371 }
1372
1373 TAILQ_FOREACH_SAFE(sp, &so->so_comp, so_list, sonext) {
1374 /* Dequeue from so_comp since sofree() won't do it */
1375 if (persocklock != 0) {
1376 socket_lock(sp, 1);
1377 }
1378
1379 if (sp->so_state & SS_COMP) {
1380 sp->so_state &= ~SS_COMP;
1381 sp->so_head = NULL;
1382 TAILQ_REMOVE(&so->so_comp, sp, so_list);
1383 so->so_qlen--;
1384
1385 (void) soabort(sp);
1386 } else {
1387 panic("%s sp %p in so_comp but !SS_COMP",
1388 __func__, sp);
1389 }
1390
1391 if (persocklock) {
1392 socket_unlock(sp, 1);
1393 }
1394 }
1395
1396 if (incomp_overflow_only == 0 && !TAILQ_EMPTY(&so->so_incomp)) {
1397 #if (DEBUG | DEVELOPMENT)
1398 panic("%s head %p so_incomp not empty", __func__, so);
1399 #endif /* (DEVELOPMENT || DEBUG) */
1400
1401 goto again;
1402 }
1403
1404 if (!TAILQ_EMPTY(&so->so_comp)) {
1405 #if (DEBUG | DEVELOPMENT)
1406 panic("%s head %p so_comp not empty", __func__, so);
1407 #endif /* (DEVELOPMENT || DEBUG) */
1408
1409 goto again;
1410 }
1411
1412 if (persocklock) {
1413 socket_lock(so, 0);
1414 so_release_accept_list(so);
1415 }
1416 }
1417 if (so->so_pcb == NULL) {
1418 /* 3915887: mark the socket as ready for dealloc */
1419 so->so_flags |= SOF_PCBCLEARING;
1420 goto discard;
1421 }
1422
1423 if (so->so_state & SS_ISCONNECTED) {
1424 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
1425 error = sodisconnectlocked(so);
1426 if (error) {
1427 goto drop;
1428 }
1429 }
1430 if (so->so_options & SO_LINGER) {
1431 if ((so->so_state & SS_ISDISCONNECTING) &&
1432 (so->so_state & SS_NBIO)) {
1433 goto drop;
1434 }
1435 while ((so->so_state & SS_ISCONNECTED) && so->so_linger > 0) {
1436 lck_mtx_t *mutex_held;
1437
1438 if (so->so_proto->pr_getlock != NULL) {
1439 mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
1440 } else {
1441 mutex_held = so->so_proto->pr_domain->dom_mtx;
1442 }
1443 ts.tv_sec = (so->so_linger / 100);
1444 ts.tv_nsec = (so->so_linger % 100) *
1445 NSEC_PER_USEC * 1000 * 10;
1446 error = msleep((caddr_t)&so->so_timeo,
1447 mutex_held, PSOCK | PCATCH, "soclose", &ts);
1448 if (error) {
1449 /*
1450 * It's OK when the timer fires,
1451 * don't report an error
1452 */
1453 if (error == EWOULDBLOCK) {
1454 error = 0;
1455 }
1456 break;
1457 }
1458 }
1459 }
1460 }
1461 drop:
1462 if (so->so_usecount == 0) {
1463 panic("soclose: usecount is zero so=%p", so);
1464 /* NOTREACHED */
1465 }
1466 if (so->so_pcb != NULL && !(so->so_flags & SOF_PCBCLEARING)) {
1467 int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
1468 if (error == 0) {
1469 error = error2;
1470 }
1471 }
1472 if (so->so_usecount <= 0) {
1473 panic("soclose: usecount is zero so=%p", so);
1474 /* NOTREACHED */
1475 }
1476 discard:
1477 if (so->so_pcb != NULL && !(so->so_flags & SOF_MP_SUBFLOW) &&
1478 (so->so_state & SS_NOFDREF)) {
1479 panic("soclose: NOFDREF");
1480 /* NOTREACHED */
1481 }
1482 so->so_state |= SS_NOFDREF;
1483
1484 if ((so->so_flags & SOF_KNOTE) != 0) {
1485 KNOTE(&so->so_klist, SO_FILT_HINT_LOCKED);
1486 }
1487
1488 os_atomic_dec(&so->so_proto->pr_domain->dom_refs, relaxed);
1489
1490 VERIFY(so->so_usecount > 0);
1491 so->so_usecount--;
1492 sofree(so);
1493 return error;
1494 }
1495
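/*
 * File-table entry point for close: take the socket lock and either perform
 * the full close, or, when the socket is retained by the kernel
 * (so_retaincnt != 0), just drop the file descriptor's use count.
 */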
1496 int
1497 soclose(struct socket *so)
1498 {
1499 int error = 0;
1500 socket_lock(so, 1);
1501
1502 if (so->so_retaincnt == 0) {
1503 error = soclose_locked(so);
1504 } else {
1505 /*
1506 * If the FD is going away but the socket is
1507 * retained in the kernel, remove its reference.
1508 */
1509 so->so_usecount--;
1510 if (so->so_usecount < 2) {
1511 panic("soclose: retaincnt non null and so=%p "
1512 "usecount=%d\n", so, so->so_usecount);
1513 }
1514 }
1515 socket_unlock(so, 1);
1516 return error;
1517 }
1518
1519 /*
1520 * Must be called at splnet...
1521 */
1522 /* Should already be locked */
1523 int
1524 soabort(struct socket *so)
1525 {
1526 int error;
1527
1528 #ifdef MORE_LOCKING_DEBUG
1529 lck_mtx_t *mutex_held;
1530
1531 if (so->so_proto->pr_getlock != NULL) {
1532 mutex_held = (*so->so_proto->pr_getlock)(so, 0);
1533 } else {
1534 mutex_held = so->so_proto->pr_domain->dom_mtx;
1535 }
1536 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
1537 #endif
1538
1539 if ((so->so_flags & SOF_ABORTED) == 0) {
1540 so->so_flags |= SOF_ABORTED;
1541 error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
1542 if (error) {
1543 sofree(so);
1544 return error;
1545 }
1546 }
1547 return 0;
1548 }
1549
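/*
 * Complete the kernel side of accept(2): clear SS_NOFDREF on the newly
 * accepted socket and ask the protocol (pru_accept) for the peer address.
 */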
1550 int
1551 soacceptlock(struct socket *so, struct sockaddr **nam, int dolock)
1552 {
1553 int error;
1554
1555 if (dolock) {
1556 socket_lock(so, 1);
1557 }
1558
1559 so_update_last_owner_locked(so, PROC_NULL);
1560 so_update_policy(so);
1561 #if NECP
1562 so_update_necp_policy(so, NULL, NULL);
1563 #endif /* NECP */
1564
1565 if ((so->so_state & SS_NOFDREF) == 0) {
1566 panic("soaccept: !NOFDREF");
1567 }
1568 so->so_state &= ~SS_NOFDREF;
1569 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
1570
1571 if (dolock) {
1572 socket_unlock(so, 1);
1573 }
1574 return error;
1575 }
1576
1577 int
1578 soaccept(struct socket *so, struct sockaddr **nam)
1579 {
1580 return soacceptlock(so, nam, 1);
1581 }
1582
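/*
 * Give accept-time socket filters a chance to examine a newly accepted
 * socket. If a filter rejects it, the socket is closed here and the error
 * is propagated; EJUSTRETURN marks the socket defunct but still hands it
 * back to the caller.
 */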
1583 int
1584 soacceptfilter(struct socket *so, struct socket *head)
1585 {
1586 struct sockaddr *__single local = NULL, *__single remote = NULL;
1587 int error = 0;
1588
1589 /*
1590 * Hold the lock even if this socket has not been made visible
1591 * to the filter(s). For sockets with global locks, this protects
1592 * against the head or peer going away
1593 */
1594 socket_lock(so, 1);
1595 if (sogetaddr_locked(so, &remote, 1) != 0 ||
1596 sogetaddr_locked(so, &local, 0) != 0) {
1597 so->so_state &= ~SS_NOFDREF;
1598 socket_unlock(so, 1);
1599 soclose(so);
1600 /* Out of resources; try it again next time */
1601 error = ECONNABORTED;
1602 goto done;
1603 }
1604
1605 error = sflt_accept(head, so, local, remote);
1606
1607 /*
1608 * If we get EJUSTRETURN from one of the filters, mark this socket
1609 * as inactive and return it anyway. This newly accepted socket
1610 * will be disconnected later before we hand it off to the caller.
1611 */
1612 if (error == EJUSTRETURN) {
1613 error = 0;
1614 (void) sosetdefunct(current_proc(), so,
1615 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL, FALSE);
1616 }
1617
1618 if (error != 0) {
1619 /*
1620 * This may seem like a duplication to the above error
1621 * handling part when we return ECONNABORTED, except
1622 * the following is done while holding the lock since
1623 * the socket has been exposed to the filter(s) earlier.
1624 */
1625 so->so_state &= ~SS_NOFDREF;
1626 socket_unlock(so, 1);
1627 soclose(so);
1628 /* Propagate socket filter's error code to the caller */
1629 } else {
1630 socket_unlock(so, 1);
1631 }
1632 done:
1633 /* Callee checks for NULL pointer */
1634 sock_freeaddr(remote);
1635 sock_freeaddr(local);
1636 return error;
1637 }
1638
1639 /*
1640 * Returns: 0 Success
1641 * EOPNOTSUPP Operation not supported on socket
1642 * EISCONN Socket is connected
1643 * <pru_connect>:EADDRNOTAVAIL Address not available.
1644 * <pru_connect>:EINVAL Invalid argument
1645 * <pru_connect>:EAFNOSUPPORT Address family not supported [notdef]
1646 * <pru_connect>:EACCES Permission denied
1647 * <pru_connect>:EADDRINUSE Address in use
1648 * <pru_connect>:EAGAIN Resource unavailable, try again
1649 * <pru_connect>:EPERM Operation not permitted
1650 * <sf_connect_out>:??? [anything a filter writer might set]
1651 */
1652 int
1653 soconnectlock(struct socket *so, struct sockaddr *nam, int dolock)
1654 {
1655 int error;
1656 struct proc *p = current_proc();
1657 tracker_metadata_t metadata = { };
1658
1659 if (dolock) {
1660 socket_lock(so, 1);
1661 }
1662
1663 so_update_last_owner_locked(so, p);
1664 so_update_policy(so);
1665
1666 /*
1667 * If this is a listening socket or if this is a previously-accepted
1668 * socket that has been marked as inactive, reject the connect request.
1669 */
1670 if ((so->so_options & SO_ACCEPTCONN) || (so->so_flags & SOF_DEFUNCT)) {
1671 error = EOPNOTSUPP;
1672 if (so->so_flags & SOF_DEFUNCT) {
1673 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llu [%d,%d] "
1674 "(%d)\n", __func__, proc_pid(p),
1675 proc_best_name(p),
1676 so->so_gencnt,
1677 SOCK_DOM(so), SOCK_TYPE(so), error);
1678 }
1679 if (dolock) {
1680 socket_unlock(so, 1);
1681 }
1682 return error;
1683 }
1684
1685 if ((so->so_restrictions & SO_RESTRICT_DENY_OUT) != 0) {
1686 if (dolock) {
1687 socket_unlock(so, 1);
1688 }
1689 return EPERM;
1690 }
1691
1692 /*
1693 * If protocol is connection-based, can only connect once.
1694 * Otherwise, if connected, try to disconnect first.
1695 * This allows user to disconnect by connecting to, e.g.,
1696 * a null address.
1697 */
1698 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING) &&
1699 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
1700 (error = sodisconnectlocked(so)))) {
1701 error = EISCONN;
1702 } else {
1703 /*
1704 * For connected v4/v6 sockets, check if the destination address is associated with a domain name and if it is
1705 * a tracker domain. Mark the socket accordingly. Skip the lookup if the socket has already been marked a tracker.
1706 */
1707 if (!(so->so_flags1 & SOF1_KNOWN_TRACKER) && IS_INET(so)) {
1708 if (tracker_lookup(so->so_flags & SOF_DELEGATED ? so->e_uuid : so->last_uuid, nam, &metadata) == 0) {
1709 if (metadata.flags & SO_TRACKER_ATTRIBUTE_FLAGS_TRACKER) {
1710 so->so_flags1 |= SOF1_KNOWN_TRACKER;
1711 }
1712 if (metadata.flags & SO_TRACKER_ATTRIBUTE_FLAGS_APP_APPROVED) {
1713 so->so_flags1 |= SOF1_APPROVED_APP_DOMAIN;
1714 }
1715 necp_set_socket_domain_attributes(so,
1716 __unsafe_null_terminated_from_indexable(metadata.domain),
1717 __unsafe_null_terminated_from_indexable(metadata.domain_owner));
1718 }
1719 }
1720
1721 #if NECP
1722 /* Update NECP evaluation after setting any domain via the tracker checks */
1723 so_update_necp_policy(so, NULL, nam);
1724 #endif /* NECP */
1725
1726 /*
1727 * Run connect filter before calling protocol:
1728 * - non-blocking connect returns before completion;
1729 */
1730 error = sflt_connectout(so, nam);
1731 if (error != 0) {
1732 if (error == EJUSTRETURN) {
1733 error = 0;
1734 }
1735 } else {
1736 error = (*so->so_proto->pr_usrreqs->pru_connect)
1737 (so, nam, p);
1738 if (error != 0) {
1739 so->so_state &= ~SS_ISCONNECTING;
1740 }
1741 }
1742 }
1743 if (dolock) {
1744 socket_unlock(so, 1);
1745 }
1746 return error;
1747 }
1748
1749 int
1750 soconnect(struct socket *so, struct sockaddr *nam)
1751 {
1752 return soconnectlock(so, nam, 1);
1753 }
1754
1755 /*
1756 * Returns: 0 Success
1757 * <pru_connect2>:EINVAL[AF_UNIX]
1758 * <pru_connect2>:EPROTOTYPE[AF_UNIX]
1759 * <pru_connect2>:??? [other protocol families]
1760 *
1761 * Notes: <pru_connect2> is not supported by [TCP].
1762 */
1763 int
1764 soconnect2(struct socket *so1, struct socket *so2)
1765 {
1766 int error;
1767
1768 socket_lock(so1, 1);
1769 if (so2->so_proto->pr_lock) {
1770 socket_lock(so2, 1);
1771 }
1772
1773 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
1774
1775 socket_unlock(so1, 1);
1776 if (so2->so_proto->pr_lock) {
1777 socket_unlock(so2, 1);
1778 }
1779 return error;
1780 }
1781
1782 int
1783 soconnectxlocked(struct socket *so, struct sockaddr *src,
1784 struct sockaddr *dst, struct proc *p, uint32_t ifscope,
1785 sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
1786 uint32_t arglen, uio_t auio, user_ssize_t *bytes_written)
1787 {
1788 int error;
1789 tracker_metadata_t metadata = { };
1790
1791 so_update_last_owner_locked(so, p);
1792 so_update_policy(so);
1793
1794 /*
1795 * If this is a listening socket or if this is a previously-accepted
1796 * socket that has been marked as inactive, reject the connect request.
1797 */
1798 if ((so->so_options & SO_ACCEPTCONN) || (so->so_flags & SOF_DEFUNCT)) {
1799 error = EOPNOTSUPP;
1800 if (so->so_flags & SOF_DEFUNCT) {
1801 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llu [%d,%d] "
1802 "(%d)\n", __func__, proc_pid(p),
1803 proc_best_name(p),
1804 so->so_gencnt,
1805 SOCK_DOM(so), SOCK_TYPE(so), error);
1806 }
1807 return error;
1808 }
1809
1810 if ((so->so_restrictions & SO_RESTRICT_DENY_OUT) != 0) {
1811 return EPERM;
1812 }
1813
1814 /*
1815 * If protocol is connection-based, can only connect once
1816 * unless PR_MULTICONN is set. Otherwise, if connected,
1817 * try to disconnect first. This allows user to disconnect
1818 * by connecting to, e.g., a null address.
1819 */
1820 if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) &&
1821 !(so->so_proto->pr_flags & PR_MULTICONN) &&
1822 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
1823 (error = sodisconnectlocked(so)) != 0)) {
1824 error = EISCONN;
1825 } else {
1826 /*
1827 		 * For TCP, check if the destination address is a tracker and mark the socket accordingly
1828 * (only if it hasn't been marked yet).
1829 */
1830 if (SOCK_CHECK_TYPE(so, SOCK_STREAM) && SOCK_CHECK_PROTO(so, IPPROTO_TCP) &&
1831 !(so->so_flags1 & SOF1_KNOWN_TRACKER)) {
1832 if (tracker_lookup(so->so_flags & SOF_DELEGATED ? so->e_uuid : so->last_uuid, dst, &metadata) == 0) {
1833 if (metadata.flags & SO_TRACKER_ATTRIBUTE_FLAGS_TRACKER) {
1834 so->so_flags1 |= SOF1_KNOWN_TRACKER;
1835 }
1836 if (metadata.flags & SO_TRACKER_ATTRIBUTE_FLAGS_APP_APPROVED) {
1837 so->so_flags1 |= SOF1_APPROVED_APP_DOMAIN;
1838 }
1839 necp_set_socket_domain_attributes(so, __unsafe_null_terminated_from_indexable(metadata.domain),
1840 __unsafe_null_terminated_from_indexable(metadata.domain_owner));
1841 }
1842 }
1843
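		/*
		 * CONNECT_DATA_IDEMPOTENT asks for data to be sent along with the
		 * connection handshake (e.g. TCP Fast Open); honor it only when the
		 * protocol declares support for idempotent pre-connection data.
		 */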
1844 if ((so->so_proto->pr_flags & PR_DATA_IDEMPOTENT) &&
1845 (flags & CONNECT_DATA_IDEMPOTENT)) {
1846 so->so_flags1 |= SOF1_DATA_IDEMPOTENT;
1847
1848 if (flags & CONNECT_DATA_AUTHENTICATED) {
1849 so->so_flags1 |= SOF1_DATA_AUTHENTICATED;
1850 }
1851 }
1852
1853 /*
1854 * Case 1: CONNECT_RESUME_ON_READ_WRITE set, no data.
1855 * Case 2: CONNECT_RESUME_ON_READ_WRITE set, with data (user error)
1856 * Case 3: CONNECT_RESUME_ON_READ_WRITE not set, with data
1857 		 * Case 3 allows the user to combine write with connect even if they have
1858 		 * no use for TFO (such as regular TCP or UDP).
1859 * Case 4: CONNECT_RESUME_ON_READ_WRITE not set, no data (regular case)
1860 */
1861 if ((so->so_proto->pr_flags & PR_PRECONN_WRITE) &&
1862 ((flags & CONNECT_RESUME_ON_READ_WRITE) || auio)) {
1863 so->so_flags1 |= SOF1_PRECONNECT_DATA;
1864 }
1865
1866 /*
1867 		 * If the user set data idempotent but SOF1_PRECONNECT_DATA was not set
1868 		 * above (e.g. no uio was passed and CONNECT_RESUME_ON_READ_WRITE was not
1869 		 * set), this is a user error; reset SOF1_DATA_IDEMPOTENT.
1870 */
1871 if (!(so->so_flags1 & SOF1_PRECONNECT_DATA) &&
1872 (so->so_flags1 & SOF1_DATA_IDEMPOTENT)) {
1873 /* We should return EINVAL instead perhaps. */
1874 so->so_flags1 &= ~SOF1_DATA_IDEMPOTENT;
1875 }
1876
1877 /*
1878 * Run connect filter before calling protocol:
1879 * - non-blocking connect returns before completion;
1880 */
1881 error = sflt_connectout(so, dst);
1882 if (error != 0) {
1883 /* Disable PRECONNECT_DATA, as we don't need to send a SYN anymore. */
1884 so->so_flags1 &= ~SOF1_PRECONNECT_DATA;
1885 if (error == EJUSTRETURN) {
1886 error = 0;
1887 }
1888 } else {
1889 error = (*so->so_proto->pr_usrreqs->pru_connectx)
1890 (so, src, dst, p, ifscope, aid, pcid,
1891 flags, arg, arglen, auio, bytes_written);
1892 if (error != 0) {
1893 so->so_state &= ~SS_ISCONNECTING;
1894 if (error != EINPROGRESS) {
1895 so->so_flags1 &= ~SOF1_PRECONNECT_DATA;
1896 }
1897 }
1898 }
1899 }
1900
1901 return error;
1902 }
1903
1904 int
1905 sodisconnectlocked(struct socket *so)
1906 {
1907 int error;
1908
1909 if ((so->so_state & SS_ISCONNECTED) == 0) {
1910 error = ENOTCONN;
1911 goto bad;
1912 }
1913 if (so->so_state & SS_ISDISCONNECTING) {
1914 error = EALREADY;
1915 goto bad;
1916 }
1917
1918 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
1919 if (error == 0) {
1920 sflt_notify(so, sock_evt_disconnected, NULL);
1921 }
1922
1923 bad:
1924 return error;
1925 }
1926
1927 /* Locking version */
1928 int
1929 sodisconnect(struct socket *so)
1930 {
1931 int error;
1932
1933 socket_lock(so, 1);
1934 error = sodisconnectlocked(so);
1935 socket_unlock(so, 1);
1936 return error;
1937 }
1938
1939 int
1940 sodisconnectxlocked(struct socket *so, sae_associd_t aid, sae_connid_t cid)
1941 {
1942 int error;
1943
1944 /*
1945 * Call the protocol disconnectx handler; let it handle all
1946 * matters related to the connection state of this session.
1947 */
1948 error = (*so->so_proto->pr_usrreqs->pru_disconnectx)(so, aid, cid);
1949 if (error == 0) {
1950 /*
1951 * The event applies only for the session, not for
1952 * the disconnection of individual subflows.
1953 */
1954 if (so->so_state & (SS_ISDISCONNECTING | SS_ISDISCONNECTED)) {
1955 sflt_notify(so, sock_evt_disconnected, NULL);
1956 }
1957 }
1958 return error;
1959 }
1960
1961 int
1962 sodisconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
1963 {
1964 int error;
1965
1966 socket_lock(so, 1);
1967 error = sodisconnectxlocked(so, aid, cid);
1968 socket_unlock(so, 1);
1969 return error;
1970 }
1971
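/* sblock() wait flag: don't wait for the send buffer lock when MSG_DONTWAIT is set */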
1972 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
1973
1974 /*
1975 * sosendcheck will lock the socket buffer if it isn't locked and
1976 * verify that there is space for the data being inserted.
1977 *
1978 * Returns: 0 Success
1979 * EPIPE
1980 * sblock:EWOULDBLOCK
1981 * sblock:EINTR
1982 * sbwait:EBADF
1983 * sbwait:EINTR
1984 * [so_error]:???
1985 */
1986 int
1987 sosendcheck(struct socket *so, struct sockaddr *addr, user_ssize_t resid,
1988 int32_t clen, int32_t atomic, int flags, int *sblocked)
1989 {
1990 int error = 0;
1991 int32_t space;
1992 int assumelock = 0;
1993
1994 restart:
1995 if (*sblocked == 0) {
1996 if ((so->so_snd.sb_flags & SB_LOCK) != 0 &&
1997 so->so_send_filt_thread != 0 &&
1998 so->so_send_filt_thread == current_thread()) {
1999 /*
2000 * We're being called recursively from a filter,
2001 * allow this to continue. Radar 4150520.
2002 * Don't set sblocked because we don't want
2003 * to perform an unlock later.
2004 */
2005 assumelock = 1;
2006 } else {
2007 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
2008 if (error) {
2009 if (so->so_flags & SOF_DEFUNCT) {
2010 goto defunct;
2011 }
2012 return error;
2013 }
2014 *sblocked = 1;
2015 }
2016 }
2017
2018 /*
2019 * If a send attempt is made on a socket that has been marked
2020 * as inactive (disconnected), reject the request.
2021 */
2022 if (so->so_flags & SOF_DEFUNCT) {
2023 defunct:
2024 error = EPIPE;
2025 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llu [%d,%d] (%d)\n",
2026 __func__, proc_selfpid(), proc_best_name(current_proc()),
2027 so->so_gencnt,
2028 SOCK_DOM(so), SOCK_TYPE(so), error);
2029 return error;
2030 }
2031
2032 if (so->so_state & SS_CANTSENDMORE) {
2033 #if CONTENT_FILTER
2034 /*
2035 * Can re-inject data of half closed connections
2036 */
2037 if ((so->so_state & SS_ISDISCONNECTED) == 0 &&
2038 so->so_snd.sb_cfil_thread == current_thread() &&
2039 cfil_sock_data_pending(&so->so_snd) != 0) {
2040 CFIL_LOG(LOG_INFO,
2041 "so %llx ignore SS_CANTSENDMORE",
2042 (uint64_t)DEBUG_KERNEL_ADDRPERM(so));
2043 } else
2044 #endif /* CONTENT_FILTER */
2045 return EPIPE;
2046 }
2047 if (so->so_error) {
2048 error = so->so_error;
2049 so->so_error = 0;
2050 return error;
2051 }
2052
2053 if ((so->so_state & SS_ISCONNECTED) == 0) {
2054 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) {
2055 if (((so->so_state & SS_ISCONFIRMING) == 0) &&
2056 (resid != 0 || clen == 0) &&
2057 !(so->so_flags1 & SOF1_PRECONNECT_DATA)) {
2058 return ENOTCONN;
2059 }
2060 } else if (addr == 0) {
2061 return (so->so_proto->pr_flags & PR_CONNREQUIRED) ?
2062 ENOTCONN : EDESTADDRREQ;
2063 }
2064 }
2065
2066 space = sbspace(&so->so_snd);
2067
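	/* Out-of-band sends are allowed a little slop beyond the normal buffer limit */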
2068 if (flags & MSG_OOB) {
2069 space += 1024;
2070 }
2071 if ((atomic && resid > so->so_snd.sb_hiwat) ||
2072 clen > so->so_snd.sb_hiwat) {
2073 return EMSGSIZE;
2074 }
2075
2076 if ((space < resid + clen &&
2077 (atomic || (space < (int32_t)so->so_snd.sb_lowat) ||
2078 space < clen)) ||
2079 (so->so_type == SOCK_STREAM && so_wait_for_if_feedback(so))) {
2080 /*
2081 * don't block the connectx call when there's more data
2082 * than can be copied.
2083 */
2084 if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
2085 if (space == 0) {
2086 return EWOULDBLOCK;
2087 }
2088 if (space < (int32_t)so->so_snd.sb_lowat) {
2089 return 0;
2090 }
2091 }
2092 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO) ||
2093 assumelock) {
2094 return EWOULDBLOCK;
2095 }
2096 sbunlock(&so->so_snd, TRUE); /* keep socket locked */
2097 *sblocked = 0;
2098 error = sbwait(&so->so_snd);
2099 if (error) {
2100 if (so->so_flags & SOF_DEFUNCT) {
2101 goto defunct;
2102 }
2103 return error;
2104 }
2105 goto restart;
2106 }
2107 return 0;
2108 }
2109
2110 /*
2111 * Send on a socket.
2112 * If send must go all at once and message is larger than
2113 * send buffering, then hard error.
2114 * Lock against other senders.
2115 * If must go all at once and not enough room now, then
2116 * inform user that this would block and do nothing.
2117 * Otherwise, if nonblocking, send as much as possible.
2118 * The data to be sent is described by "uio" if nonzero,
2119 * otherwise by the mbuf chain "top" (which must be null
2120 * if uio is not). Data provided in mbuf chain must be small
2121 * enough to send all at once.
2122 *
2123 * Returns nonzero on error, timeout or signal; callers
2124 * must check for short counts if EINTR/ERESTART are returned.
2125 * Data and control buffers are freed on return.
2126 *
2127 * Returns: 0 Success
2128 * EOPNOTSUPP
2129 * EINVAL
2130 * ENOBUFS
2131 * uiomove:EFAULT
2132 * sosendcheck:EPIPE
2133 * sosendcheck:EWOULDBLOCK
2134 * sosendcheck:EINTR
2135 * sosendcheck:EBADF
2136 * sosendcheck:EINTR
2137 * sosendcheck:??? [value from so_error]
2138 * <pru_send>:ECONNRESET[TCP]
2139 * <pru_send>:EINVAL[TCP]
2140 * <pru_send>:ENOBUFS[TCP]
2141 * <pru_send>:EADDRINUSE[TCP]
2142 * <pru_send>:EADDRNOTAVAIL[TCP]
2143 * <pru_send>:EAFNOSUPPORT[TCP]
2144 * <pru_send>:EACCES[TCP]
2145 * <pru_send>:EAGAIN[TCP]
2146 * <pru_send>:EPERM[TCP]
2147 * <pru_send>:EMSGSIZE[TCP]
2148 * <pru_send>:EHOSTUNREACH[TCP]
2149 * <pru_send>:ENETUNREACH[TCP]
2150 * <pru_send>:ENETDOWN[TCP]
2151 * <pru_send>:ENOMEM[TCP]
2152 * <pru_send>:ENOBUFS[TCP]
2153 * <pru_send>:???[TCP] [ignorable: mostly IPSEC/firewall/DLIL]
2154 * <pru_send>:EINVAL[AF_UNIX]
2155 * <pru_send>:EOPNOTSUPP[AF_UNIX]
2156 * <pru_send>:EPIPE[AF_UNIX]
2157 * <pru_send>:ENOTCONN[AF_UNIX]
2158 * <pru_send>:EISCONN[AF_UNIX]
2159 * <pru_send>:???[AF_UNIX] [whatever a filter author chooses]
2160 * <sf_data_out>:??? [whatever a filter author chooses]
2161 *
2162 * Notes: Other <pru_send> returns depend on the protocol family; all
2163 * <sf_data_out> returns depend on what the filter author causes
2164 * their filter to return.
2165 */
2166 int
2167 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
2168 struct mbuf *top, struct mbuf *control, int flags)
2169 {
2170 mbuf_ref_ref_t mp;
2171 mbuf_ref_t m, freelist = NULL;
2172 struct soflow_hash_entry *__single dgram_flow_entry = NULL;
2173 user_ssize_t space, len, resid, orig_resid;
2174 int clen = 0, error, dontroute, sendflags;
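	/* Atomic sends (datagram protocols, or a caller-supplied "top" chain) must be queued in one shot */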
2175 int atomic = sosendallatonce(so) || top;
2176 int sblocked = 0;
2177 struct proc *p = current_proc();
2178 uint16_t headroom = 0;
2179 ssize_t mlen;
2180 boolean_t en_tracing = FALSE;
2181
2182 if (uio != NULL) {
2183 resid = uio_resid(uio);
2184 } else {
2185 resid = top->m_pkthdr.len;
2186 }
2187 orig_resid = resid;
2188
2189 KERNEL_DEBUG((DBG_FNC_SOSEND | DBG_FUNC_START), so, resid,
2190 so->so_snd.sb_cc, so->so_snd.sb_lowat, so->so_snd.sb_hiwat);
2191
2192 socket_lock(so, 1);
2193
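	/*
	 * Datagram sockets that require flow tracking get a flow entry for this
	 * outbound send; it is handed to the content filter further down.
	 */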
2194 if (NEED_DGRAM_FLOW_TRACKING(so)) {
2195 dgram_flow_entry = soflow_get_flow(so, NULL, addr, control, resid, SOFLOW_DIRECTION_OUTBOUND, 0);
2196 }
2197
2198 /*
2199 	 * Trace if tracing is enabled, the socket is a network (vs. unix)
2200 	 * socket, and the route is non-loopback
2201 */
2202 if (ENTR_SHOULDTRACE &&
2203 (SOCK_CHECK_DOM(so, AF_INET) || SOCK_CHECK_DOM(so, AF_INET6))) {
2204 struct inpcb *inp = sotoinpcb(so);
2205 if (inp->inp_last_outifp != NULL &&
2206 !(inp->inp_last_outifp->if_flags & IFF_LOOPBACK)) {
2207 en_tracing = TRUE;
2208 KERNEL_ENERGYTRACE(kEnTrActKernSockWrite, DBG_FUNC_START,
2209 VM_KERNEL_ADDRPERM(so),
2210 ((so->so_state & SS_NBIO) ? kEnTrFlagNonBlocking : 0),
2211 (int64_t)resid);
2212 }
2213 }
2214
2215 /*
2216 * Re-injection should not affect process accounting
2217 */
2218 if ((flags & MSG_SKIPCFIL) == 0) {
2219 so_update_last_owner_locked(so, p);
2220 so_update_policy(so);
2221
2222 #if NECP
2223 so_update_necp_policy(so, NULL, addr);
2224 #endif /* NECP */
2225 }
2226
2227 if (so->so_type != SOCK_STREAM && (flags & MSG_OOB) != 0) {
2228 error = EOPNOTSUPP;
2229 goto out_locked;
2230 }
2231
2232 /*
2233 * In theory resid should be unsigned.
2234 * However, space must be signed, as it might be less than 0
2235 * if we over-committed, and we must use a signed comparison
2236 * of space and resid. On the other hand, a negative resid
2237 * causes us to loop sending 0-length segments to the protocol.
2238 *
2239 * Usually, MSG_EOR isn't used on SOCK_STREAM type sockets.
2240 *
2241 * Note: We limit resid to be a positive int value as we use
2242 * imin() to set bytes_to_copy -- radr://14558484
2243 */
2244 if (resid < 0 || resid > INT_MAX ||
2245 (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
2246 error = EINVAL;
2247 goto out_locked;
2248 }
2249
2250 dontroute = (flags & MSG_DONTROUTE) &&
2251 (so->so_options & SO_DONTROUTE) == 0 &&
2252 (so->so_proto->pr_flags & PR_ATOMIC);
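	/* Account one sent message against the sending process's resource usage */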
2253 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd);
2254
2255 if (control != NULL) {
2256 clen = control->m_len;
2257 }
2258
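	/* Optionally reserve leading space in the first mbuf/cluster for protocol headers */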
2259 if (soreserveheadroom != 0) {
2260 headroom = so->so_pktheadroom;
2261 }
2262
2263 do {
2264 error = sosendcheck(so, addr, resid, clen, atomic, flags,
2265 &sblocked);
2266 if (error) {
2267 goto out_locked;
2268 }
2269
2270 mp = ⊤
2271 space = sbspace(&so->so_snd) - clen;
2272 space += ((flags & MSG_OOB) ? 1024 : 0);
2273
2274 do {
2275 if (uio == NULL) {
2276 /*
2277 * Data is prepackaged in "top".
2278 */
2279 resid = 0;
2280 if (flags & MSG_EOR) {
2281 top->m_flags |= M_EOR;
2282 }
2283 } else {
2284 int chainlength;
2285 int bytes_to_copy;
2286 boolean_t jumbocl;
2287 boolean_t bigcl;
2288 int bytes_to_alloc;
2289
2290 bytes_to_copy = imin((int)resid, (int)space);
2291
2292 bytes_to_alloc = bytes_to_copy;
2293 if (top == NULL) {
2294 bytes_to_alloc += headroom;
2295 }
2296
2297 if (sosendminchain > 0) {
2298 chainlength = 0;
2299 } else {
2300 chainlength = sosendmaxchain;
2301 }
2302
2303 /*
2304 * Use big 4 KB cluster when the outgoing interface
2305 * does not prefer 2 KB clusters
2306 */
2307 bigcl = !(so->so_flags1 & SOF1_IF_2KCL) ||
2308 sosendbigcl_ignore_capab;
2309
2310 /*
2311 * Attempt to use larger than system page-size
2312 * clusters for large writes only if there is
2313 * a jumbo cluster pool and if the socket is
2314 * marked accordingly.
2315 */
2316 jumbocl = sosendjcl && njcl > 0 &&
2317 ((so->so_flags & SOF_MULTIPAGES) ||
2318 sosendjcl_ignore_capab) &&
2319 bigcl;
2320
2321 socket_unlock(so, 0);
2322
2323 do {
2324 int num_needed;
2325 int hdrs_needed = (top == NULL) ? 1 : 0;
2326
2327 /*
2328 				 * Try to maintain a local cache of mbuf
2329 				 * clusters needed to complete this
2330 				 * write; the list is further limited to
2331 				 * the number that are currently needed
2332 				 * to fill the socket. This mechanism
2333 				 * allows a large number of mbufs/
2334 				 * clusters to be grabbed under a single
2335 				 * mbuf lock. If we can't get any
2336 				 * clusters, then fall back to trying
2337 				 * for mbufs. If we fail early (or
2338 				 * miscalculate the number needed), make
2339 				 * sure to release any clusters we
2340 				 * haven't yet consumed.
2341 */
2342 if (freelist == NULL &&
2343 bytes_to_alloc > MBIGCLBYTES &&
2344 jumbocl) {
2345 num_needed =
2346 bytes_to_alloc / M16KCLBYTES;
2347
2348 if ((bytes_to_alloc -
2349 (num_needed * M16KCLBYTES))
2350 >= MINCLSIZE) {
2351 num_needed++;
2352 }
2353
2354 freelist =
2355 m_getpackets_internal(
2356 (unsigned int *)&num_needed,
2357 hdrs_needed, M_WAIT, 0,
2358 M16KCLBYTES);
2359 /*
2360 * Fall back to 4K cluster size
2361 * if allocation failed
2362 */
2363 }
2364
2365 if (freelist == NULL &&
2366 bytes_to_alloc > MCLBYTES &&
2367 bigcl) {
2368 num_needed =
2369 bytes_to_alloc / MBIGCLBYTES;
2370
2371 if ((bytes_to_alloc -
2372 (num_needed * MBIGCLBYTES)) >=
2373 MINCLSIZE) {
2374 num_needed++;
2375 }
2376
2377 freelist =
2378 m_getpackets_internal(
2379 (unsigned int *)&num_needed,
2380 hdrs_needed, M_WAIT, 0,
2381 MBIGCLBYTES);
2382 /*
2383 * Fall back to cluster size
2384 * if allocation failed
2385 */
2386 }
2387
2388 /*
2389 				 * Allocate a cluster as we want to
2390 				 * avoid splitting the data into more
2391 				 * than one segment; using MINCLSIZE
2392 				 * would lead us to allocate two mbufs
2393 */
2394 if (soreserveheadroom != 0 &&
2395 freelist == NULL &&
2396 ((top == NULL &&
2397 bytes_to_alloc > _MHLEN) ||
2398 bytes_to_alloc > _MLEN)) {
2399 num_needed = ROUNDUP(bytes_to_alloc, MCLBYTES) /
2400 MCLBYTES;
2401 freelist =
2402 m_getpackets_internal(
2403 (unsigned int *)&num_needed,
2404 hdrs_needed, M_WAIT, 0,
2405 MCLBYTES);
2406 /*
2407 * Fall back to a single mbuf
2408 * if allocation failed
2409 */
2410 } else if (freelist == NULL &&
2411 bytes_to_alloc > MINCLSIZE) {
2412 num_needed =
2413 bytes_to_alloc / MCLBYTES;
2414
2415 if ((bytes_to_alloc -
2416 (num_needed * MCLBYTES)) >=
2417 MINCLSIZE) {
2418 num_needed++;
2419 }
2420
2421 freelist =
2422 m_getpackets_internal(
2423 (unsigned int *)&num_needed,
2424 hdrs_needed, M_WAIT, 0,
2425 MCLBYTES);
2426 /*
2427 * Fall back to a single mbuf
2428 * if allocation failed
2429 */
2430 }
2431 /*
2432 * For datagram protocols, leave
2433 * headroom for protocol headers
2434 * in the first cluster of the chain
2435 */
2436 if (freelist != NULL && atomic &&
2437 top == NULL && headroom > 0) {
2438 freelist->m_data += headroom;
2439 }
2440
2441 /*
2442 * Fall back to regular mbufs without
2443 * reserving the socket headroom
2444 */
2445 if (freelist == NULL) {
2446 if (SOCK_TYPE(so) != SOCK_STREAM || bytes_to_alloc <= MINCLSIZE) {
2447 if (top == NULL) {
2448 MGETHDR(freelist,
2449 M_WAIT, MT_DATA);
2450 } else {
2451 MGET(freelist,
2452 M_WAIT, MT_DATA);
2453 }
2454 }
2455
2456 if (freelist == NULL) {
2457 error = ENOBUFS;
2458 socket_lock(so, 0);
2459 goto out_locked;
2460 }
2461 /*
2462 * For datagram protocols,
2463 * leave room for protocol
2464 * headers in first mbuf.
2465 */
2466 if (atomic && top == NULL &&
2467 bytes_to_copy > 0 &&
2468 bytes_to_copy < MHLEN) {
2469 MH_ALIGN(freelist,
2470 bytes_to_copy);
2471 }
2472 }
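				/* Pop the next pre-allocated mbuf/cluster off the local cache */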
2473 m = freelist;
2474 freelist = m->m_next;
2475 m->m_next = NULL;
2476
2477 if ((m->m_flags & M_EXT)) {
2478 mlen = m->m_ext.ext_size -
2479 M_LEADINGSPACE(m);
2480 } else if ((m->m_flags & M_PKTHDR)) {
2481 mlen = MHLEN - M_LEADINGSPACE(m);
2482 m_add_crumb(m, PKT_CRUMB_SOSEND);
2483 } else {
2484 mlen = MLEN - M_LEADINGSPACE(m);
2485 }
2486 len = imin((int)mlen, bytes_to_copy);
2487
2488 chainlength += len;
2489
2490 space -= len;
2491
2492 error = uiomove(mtod(m, caddr_t),
2493 (int)len, uio);
2494
2495 resid = uio_resid(uio);
2496
2497 m->m_len = (int32_t)len;
2498 *mp = m;
2499 top->m_pkthdr.len += len;
2500 if (error) {
2501 break;
2502 }
2503 mp = &m->m_next;
2504 if (resid <= 0) {
2505 if (flags & MSG_EOR) {
2506 top->m_flags |= M_EOR;
2507 }
2508 break;
2509 }
2510 bytes_to_copy = imin((int)resid, (int)space);
2511 } while (space > 0 &&
2512 (chainlength < sosendmaxchain || atomic ||
2513 resid < MINCLSIZE));
2514
2515 socket_lock(so, 0);
2516
2517 if (error) {
2518 goto out_locked;
2519 }
2520 }
2521
2522 if (dontroute) {
2523 so->so_options |= SO_DONTROUTE;
2524 }
2525
2526 /*
2527 * Compute flags here, for pru_send and NKEs
2528 *
2529 			 * If the user set MSG_EOF, the protocol
2530 			 * understands this flag, and there is nothing left to
2531 			 * send, then use PRU_SEND_EOF instead of PRU_SEND.
2532 */
2533 sendflags = (flags & MSG_OOB) ? PRUS_OOB :
2534 ((flags & MSG_EOF) &&
2535 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
2536 (resid <= 0)) ? PRUS_EOF :
2537 /* If there is more to send set PRUS_MORETOCOME */
2538 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;
2539
2540 if ((flags & MSG_SKIPCFIL) == 0) {
2541 /*
2542 * Socket filter processing
2543 */
2544 error = sflt_data_out(so, addr, &top,
2545 &control, (sendflags & MSG_OOB) ?
2546 sock_data_filt_flag_oob : 0);
2547 if (error) {
2548 if (error == EJUSTRETURN) {
2549 error = 0;
2550 goto packet_consumed;
2551 }
2552 goto out_locked;
2553 }
2554 #if CONTENT_FILTER
2555 /*
2556 * Content filter processing
2557 */
2558 error = cfil_sock_data_out(so, addr, top,
2559 control, sendflags, dgram_flow_entry);
2560 if (error) {
2561 if (error == EJUSTRETURN) {
2562 error = 0;
2563 goto packet_consumed;
2564 }
2565 goto out_locked;
2566 }
2567 #endif /* CONTENT_FILTER */
2568 }
2569 error = (*so->so_proto->pr_usrreqs->pru_send)
2570 (so, sendflags, top, addr, control, p);
2571
2572 packet_consumed:
2573 if (dontroute) {
2574 so->so_options &= ~SO_DONTROUTE;
2575 }
2576
2577 clen = 0;
2578 control = NULL;
2579 top = NULL;
2580 mp = ⊤
2581 if (error) {
2582 goto out_locked;
2583 }
2584 } while (resid && space > 0);
2585 } while (resid);
2586
2587
2588 out_locked:
2589 if (resid > orig_resid) {
2590 char pname[MAXCOMLEN] = {};
2591 pid_t current_pid = proc_pid(current_proc());
2592 proc_name(current_pid, pname, sizeof(pname));
2593
2594 if (sosend_assert_panic != 0) {
2595 panic("sosend so %p resid %lld > orig_resid %lld proc %s:%d",
2596 so, resid, orig_resid, pname, current_pid);
2597 } else {
2598 os_log_error(OS_LOG_DEFAULT, "sosend: so_gencnt %llu resid %lld > orig_resid %lld proc %s:%d",
2599 so->so_gencnt, resid, orig_resid, pname, current_pid);
2600 }
2601 }
2602
2603 if (sblocked) {
2604 sbunlock(&so->so_snd, FALSE); /* will unlock socket */
2605 } else {
2606 socket_unlock(so, 1);
2607 }
2608 if (top != NULL) {
2609 m_freem(top);
2610 }
2611 if (control != NULL) {
2612 m_freem(control);
2613 }
2614 if (freelist != NULL) {
2615 m_freem_list(freelist);
2616 }
2617
2618 if (dgram_flow_entry != NULL) {
2619 soflow_free_flow(dgram_flow_entry);
2620 }
2621
2622 soclearfastopen(so);
2623
2624 if (en_tracing) {
2625 /* resid passed here is the bytes left in uio */
2626 KERNEL_ENERGYTRACE(kEnTrActKernSockWrite, DBG_FUNC_END,
2627 VM_KERNEL_ADDRPERM(so),
2628 ((error == EWOULDBLOCK) ? kEnTrFlagNoWork : 0),
2629 (int64_t)(orig_resid - resid));
2630 }
2631 KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_END, so, resid,
2632 so->so_snd.sb_cc, space, error);
2633
2634 return error;
2635 }
2636
2637 int
2638 sosend_reinject(struct socket *so, struct sockaddr *addr, struct mbuf *top, struct mbuf *control, uint32_t sendflags)
2639 {
2640 struct mbuf *m0 = NULL, *control_end = NULL;
2641
2642 socket_lock_assert_owned(so);
2643
2644 /*
2645 	 * top must point to the mbuf chain to be sent.
2646 	 * If control is not NULL, top must be a packet header.
2647 */
2648 VERIFY(top != NULL &&
2649 (control == NULL || top->m_flags & M_PKTHDR));
2650
2651 /*
2652 * If control is not passed in, see if we can get it
2653 * from top.
2654 */
2655 if (control == NULL && (top->m_flags & M_PKTHDR) == 0) {
2656 // Locate start of control if present and start of data
2657 for (m0 = top; m0 != NULL; m0 = m0->m_next) {
2658 if (m0->m_flags & M_PKTHDR) {
2659 top = m0;
2660 break;
2661 } else if (m0->m_type == MT_CONTROL) {
2662 if (control == NULL) {
2663 // Found start of control
2664 control = m0;
2665 }
2666 if (control != NULL && m0->m_next != NULL && m0->m_next->m_type != MT_CONTROL) {
2667 // Found end of control
2668 control_end = m0;
2669 }
2670 }
2671 }
2672 if (control_end != NULL) {
2673 control_end->m_next = NULL;
2674 }
2675 }
2676
2677 int error = (*so->so_proto->pr_usrreqs->pru_send)
2678 (so, sendflags, top, addr, control, current_proc());
2679
2680 return error;
2681 }
2682
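/*
 * Detach the leading MT_CONTROL mbufs from the chain at *mp and return them
 * as a separate chain; *mp is advanced to the first non-control mbuf.
 */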
2683 static struct mbuf *
2684 mbuf_detach_control_from_list(struct mbuf **mp)
2685 {
2686 struct mbuf *control = NULL;
2687 struct mbuf *m = *mp;
2688
2689 if (m->m_type == MT_CONTROL) {
2690 struct mbuf *control_end;
2691 struct mbuf *n;
2692
2693 n = control_end = control = m;
2694
2695 /*
2696 * Break the chain per mbuf type
2697 */
2698 while (n != NULL && n->m_type == MT_CONTROL) {
2699 control_end = n;
2700 n = n->m_next;
2701 }
2702 control_end->m_next = NULL;
2703 *mp = n;
2704 }
2705 VERIFY(*mp != NULL);
2706
2707 return control;
2708 }
2709
2710 /*
2711  * Supported only for connected sockets (no address) without ancillary data
2712  * (control mbuf), for atomic protocols
2713 */
2714 int
2715 sosend_list(struct socket *so, struct mbuf *pktlist, size_t total_len, u_int *pktcnt, int flags)
2716 {
2717 mbuf_ref_t m;
2718 struct soflow_hash_entry *__single dgram_flow_entry = NULL;
2719 int error, dontroute;
2720 int atomic = sosendallatonce(so);
2721 int sblocked = 0;
2722 struct proc *p = current_proc();
2723 struct mbuf *top = pktlist;
2724 bool skip_filt = (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) || (flags & MSG_SKIPCFIL);
2725
2726 	KERNEL_DEBUG((DBG_FNC_SOSEND_LIST | DBG_FUNC_START), so, total_len,
2727 so->so_snd.sb_cc, so->so_snd.sb_lowat, so->so_snd.sb_hiwat);
2728
2729 if (so->so_type != SOCK_DGRAM) {
2730 error = EINVAL;
2731 os_log(OS_LOG_DEFAULT, "sosend_list: so->so_type != SOCK_DGRAM error %d",
2732 error);
2733 goto out;
2734 }
2735 if (atomic == 0) {
2736 error = EINVAL;
2737 os_log(OS_LOG_DEFAULT, "sosend_list: atomic == 0 error %d",
2738 error);
2739 goto out;
2740 }
2741 if ((so->so_state & SS_ISCONNECTED) == 0) {
2742 error = ENOTCONN;
2743 os_log(OS_LOG_DEFAULT, "sosend_list: SS_ISCONNECTED not set error: %d",
2744 error);
2745 goto out;
2746 }
2747 if (flags & ~(MSG_DONTWAIT | MSG_NBIO | MSG_SKIPCFIL)) {
2748 error = EINVAL;
2749 os_log(OS_LOG_DEFAULT, "sosend_list: flags 0x%x error %d",
2750 flags, error);
2751 goto out;
2752 }
2753
2754 socket_lock(so, 1);
2755 so_update_last_owner_locked(so, p);
2756 so_update_policy(so);
2757
2758 if (NEED_DGRAM_FLOW_TRACKING(so)) {
2759 dgram_flow_entry = soflow_get_flow(so, NULL, NULL, NULL, total_len, SOFLOW_DIRECTION_OUTBOUND, 0);
2760 }
2761
2762 #if NECP
2763 so_update_necp_policy(so, NULL, NULL);
2764 #endif /* NECP */
2765
2766 dontroute = (flags & MSG_DONTROUTE) &&
2767 (so->so_options & SO_DONTROUTE) == 0 &&
2768 (so->so_proto->pr_flags & PR_ATOMIC);
2769 if (dontroute) {
2770 so->so_options |= SO_DONTROUTE;
2771 }
2772
2773 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd);
2774
2775 error = sosendcheck(so, NULL, 0, 0, atomic, flags, &sblocked);
2776 if (error) {
2777 os_log(OS_LOG_DEFAULT, "sosend_list: sosendcheck error %d",
2778 error);
2779 goto release;
2780 }
2781
2782 if (!skip_filt) {
2783 mbuf_ref_ref_t prevnextp = NULL;
2784
2785 for (m = top; m != NULL; m = m->m_nextpkt) {
2786 mbuf_ref_t control = NULL;
2787 mbuf_ref_t last_control = NULL;
2788 mbuf_ref_t nextpkt;
2789
2790 /*
2791 * Remove packet from the list of packets
2792 */
2793 nextpkt = m->m_nextpkt;
2794 if (prevnextp != NULL) {
2795 *prevnextp = nextpkt;
2796 } else {
2797 top = nextpkt;
2798 }
2799 m->m_nextpkt = NULL;
2800
2801 /*
2802 * Break the chain per mbuf type
2803 */
2804 if (m->m_type == MT_CONTROL) {
2805 control = mbuf_detach_control_from_list(&m);
2806 }
2807 /*
2808 * Socket filter processing
2809 */
2810 error = sflt_data_out(so, NULL, &m,
2811 &control, 0);
2812 if (error != 0 && error != EJUSTRETURN) {
2813 os_log(OS_LOG_DEFAULT, "sosend_list: sflt_data_out error %d",
2814 error);
2815 goto release;
2816 }
2817
2818 #if CONTENT_FILTER
2819 if (error == 0) {
2820 /*
2821 * Content filter processing
2822 */
2823 error = cfil_sock_data_out(so, NULL, m,
2824 control, 0, dgram_flow_entry);
2825 if (error != 0 && error != EJUSTRETURN) {
2826 os_log(OS_LOG_DEFAULT, "sosend_list: cfil_sock_data_out error %d",
2827 error);
2828 goto release;
2829 }
2830 }
2831 #endif /* CONTENT_FILTER */
2832 if (error == EJUSTRETURN) {
2833 /*
2834 * When swallowed by a filter, the packet is not
2835 * in the list anymore
2836 */
2837 error = 0;
2838 } else {
2839 /*
2840 * Rebuild the mbuf chain of the packet
2841 */
2842 				if (control != NULL) {
					/* Walk to the tail of the (possibly filter-modified) control chain */
					for (last_control = control; last_control->m_next != NULL; last_control = last_control->m_next) {
					}
2843 					last_control->m_next = m;
2844 					m = control;
2845 				}
2846 /*
2847 * Reinsert the packet in the list of packets
2848 */
2849 m->m_nextpkt = nextpkt;
2850 if (prevnextp != NULL) {
2851 *prevnextp = m;
2852 } else {
2853 top = m;
2854 }
2855 prevnextp = &m->m_nextpkt;
2856 }
2857 }
2858 }
2859
2860 if (top != NULL) {
2861 if (so->so_proto->pr_usrreqs->pru_send_list != pru_send_list_notsupp) {
2862 error = (*so->so_proto->pr_usrreqs->pru_send_list)
2863 (so, top, pktcnt, flags);
2864 if (error != 0 && error != ENOBUFS) {
2865 os_log(OS_LOG_DEFAULT, "sosend_list: pru_send_list error %d",
2866 error);
2867 }
2868 top = NULL;
2869 } else {
2870 *pktcnt = 0;
2871 for (m = top; m != NULL; m = top) {
2872 struct mbuf *control = NULL;
2873
2874 top = m->m_nextpkt;
2875 m->m_nextpkt = NULL;
2876
2877 /*
2878 * Break the chain per mbuf type
2879 */
2880 if (m->m_type == MT_CONTROL) {
2881 control = mbuf_detach_control_from_list(&m);
2882 }
2883
2884 error = (*so->so_proto->pr_usrreqs->pru_send)
2885 (so, 0, m, NULL, control, current_proc());
2886 if (error != 0) {
2887 if (error != ENOBUFS) {
2888 os_log(OS_LOG_DEFAULT, "sosend_list: pru_send error %d",
2889 error);
2890 }
2891 goto release;
2892 }
2893 *pktcnt += 1;
2894 }
2895 }
2896 }
2897
2898 release:
2899 if (dontroute) {
2900 so->so_options &= ~SO_DONTROUTE;
2901 }
2902 if (sblocked) {
2903 sbunlock(&so->so_snd, FALSE); /* will unlock socket */
2904 } else {
2905 socket_unlock(so, 1);
2906 }
2907 out:
2908 if (top != NULL) {
2909 if (error != ENOBUFS) {
2910 os_log(OS_LOG_DEFAULT, "sosend_list: m_freem_list(top) with error %d",
2911 error);
2912 }
2913 m_freem_list(top);
2914 }
2915
2916 if (dgram_flow_entry != NULL) {
2917 soflow_free_flow(dgram_flow_entry);
2918 }
2919
2920 	KERNEL_DEBUG(DBG_FNC_SOSEND_LIST | DBG_FUNC_END, so, total_len,
2921 so->so_snd.sb_cc, 0, error);
2922
2923 return error;
2924 }
2925
2926 /*
2927 * May return ERESTART when packet is dropped by MAC policy check
2928 */
2929 static int
2930 soreceive_addr(struct proc *p, struct socket *so, struct sockaddr **psa,
2931 struct mbuf **maddrp,
2932 int flags, struct mbuf **mp, struct mbuf **nextrecordp, int canwait)
2933 {
2934 int error = 0;
2935 struct mbuf *m = *mp;
2936 struct mbuf *nextrecord = *nextrecordp;
2937
2938 KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
2939 #if CONFIG_MACF_SOCKET_SUBSET
2940 /*
2941 * Call the MAC framework for policy checking if we're in
2942 * the user process context and the socket isn't connected.
2943 */
2944 if (p != kernproc && !(so->so_state & SS_ISCONNECTED)) {
2945 struct mbuf *m0 = m;
2946 /*
2947 * Dequeue this record (temporarily) from the receive
2948 * list since we're about to drop the socket's lock
2949 * where a new record may arrive and be appended to
2950 * the list. Upon MAC policy failure, the record
2951 * will be freed. Otherwise, we'll add it back to
2952 * the head of the list. We cannot rely on SB_LOCK
2953 * because append operation uses the socket's lock.
2954 */
2955 do {
2956 m->m_nextpkt = NULL;
2957 sbfree(&so->so_rcv, m);
2958 m = m->m_next;
2959 } while (m != NULL);
2960 m = m0;
2961 so->so_rcv.sb_mb = nextrecord;
2962 SB_EMPTY_FIXUP(&so->so_rcv);
2963 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1a");
2964 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1a");
2965 socket_unlock(so, 0);
2966
2967 error = mac_socket_check_received(kauth_cred_get(), so,
2968 mtod(m, struct sockaddr *));
2969
2970 if (error != 0) {
2971 /*
2972 * MAC policy failure; free this record and
2973 * process the next record (or block until
2974 * one is available). We have adjusted sb_cc
2975 * and sb_mbcnt above so there is no need to
2976 * call sbfree() again.
2977 */
2978 m_freem(m);
2979 /*
2980 * Clear SB_LOCK but don't unlock the socket.
2981 * Process the next record or wait for one.
2982 */
2983 socket_lock(so, 0);
2984 sbunlock(&so->so_rcv, TRUE); /* stay locked */
2985 error = ERESTART;
2986 goto done;
2987 }
2988 socket_lock(so, 0);
2989 /*
2990 * If the socket has been defunct'd, drop it.
2991 */
2992 if (so->so_flags & SOF_DEFUNCT) {
2993 m_freem(m);
2994 error = ENOTCONN;
2995 goto done;
2996 }
2997 /*
2998 * Re-adjust the socket receive list and re-enqueue
2999 * the record in front of any packets which may have
3000 * been appended while we dropped the lock.
3001 */
3002 for (m = m0; m->m_next != NULL; m = m->m_next) {
3003 sballoc(&so->so_rcv, m);
3004 }
3005 sballoc(&so->so_rcv, m);
3006 if (so->so_rcv.sb_mb == NULL) {
3007 so->so_rcv.sb_lastrecord = m0;
3008 so->so_rcv.sb_mbtail = m;
3009 }
3010 m = m0;
3011 nextrecord = m->m_nextpkt = so->so_rcv.sb_mb;
3012 so->so_rcv.sb_mb = m;
3013 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1b");
3014 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1b");
3015 }
3016 #endif /* CONFIG_MACF_SOCKET_SUBSET */
3017 if (psa != NULL) {
3018 *psa = dup_sockaddr(mtod(m, struct sockaddr *), canwait);
3019 if ((*psa == NULL) && (flags & MSG_NEEDSA)) {
3020 error = EWOULDBLOCK;
3021 goto done;
3022 }
3023 } else if (maddrp != NULL) {
3024 *maddrp = m;
3025 }
3026 if (flags & MSG_PEEK) {
3027 m = m->m_next;
3028 } else {
3029 sbfree(&so->so_rcv, m);
3030 if (m->m_next == NULL && so->so_rcv.sb_cc != 0) {
3031 panic("%s: about to create invalid socketbuf",
3032 __func__);
3033 /* NOTREACHED */
3034 }
3035 if (maddrp == NULL) {
3036 MFREE(m, so->so_rcv.sb_mb);
3037 } else {
3038 so->so_rcv.sb_mb = m->m_next;
3039 m->m_next = NULL;
3040 }
3041 m = so->so_rcv.sb_mb;
3042 if (m != NULL) {
3043 m->m_nextpkt = nextrecord;
3044 } else {
3045 so->so_rcv.sb_mb = nextrecord;
3046 SB_EMPTY_FIXUP(&so->so_rcv);
3047 }
3048 }
3049 done:
3050 *mp = m;
3051 *nextrecordp = nextrecord;
3052
3053 return error;
3054 }
3055
3056 /*
3057 * When peeking SCM_RIGHTS, the actual file descriptors are not yet created
3058 * so clear the data portion in order not to leak the file pointers
3059 */
3060 static void
3061 sopeek_scm_rights(struct mbuf *rights)
3062 {
3063 struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
3064
3065 if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) {
3066 VERIFY(cm->cmsg_len <= rights->m_len);
3067 memset(cm + 1, 0, cm->cmsg_len - sizeof(*cm));
3068 }
3069 }
3070
3071 /*
3072 * Process one or more MT_CONTROL mbufs present before any data mbufs
3073 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
3074 * just copy the data; if !MSG_PEEK, we call into the protocol to
3075 * perform externalization.
3076 */
3077 static int
3078 soreceive_ctl(struct socket *so, struct mbuf **controlp, int flags,
3079 struct mbuf **mp, struct mbuf **nextrecordp)
3080 {
3081 int error = 0;
3082 mbuf_ref_t cm = NULL, cmn;
3083 mbuf_ref_ref_t cme = &cm;
3084 struct sockbuf *sb_rcv = &so->so_rcv;
3085 mbuf_ref_ref_t msgpcm = NULL;
3086 mbuf_ref_t m = *mp;
3087 mbuf_ref_t nextrecord = *nextrecordp;
3088 struct protosw *pr = so->so_proto;
3089
3090 /*
3091 * Externalizing the control messages would require us to
3092 * drop the socket's lock below. Once we re-acquire the
3093 * lock, the mbuf chain might change. In order to preserve
3094 * consistency, we unlink all control messages from the
3095 * first mbuf chain in one shot and link them separately
3096 * onto a different chain.
3097 */
3098 do {
3099 if (flags & MSG_PEEK) {
3100 if (controlp != NULL) {
3101 if (*controlp == NULL) {
3102 msgpcm = controlp;
3103 }
3104 *controlp = m_copy(m, 0, m->m_len);
3105
3106 /*
3107 * If we failed to allocate an mbuf,
3108 * release any previously allocated
3109 * mbufs for control data. Return
3110 * an error. Keep the mbufs in the
3111 * socket as this is using
3112 * MSG_PEEK flag.
3113 */
3114 if (*controlp == NULL) {
3115 m_freem(*msgpcm);
3116 error = ENOBUFS;
3117 goto done;
3118 }
3119
3120 if (pr->pr_domain->dom_externalize != NULL) {
3121 sopeek_scm_rights(*controlp);
3122 }
3123
3124 controlp = &(*controlp)->m_next;
3125 }
3126 m = m->m_next;
3127 } else {
3128 m->m_nextpkt = NULL;
3129 sbfree(sb_rcv, m);
3130 sb_rcv->sb_mb = m->m_next;
3131 m->m_next = NULL;
3132 *cme = m;
3133 cme = &(*cme)->m_next;
3134 m = sb_rcv->sb_mb;
3135 }
3136 } while (m != NULL && m->m_type == MT_CONTROL);
3137
3138 if (!(flags & MSG_PEEK)) {
3139 if (sb_rcv->sb_mb != NULL) {
3140 sb_rcv->sb_mb->m_nextpkt = nextrecord;
3141 } else {
3142 sb_rcv->sb_mb = nextrecord;
3143 SB_EMPTY_FIXUP(sb_rcv);
3144 }
3145 if (nextrecord == NULL) {
3146 sb_rcv->sb_lastrecord = m;
3147 }
3148 }
3149
3150 SBLASTRECORDCHK(&so->so_rcv, "soreceive ctl");
3151 SBLASTMBUFCHK(&so->so_rcv, "soreceive ctl");
3152
3153 while (cm != NULL) {
3154 int cmsg_level;
3155 int cmsg_type;
3156
3157 cmn = cm->m_next;
3158 cm->m_next = NULL;
3159 cmsg_level = mtod(cm, struct cmsghdr *)->cmsg_level;
3160 cmsg_type = mtod(cm, struct cmsghdr *)->cmsg_type;
3161
3162 /*
3163 * Call the protocol to externalize SCM_RIGHTS message
3164 * and return the modified message to the caller upon
3165 * success. Otherwise, all other control messages are
3166 * returned unmodified to the caller. Note that we
3167 * only get into this loop if MSG_PEEK is not set.
3168 */
3169 if (pr->pr_domain->dom_externalize != NULL &&
3170 cmsg_level == SOL_SOCKET &&
3171 cmsg_type == SCM_RIGHTS) {
3172 /*
3173 * Release socket lock: see 3903171. This
3174 * would also allow more records to be appended
3175 * to the socket buffer. We still have SB_LOCK
3176 * set on it, so we can be sure that the head
3177 * of the mbuf chain won't change.
3178 */
3179 socket_unlock(so, 0);
3180 error = (*pr->pr_domain->dom_externalize)(cm);
3181 socket_lock(so, 0);
3182 } else {
3183 error = 0;
3184 }
3185
3186 if (controlp != NULL && error == 0) {
3187 *controlp = cm;
3188 controlp = &(*controlp)->m_next;
3189 } else {
3190 (void) m_free(cm);
3191 }
3192 cm = cmn;
3193 }
3194 /*
3195 * Update the value of nextrecord in case we received new
3196 * records when the socket was unlocked above for
3197 * externalizing SCM_RIGHTS.
3198 */
3199 if (m != NULL) {
3200 nextrecord = sb_rcv->sb_mb->m_nextpkt;
3201 } else {
3202 nextrecord = sb_rcv->sb_mb;
3203 }
3204
3205 done:
3206 *mp = m;
3207 *nextrecordp = nextrecord;
3208
3209 return error;
3210 }
3211
3212 /*
3213 * If we have less data than requested, block awaiting more
3214 * (subject to any timeout) if:
3215 * 1. the current count is less than the low water mark, or
3216 * 2. MSG_WAITALL is set, and it is possible to do the entire
3217 * receive operation at once if we block (resid <= hiwat).
3218 * 3. MSG_DONTWAIT is not set
3219 * If MSG_WAITALL is set but resid is larger than the receive buffer,
3220 * we have to do the receive in sections, and thus risk returning
3221 * a short count if a timeout or signal occurs after we start.
3222 */
3223 static boolean_t
3224 so_should_wait(struct socket *so, struct uio *uio, struct mbuf *m, int flags)
3225 {
3226 struct protosw *pr = so->so_proto;
3227
3228 /* No mbufs in the receive-queue? Wait! */
3229 if (m == NULL) {
3230 return true;
3231 }
3232
3233 /* Not enough data in the receive socket-buffer - we may have to wait */
3234 if ((flags & MSG_DONTWAIT) == 0 && so->so_rcv.sb_cc < uio_resid(uio) &&
3235 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0) {
3236 /*
3237 		 * Application did set the low-water mark, so we should wait for
3238 		 * this data to be present.
3239 */
3240 if (so->so_rcv.sb_cc < so->so_rcv.sb_lowat) {
3241 return true;
3242 }
3243
3244 /*
3245 * Application wants all the data - so let's try to do the
3246 * receive-operation at once by waiting for everything to
3247 * be there.
3248 */
3249 if ((flags & MSG_WAITALL) && uio_resid(uio) <= so->so_rcv.sb_hiwat) {
3250 return true;
3251 }
3252 }
3253
3254 return false;
3255 }
3256
3257 /*
3258 * Implement receive operations on a socket.
3259 * We depend on the way that records are added to the sockbuf
3260 * by sbappend*. In particular, each record (mbufs linked through m_next)
3261 * must begin with an address if the protocol so specifies,
3262 * followed by an optional mbuf or mbufs containing ancillary data,
3263 * and then zero or more mbufs of data.
3264 * In order to avoid blocking network interrupts for the entire time here,
3265 * we splx() while doing the actual copy to user space.
3266 * Although the sockbuf is locked, new data may still be appended,
3267 * and thus we must maintain consistency of the sockbuf during that time.
3268 *
3269 * The caller may receive the data as a single mbuf chain by supplying
3270 * an mbuf **mp0 for use in returning the chain. The uio is then used
3271 * only for the count in uio_resid.
3272 *
3273 * Returns: 0 Success
3274 * ENOBUFS
3275 * ENOTCONN
3276 * EWOULDBLOCK
3277 * uiomove:EFAULT
3278 * sblock:EWOULDBLOCK
3279 * sblock:EINTR
3280 * sbwait:EBADF
3281 * sbwait:EINTR
3282 * sodelayed_copy:EFAULT
3283 * <pru_rcvoob>:EINVAL[TCP]
3284 * <pru_rcvoob>:EWOULDBLOCK[TCP]
3285 * <pru_rcvoob>:???
3286 * <pr_domain->dom_externalize>:EMSGSIZE[AF_UNIX]
3287 * <pr_domain->dom_externalize>:ENOBUFS[AF_UNIX]
3288 * <pr_domain->dom_externalize>:???
3289 *
3290 * Notes: Additional return values from calls through <pru_rcvoob> and
3291 * <pr_domain->dom_externalize> depend on protocols other than
3292 * TCP or AF_UNIX, which are documented above.
3293 */
3294 int
3295 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
3296 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3297 {
3298 mbuf_ref_t m;
3299 mbuf_ref_ref_t mp;
3300 mbuf_ref_t ml = NULL;
3301 mbuf_ref_t nextrecord, free_list;
3302 int flags, error, offset;
3303 user_ssize_t len;
3304 struct protosw *pr = so->so_proto;
3305 int moff, type = 0;
3306 user_ssize_t orig_resid = uio_resid(uio);
3307 user_ssize_t delayed_copy_len;
3308 int can_delay;
3309 struct proc *p = current_proc();
3310 boolean_t en_tracing = FALSE;
3311
3312 /*
3313 * Sanity check on the length passed by caller as we are making 'int'
3314 * comparisons
3315 */
3316 if (orig_resid < 0 || orig_resid > INT_MAX) {
3317 return EINVAL;
3318 }
3319
3320 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START, so,
3321 uio_resid(uio), so->so_rcv.sb_cc, so->so_rcv.sb_lowat,
3322 so->so_rcv.sb_hiwat);
3323
3324 socket_lock(so, 1);
3325 so_update_last_owner_locked(so, p);
3326 so_update_policy(so);
3327
3328 #ifdef MORE_LOCKING_DEBUG
3329 if (so->so_usecount == 1) {
3330 panic("%s: so=%x no other reference on socket", __func__, so);
3331 /* NOTREACHED */
3332 }
3333 #endif
3334 mp = mp0;
3335 if (psa != NULL) {
3336 *psa = NULL;
3337 }
3338 if (controlp != NULL) {
3339 *controlp = NULL;
3340 }
3341 if (flagsp != NULL) {
3342 flags = *flagsp & ~MSG_EOR;
3343 } else {
3344 flags = 0;
3345 }
3346
3347 /*
3348 * If a recv attempt is made on a previously-accepted socket
3349 * that has been marked as inactive (disconnected), reject
3350 * the request.
3351 */
3352 if (so->so_flags & SOF_DEFUNCT) {
3353 struct sockbuf *sb = &so->so_rcv;
3354
3355 error = ENOTCONN;
3356 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llu [%d,%d] (%d)\n",
3357 __func__, proc_pid(p), proc_best_name(p),
3358 so->so_gencnt,
3359 SOCK_DOM(so), SOCK_TYPE(so), error);
3360 /*
3361 * This socket should have been disconnected and flushed
3362 * prior to being returned from sodefunct(); there should
3363 * be no data on its receive list, so panic otherwise.
3364 */
3365 if (so->so_state & SS_DEFUNCT) {
3366 sb_empty_assert(sb, __func__);
3367 }
3368 socket_unlock(so, 1);
3369 return error;
3370 }
3371
3372 if ((so->so_flags1 & SOF1_PRECONNECT_DATA) &&
3373 pr->pr_usrreqs->pru_preconnect) {
3374 /*
3375 		 * A user may set the CONNECT_RESUME_ON_READ_WRITE flag but not
3376 		 * call write() right after this. *If* the app calls a read,
3377 		 * we do not want to block this read indefinitely. Thus,
3378 * we trigger a connect so that the session gets initiated.
3379 */
3380 error = (*pr->pr_usrreqs->pru_preconnect)(so);
3381
3382 if (error) {
3383 socket_unlock(so, 1);
3384 return error;
3385 }
3386 }
3387
3388 if (ENTR_SHOULDTRACE &&
3389 (SOCK_CHECK_DOM(so, AF_INET) || SOCK_CHECK_DOM(so, AF_INET6))) {
3390 /*
3391 * enable energy tracing for inet sockets that go over
3392 * non-loopback interfaces only.
3393 */
3394 struct inpcb *inp = sotoinpcb(so);
3395 if (inp->inp_last_outifp != NULL &&
3396 !(inp->inp_last_outifp->if_flags & IFF_LOOPBACK)) {
3397 en_tracing = TRUE;
3398 KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_START,
3399 VM_KERNEL_ADDRPERM(so),
3400 ((so->so_state & SS_NBIO) ?
3401 kEnTrFlagNonBlocking : 0),
3402 (int64_t)orig_resid);
3403 }
3404 }
3405
3406 /*
3407 * When SO_WANTOOBFLAG is set we try to get out-of-band data
3408 	 * regardless of the flags argument. Here is the case where
3409 * out-of-band data is not inline.
3410 */
3411 if ((flags & MSG_OOB) ||
3412 ((so->so_options & SO_WANTOOBFLAG) != 0 &&
3413 (so->so_options & SO_OOBINLINE) == 0 &&
3414 (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) {
3415 m = m_get(M_WAIT, MT_DATA);
3416 if (m == NULL) {
3417 socket_unlock(so, 1);
3418 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
3419 ENOBUFS, 0, 0, 0, 0);
3420 return ENOBUFS;
3421 }
3422 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
3423 if (error) {
3424 goto bad;
3425 }
3426 socket_unlock(so, 0);
3427 do {
3428 error = uiomove(mtod(m, caddr_t),
3429 imin((int)uio_resid(uio), m->m_len), uio);
3430 m = m_free(m);
3431 } while (uio_resid(uio) && error == 0 && m != NULL);
3432 socket_lock(so, 0);
3433 bad:
3434 if (m != NULL) {
3435 m_freem(m);
3436 }
3437
3438 if ((so->so_options & SO_WANTOOBFLAG) != 0) {
3439 if (error == EWOULDBLOCK || error == EINVAL) {
3440 /*
3441 * Let's try to get normal data:
3442 				 * EWOULDBLOCK: out-of-band data not
3443 				 * received yet. EINVAL: out-of-band data
3444 * already read.
3445 */
3446 error = 0;
3447 goto nooob;
3448 } else if (error == 0 && flagsp != NULL) {
3449 *flagsp |= MSG_OOB;
3450 }
3451 }
3452 socket_unlock(so, 1);
3453 if (en_tracing) {
3454 KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
3455 VM_KERNEL_ADDRPERM(so), 0,
3456 (int64_t)(orig_resid - uio_resid(uio)));
3457 }
3458 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,
3459 0, 0, 0, 0);
3460
3461 return error;
3462 }
3463 nooob:
3464 if (mp != NULL) {
3465 *mp = NULL;
3466 }
3467
3468 if (so->so_state & SS_ISCONFIRMING && uio_resid(uio)) {
3469 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
3470 }
3471
3472 free_list = NULL;
3473 delayed_copy_len = 0;
3474 restart:
3475 #ifdef MORE_LOCKING_DEBUG
3476 if (so->so_usecount <= 1) {
3477 printf("soreceive: sblock so=0x%llx ref=%d on socket\n",
3478 (uint64_t)DEBUG_KERNEL_ADDRPERM(so), so->so_usecount);
3479 }
3480 #endif
3481 /*
3482 * See if the socket has been closed (SS_NOFDREF|SS_CANTRCVMORE)
3483 * and if so just return to the caller. This could happen when
3484 * soreceive() is called by a socket upcall function during the
3485 * time the socket is freed. The socket buffer would have been
3486 * locked across the upcall, therefore we cannot put this thread
3487 * to sleep (else we will deadlock) or return EWOULDBLOCK (else
3488 * we may livelock), because the lock on the socket buffer will
3489 * only be released when the upcall routine returns to its caller.
3490 * Because the socket has been officially closed, there can be
3491 * no further read on it.
3492 *
3493 * A multipath subflow socket would have its SS_NOFDREF set by
3494 * default, so check for SOF_MP_SUBFLOW socket flag; when the
3495 * socket is closed for real, SOF_MP_SUBFLOW would be cleared.
3496 */
3497 if ((so->so_state & (SS_NOFDREF | SS_CANTRCVMORE)) ==
3498 (SS_NOFDREF | SS_CANTRCVMORE) && !(so->so_flags & SOF_MP_SUBFLOW)) {
3499 socket_unlock(so, 1);
3500 return 0;
3501 }
3502
3503 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
3504 if (error) {
3505 socket_unlock(so, 1);
3506 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,
3507 0, 0, 0, 0);
3508 if (en_tracing) {
3509 KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
3510 VM_KERNEL_ADDRPERM(so), 0,
3511 (int64_t)(orig_resid - uio_resid(uio)));
3512 }
3513 return error;
3514 }
3515
3516 m = so->so_rcv.sb_mb;
3517 if (so_should_wait(so, uio, m, flags)) {
3518 /*
3519 * Panic if we notice inconsistencies in the socket's
3520 * receive list; both sb_mb and sb_cc should correctly
3521 * reflect the contents of the list, otherwise we may
3522 * end up with false positives during select() or poll()
3523 * which could put the application in a bad state.
3524 */
3525 SB_MB_CHECK(&so->so_rcv);
3526
3527 if (so->so_error) {
3528 if (m != NULL) {
3529 goto dontblock;
3530 }
3531 error = so->so_error;
3532 if ((flags & MSG_PEEK) == 0) {
3533 so->so_error = 0;
3534 }
3535 goto release;
3536 }
3537 if (so->so_state & SS_CANTRCVMORE) {
3538 #if CONTENT_FILTER
3539 /*
3540 * Deal with half closed connections
3541 */
3542 if ((so->so_state & SS_ISDISCONNECTED) == 0 &&
3543 cfil_sock_data_pending(&so->so_rcv) != 0) {
3544 CFIL_LOG(LOG_INFO,
3545 "so %llx ignore SS_CANTRCVMORE",
3546 (uint64_t)DEBUG_KERNEL_ADDRPERM(so));
3547 } else
3548 #endif /* CONTENT_FILTER */
3549 if (m != NULL) {
3550 goto dontblock;
3551 } else {
3552 goto release;
3553 }
3554 }
3555 for (; m != NULL; m = m->m_next) {
3556 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
3557 m = so->so_rcv.sb_mb;
3558 goto dontblock;
3559 }
3560 }
3561 if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0 &&
3562 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
3563 error = ENOTCONN;
3564 goto release;
3565 }
3566 if (uio_resid(uio) == 0) {
3567 goto release;
3568 }
3569
3570 if ((so->so_state & SS_NBIO) ||
3571 (flags & (MSG_DONTWAIT | MSG_NBIO))) {
3572 error = EWOULDBLOCK;
3573 goto release;
3574 }
3575 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
3576 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
3577 sbunlock(&so->so_rcv, TRUE); /* keep socket locked */
3578 #if EVEN_MORE_LOCKING_DEBUG
3579 if (socket_debug) {
3580 printf("Waiting for socket data\n");
3581 }
3582 #endif
3583
3584 /*
3585 * Depending on the protocol (e.g. TCP), the following
3586 * might cause the socket lock to be dropped and later
3587 * be reacquired, and more data could have arrived and
3588 * have been appended to the receive socket buffer by
3589 		 * the time it returns. Therefore, we sleep in
3590 		 * sbwait() below only if the wait-condition is still
3591 		 * true.
3592 */
3593 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) {
3594 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
3595 }
3596
3597 error = 0;
3598 if (so_should_wait(so, uio, so->so_rcv.sb_mb, flags)) {
3599 error = sbwait(&so->so_rcv);
3600 }
3601
3602 #if EVEN_MORE_LOCKING_DEBUG
3603 if (socket_debug) {
3604 printf("SORECEIVE - sbwait returned %d\n", error);
3605 }
3606 #endif
3607 if (so->so_usecount < 1) {
3608 panic("%s: after 2nd sblock so=%p ref=%d on socket",
3609 __func__, so, so->so_usecount);
3610 /* NOTREACHED */
3611 }
3612 if (error) {
3613 socket_unlock(so, 1);
3614 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,
3615 0, 0, 0, 0);
3616 if (en_tracing) {
3617 KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
3618 VM_KERNEL_ADDRPERM(so), 0,
3619 (int64_t)(orig_resid - uio_resid(uio)));
3620 }
3621 return error;
3622 }
3623 goto restart;
3624 }
3625 dontblock:
3626 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgrcv);
3627 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
3628 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
3629 nextrecord = m->m_nextpkt;
3630
3631 if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
3632 error = soreceive_addr(p, so, psa, NULL, flags, &m, &nextrecord,
3633 mp0 == NULL);
3634 if (error == ERESTART) {
3635 goto restart;
3636 } else if (error != 0) {
3637 goto release;
3638 }
3639 orig_resid = 0;
3640 }
3641
3642 /*
3643 * Process one or more MT_CONTROL mbufs present before any data mbufs
3644 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
3645 * just copy the data; if !MSG_PEEK, we call into the protocol to
3646 * perform externalization.
3647 */
3648 if (m != NULL && m->m_type == MT_CONTROL) {
3649 error = soreceive_ctl(so, controlp, flags, &m, &nextrecord);
3650 if (error != 0) {
3651 goto release;
3652 }
3653 orig_resid = 0;
3654 }
3655
3656 if (m != NULL) {
3657 if (!(flags & MSG_PEEK)) {
3658 /*
3659 * We get here because m points to an mbuf following
3660 * any MT_SONAME or MT_CONTROL mbufs which have been
3661 * processed above. In any case, m should be pointing
3662 * to the head of the mbuf chain, and the nextrecord
3663 * should be either NULL or equal to m->m_nextpkt.
3664 * See comments above about SB_LOCK.
3665 */
3666 if (m != so->so_rcv.sb_mb ||
3667 m->m_nextpkt != nextrecord) {
3668 panic("%s: post-control !sync so=%p m=%p "
3669 "nextrecord=%p\n", __func__, so, m,
3670 nextrecord);
3671 /* NOTREACHED */
3672 }
3673 if (nextrecord == NULL) {
3674 so->so_rcv.sb_lastrecord = m;
3675 }
3676 }
3677 type = m->m_type;
3678 if (type == MT_OOBDATA) {
3679 flags |= MSG_OOB;
3680 }
3681 } else {
3682 if (!(flags & MSG_PEEK)) {
3683 SB_EMPTY_FIXUP(&so->so_rcv);
3684 }
3685 }
3686 SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
3687 SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");
3688
3689 moff = 0;
3690 offset = 0;
3691
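	/*
	 * Delayed copy: when consuming (not peeking) a sufficiently large request,
	 * batch mbufs onto free_list and copy them out later with the socket unlocked.
	 */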
3692 if (!(flags & MSG_PEEK) && uio_resid(uio) > sorecvmincopy) {
3693 can_delay = 1;
3694 } else {
3695 can_delay = 0;
3696 }
3697
3698 while (m != NULL &&
3699 (uio_resid(uio) - delayed_copy_len) > 0 && error == 0) {
3700 if (m->m_type == MT_OOBDATA) {
3701 if (type != MT_OOBDATA) {
3702 break;
3703 }
3704 } else if (type == MT_OOBDATA) {
3705 break;
3706 }
3707
3708 if (!m_has_mtype(m, MTF_DATA | MTF_HEADER | MTF_OOBDATA)) {
3709 break;
3710 }
3711 /*
3712 	 * Make sure to always set MSG_OOB event when getting
3713 	 * out-of-band data inline.
3714 */
3715 if ((so->so_options & SO_WANTOOBFLAG) != 0 &&
3716 (so->so_options & SO_OOBINLINE) != 0 &&
3717 (so->so_state & SS_RCVATMARK) != 0) {
3718 flags |= MSG_OOB;
3719 }
3720 so->so_state &= ~SS_RCVATMARK;
3721 len = uio_resid(uio) - delayed_copy_len;
3722 if (so->so_oobmark && len > so->so_oobmark - offset) {
3723 len = so->so_oobmark - offset;
3724 }
3725 if (len > m->m_len - moff) {
3726 len = m->m_len - moff;
3727 }
3728 /*
3729 * If mp is set, just pass back the mbufs.
3730 * Otherwise copy them out via the uio, then free.
3731 * Sockbuf must be consistent here (points to current mbuf,
3732 * it points to next record) when we drop priority;
3733 * we must note any additions to the sockbuf when we
3734 * block interrupts again.
3735 */
3736 if (mp == NULL) {
3737 SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
3738 SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
3739 if (can_delay && len == m->m_len) {
3740 /*
3741 				 * Only delay the copy if we're consuming the
3742 				 * mbuf, we're NOT in MSG_PEEK mode,
3743 				 * and we have enough data to make it worthwhile
3744 				 * to drop and retake the lock; can_delay
3745 				 * reflects the state of the two latter
3746 				 * constraints. moff should always be zero
3747 				 * in these cases.
3748 */
3749 delayed_copy_len += len;
3750 } else {
3751 if (delayed_copy_len) {
3752 error = sodelayed_copy(so, uio,
3753 &free_list, &delayed_copy_len);
3754
3755 if (error) {
3756 goto release;
3757 }
3758 /*
3759 * can only get here if MSG_PEEK is not
3760 * set; therefore, m should point at the
3761 * head of the rcv queue; if it doesn't,
3762 * it means something drastically
3763 * changed while we were out from behind
3764 * the lock in sodelayed_copy. perhaps
3765 * a RST on the stream. in any event,
3766 * the stream has been interrupted. it's
3767 * probably best just to return whatever
3768 * data we've moved and let the caller
3769 * sort it out...
3770 */
3771 if (m != so->so_rcv.sb_mb) {
3772 break;
3773 }
3774 }
3775 socket_unlock(so, 0);
3776 error = uiomove(mtod(m, caddr_t) + moff,
3777 (int)len, uio);
3778 socket_lock(so, 0);
3779
3780 if (error) {
3781 goto release;
3782 }
3783 }
3784 } else {
3785 uio_setresid(uio, (uio_resid(uio) - len));
3786 }
3787 if (len == m->m_len - moff) {
3788 if (m->m_flags & M_EOR) {
3789 flags |= MSG_EOR;
3790 }
3791 if (flags & MSG_PEEK) {
3792 m = m->m_next;
3793 moff = 0;
3794 } else {
3795 nextrecord = m->m_nextpkt;
3796 sbfree(&so->so_rcv, m);
3797 m->m_nextpkt = NULL;
3798
3799 if (mp != NULL) {
3800 *mp = m;
3801 mp = &m->m_next;
3802 so->so_rcv.sb_mb = m = m->m_next;
3803 *mp = NULL;
3804 } else {
3805 if (free_list == NULL) {
3806 free_list = m;
3807 } else {
3808 ml->m_next = m;
3809 }
3810 ml = m;
3811 so->so_rcv.sb_mb = m = m->m_next;
3812 ml->m_next = NULL;
3813 }
3814 if (m != NULL) {
3815 m->m_nextpkt = nextrecord;
3816 if (nextrecord == NULL) {
3817 so->so_rcv.sb_lastrecord = m;
3818 }
3819 } else {
3820 so->so_rcv.sb_mb = nextrecord;
3821 SB_EMPTY_FIXUP(&so->so_rcv);
3822 }
3823 SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
3824 SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
3825 }
3826 } else {
3827 if (flags & MSG_PEEK) {
3828 moff += len;
3829 } else {
3830 if (mp != NULL) {
3831 int copy_flag;
3832
3833 if (flags & MSG_DONTWAIT) {
3834 copy_flag = M_DONTWAIT;
3835 } else {
3836 copy_flag = M_WAIT;
3837 }
3838 *mp = m_copym(m, 0, (int)len, copy_flag);
3839 /*
3840 * Failed to allocate an mbuf?
3841 * Adjust uio_resid back, it was
3842 * adjusted down by len bytes which
3843 * we didn't copy over.
3844 */
3845 if (*mp == NULL) {
3846 uio_setresid(uio,
3847 (uio_resid(uio) + len));
3848 break;
3849 }
3850 }
3851 m->m_data += len;
3852 m->m_len -= len;
3853 so->so_rcv.sb_cc -= len;
3854 }
3855 }
3856 if (so->so_oobmark) {
3857 if ((flags & MSG_PEEK) == 0) {
3858 so->so_oobmark -= len;
3859 if (so->so_oobmark == 0) {
3860 so->so_state |= SS_RCVATMARK;
3861 break;
3862 }
3863 } else {
3864 offset += len;
3865 if (offset == so->so_oobmark) {
3866 break;
3867 }
3868 }
3869 }
3870 if (flags & MSG_EOR) {
3871 break;
3872 }
3873 /*
3874 * If the MSG_WAITALL or MSG_WAITSTREAM flag is set
3875 * (for non-atomic socket), we must not quit until
3876 * "uio->uio_resid == 0" or an error termination.
3877 * If a signal/timeout occurs, return with a short
3878 * count but without error. Keep sockbuf locked
3879 * against other readers.
3880 */
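/*
 * Illustrative user-space sketch of how this path is typically reached
 * (standard sockets API; the descriptor and buffer size are assumptions,
 * not kernel code):
 *
 *	char buf[4096];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_WAITALL);
 *
 * With MSG_WAITALL on a stream socket, n is smaller than sizeof(buf)
 * only on EOF, an error, or a signal/timeout as described above.
 */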
3881 while (flags & (MSG_WAITALL | MSG_WAITSTREAM) && m == NULL &&
3882 (uio_resid(uio) - delayed_copy_len) > 0 &&
3883 !sosendallatonce(so) && !nextrecord) {
3884 if (so->so_error || ((so->so_state & SS_CANTRCVMORE)
3885 #if CONTENT_FILTER
3886 && cfil_sock_data_pending(&so->so_rcv) == 0
3887 #endif /* CONTENT_FILTER */
3888 )) {
3889 goto release;
3890 }
3891
3892 /*
3893 * Depending on the protocol (e.g. TCP), the following
3894 * might cause the socket lock to be dropped and later
3895 * be reacquired, and more data could have arrived and
3896 * have been appended to the receive socket buffer by
3897 * the time it returns. Therefore, we sleep in
3898 * sbwait() below only if the socket buffer is
3899 * empty, in order to avoid a false sleep.
3900 */
3901 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) {
3902 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
3903 }
3904
3905 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
3906 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
3907
3908 if (so->so_rcv.sb_mb == NULL && sbwait(&so->so_rcv)) {
3909 error = 0;
3910 goto release;
3911 }
3912 /*
3913 * have to wait until after we get back from the sbwait
3914 * to do the copy because we will drop the lock if we
3915 * have enough data that has been delayed... by dropping
3916 * the lock we open up a window allowing the netisr
3917 * thread to process the incoming packets and to change
3918 * the state of this socket... we're issuing the sbwait
3919 * because the socket is empty and we're expecting the
3920 * netisr thread to wake us up when more packets arrive;
3921 * if we allow that processing to happen and then sbwait
3922 * we could stall forever with packets sitting in the
3923 * socket if no further packets arrive from the remote
3924 * side.
3925 *
3926 * we want to copy before we've collected all the data
3927 * to satisfy this request to allow the copy to overlap
3928 * the incoming packet processing on an MP system
3929 */
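/*
 * Flush heuristic used below: drain the accumulated mbufs into the
 * uio once delayed_copy_len exceeds both sorecvmincopy and half of
 * the receive buffer's high-water mark, so the copy can overlap
 * further packet processing instead of waiting for the full request.
 */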
3930 if (delayed_copy_len > sorecvmincopy &&
3931 (delayed_copy_len > (so->so_rcv.sb_hiwat / 2))) {
3932 error = sodelayed_copy(so, uio,
3933 &free_list, &delayed_copy_len);
3934
3935 if (error) {
3936 goto release;
3937 }
3938 }
3939 m = so->so_rcv.sb_mb;
3940 if (m != NULL) {
3941 nextrecord = m->m_nextpkt;
3942 }
3943 SB_MB_CHECK(&so->so_rcv);
3944 }
3945 }
3946 #ifdef MORE_LOCKING_DEBUG
3947 if (so->so_usecount <= 1) {
3948 panic("%s: after big while so=%p ref=%d on socket",
3949 __func__, so, so->so_usecount);
3950 /* NOTREACHED */
3951 }
3952 #endif
3953
3954 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
3955 if (so->so_options & SO_DONTTRUNC) {
3956 flags |= MSG_RCVMORE;
3957 } else {
3958 flags |= MSG_TRUNC;
3959 if ((flags & MSG_PEEK) == 0) {
3960 (void) sbdroprecord(&so->so_rcv);
3961 }
3962 }
3963 }
3964
3965 /*
3966 * pru_rcvd below (for TCP) may cause more data to be received
3967 * if the socket lock is dropped prior to sending the ACK; some
3968 * legacy OpenTransport applications don't handle this well
3969 * (if they receive less data than requested while MSG_HAVEMORE
3970 * is set), and so we set the flag now based on what we know
3971 * prior to calling pru_rcvd.
3972 */
3973 if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) {
3974 flags |= MSG_HAVEMORE;
3975 }
3976
3977 if ((flags & MSG_PEEK) == 0) {
3978 if (m == NULL) {
3979 so->so_rcv.sb_mb = nextrecord;
3980 /*
3981 * First part is an inline SB_EMPTY_FIXUP(). Second
3982 * part makes sure sb_lastrecord is up-to-date if
3983 * there is still data in the socket buffer.
3984 */
3985 if (so->so_rcv.sb_mb == NULL) {
3986 so->so_rcv.sb_mbtail = NULL;
3987 so->so_rcv.sb_lastrecord = NULL;
3988 } else if (nextrecord->m_nextpkt == NULL) {
3989 so->so_rcv.sb_lastrecord = nextrecord;
3990 }
3991 SB_MB_CHECK(&so->so_rcv);
3992 }
3993 SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
3994 SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
3995 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) {
3996 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
3997 }
3998 }
3999
4000 if (delayed_copy_len) {
4001 error = sodelayed_copy(so, uio, &free_list, &delayed_copy_len);
4002 if (error) {
4003 goto release;
4004 }
4005 }
4006 if (free_list != NULL) {
4007 m_freem_list(free_list);
4008 free_list = NULL;
4009 }
4010
4011 if (orig_resid == uio_resid(uio) && orig_resid &&
4012 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
4013 sbunlock(&so->so_rcv, TRUE); /* keep socket locked */
4014 goto restart;
4015 }
4016
4017 if (flagsp != NULL) {
4018 *flagsp |= flags;
4019 }
4020 release:
4021 #ifdef MORE_LOCKING_DEBUG
4022 if (so->so_usecount <= 1) {
4023 panic("%s: release so=%p ref=%d on socket", __func__,
4024 so, so->so_usecount);
4025 /* NOTREACHED */
4026 }
4027 #endif
4028 if (delayed_copy_len) {
4029 error = sodelayed_copy(so, uio, &free_list, &delayed_copy_len);
4030 }
4031
4032 if (free_list != NULL) {
4033 m_freem_list(free_list);
4034 }
4035
4036 sbunlock(&so->so_rcv, FALSE); /* will unlock socket */
4037
4038 if (en_tracing) {
4039 KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
4040 VM_KERNEL_ADDRPERM(so),
4041 ((error == EWOULDBLOCK) ? kEnTrFlagNoWork : 0),
4042 (int64_t)(orig_resid - uio_resid(uio)));
4043 }
4044 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, so, uio_resid(uio),
4045 so->so_rcv.sb_cc, 0, error);
4046
4047 return error;
4048 }
4049
4050 /*
4051 * Returns: 0 Success
4052 * uiomove:EFAULT
4053 */
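/*
 * sodelayed_copy drains the mbufs accumulated on *free_list into the
 * caller's uio with the socket unlocked (uiomove may fault and block),
 * then frees the whole chain and clears *free_list and *resid before
 * re-taking the socket lock. Callers must re-validate so_rcv.sb_mb
 * afterwards, since the receive queue may have changed while unlocked.
 */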
4054 static int
4055 sodelayed_copy(struct socket *so, struct uio *uio, struct mbuf **free_list,
4056 user_ssize_t *resid)
4057 {
4058 int error = 0;
4059 struct mbuf *m;
4060
4061 m = *free_list;
4062
4063 socket_unlock(so, 0);
4064
4065 while (m != NULL && error == 0) {
4066 error = uiomove(mtod(m, caddr_t), (int)m->m_len, uio);
4067 m = m->m_next;
4068 }
4069 m_freem_list(*free_list);
4070
4071 *free_list = NULL;
4072 *resid = 0;
4073
4074 socket_lock(so, 0);
4075
4076 return error;
4077 }
4078
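/*
 * soreceive_m_list: receive up to *pktcntp datagram records, returning
 * each record as an mbuf chain linked through m_nextpkt via *mp0, with
 * optional per-packet address (*maddrp) and control (*controlp) chains.
 * On return *pktcntp holds the number of packets actually received.
 * (Presumably this backs the batch-receive path, e.g. recvmsg_x(2);
 * that caller is an assumption, not established by this file.)
 */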
4079 int
4080 soreceive_m_list(struct socket *so, u_int *pktcntp, struct mbuf **maddrp,
4081 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
4082 {
4083 mbuf_ref_t m;
4084 mbuf_ref_ref_t mp;
4085 mbuf_ref_t nextrecord;
4086 int flags, error;
4087 struct protosw *pr = so->so_proto;
4088 struct proc *p = current_proc();
4089 u_int npkts = 0;
4090 mbuf_ref_t free_list = NULL;
4091 int sblocked = 0;
4092
4093 /*
4094 * Sanity check on the parameters passed by caller
4095 */
4096 if (mp0 == NULL || pktcntp == NULL) {
4097 return EINVAL;
4098 }
4099 if (*pktcntp > SO_MAX_MSG_X || *pktcntp == 0) {
4100 return EINVAL;
4101 }
4102
4103 mp = mp0;
4104 *mp0 = NULL;
4105 if (controlp != NULL) {
4106 *controlp = NULL;
4107 }
4108 if (maddrp != NULL) {
4109 *maddrp = NULL;
4110 }
4111 if (flagsp != NULL) {
4112 flags = *flagsp;
4113 } else {
4114 flags = 0;
4115 }
4116
4117 KERNEL_DEBUG(DBG_FNC_SORECEIVE_LIST | DBG_FUNC_START, so,
4118 *pktcntp, so->so_rcv.sb_cc, so->so_rcv.sb_lowat,
4119 so->so_rcv.sb_hiwat);
4120
4121 socket_lock(so, 1);
4122 so_update_last_owner_locked(so, p);
4123 so_update_policy(so);
4124
4125 #if NECP
4126 so_update_necp_policy(so, NULL, NULL);
4127 #endif /* NECP */
4128
4129 /*
4130 * If a recv attempt is made on a previously-accepted socket
4131 * that has been marked as inactive (disconnected), reject
4132 * the request.
4133 */
4134 if (so->so_flags & SOF_DEFUNCT) {
4135 struct sockbuf *sb = &so->so_rcv;
4136
4137 error = ENOTCONN;
4138 SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llu [%d,%d] (%d)\n",
4139 __func__, proc_pid(p), proc_best_name(p),
4140 so->so_gencnt,
4141 SOCK_DOM(so), SOCK_TYPE(so), error);
4142 /*
4143 * This socket should have been disconnected and flushed
4144 * prior to being returned from sodefunct(); there should
4145 * be no data on its receive list, so panic otherwise.
4146 */
4147 if (so->so_state & SS_DEFUNCT) {
4148 sb_empty_assert(sb, __func__);
4149 }
4150 goto release;
4151 }
4152
4153 *mp = NULL;
4154
4155 restart:
4156 /*
4157 * See if the socket has been closed (SS_NOFDREF|SS_CANTRCVMORE)
4158 * and if so just return to the caller. This could happen when
4159 * soreceive() is called by a socket upcall function during the
4160 * time the socket is freed. The socket buffer would have been
4161 * locked across the upcall, therefore we cannot put this thread
4162 * to sleep (else we will deadlock) or return EWOULDBLOCK (else
4163 * we may livelock), because the lock on the socket buffer will
4164 * only be released when the upcall routine returns to its caller.
4165 * Because the socket has been officially closed, there can be
4166 * no further read on it.
4167 */
4168 if ((so->so_state & (SS_NOFDREF | SS_CANTRCVMORE)) ==
4169 (SS_NOFDREF | SS_CANTRCVMORE)) {
4170 error = 0;
4171 goto out;
4172 }
4173
4174 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
4175 if (error) {
4176 goto out;
4177 }
4178 sblocked = 1;
4179
4180 m = so->so_rcv.sb_mb;
4181 /*
4182 * Block awaiting more datagrams if needed
4183 */
4184 if (m == NULL || ((flags & MSG_DONTWAIT) == 0 &&
4185 so->so_rcv.sb_cc < so->so_rcv.sb_lowat)) {
4186 /*
4187 * Panic if we notice inconsistencies in the socket's
4188 * receive list; both sb_mb and sb_cc should correctly
4189 * reflect the contents of the list, otherwise we may
4190 * end up with false positives during select() or poll()
4191 * which could put the application in a bad state.
4192 */
4193 SB_MB_CHECK(&so->so_rcv);
4194
4195 if (so->so_error) {
4196 if (m != NULL) {
4197 goto dontblock;
4198 }
4199 error = so->so_error;
4200 if ((flags & MSG_PEEK) == 0) {
4201 so->so_error = 0;
4202 }
4203 goto release;
4204 }
4205 if (so->so_state & SS_CANTRCVMORE) {
4206 if (m != NULL) {
4207 goto dontblock;
4208 } else {
4209 goto release;
4210 }
4211 }
4212 for (; m != NULL; m = m->m_next) {
4213 if (m->m_flags & M_EOR) {
4214 m = so->so_rcv.sb_mb;
4215 goto dontblock;
4216 }
4217 }
4218 if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0 &&
4219 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
4220 error = ENOTCONN;
4221 goto release;
4222 }
4223 if ((so->so_state & SS_NBIO) ||
4224 (flags & (MSG_DONTWAIT | MSG_NBIO))) {
4225 error = EWOULDBLOCK;
4226 goto release;
4227 }
4228 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
4229 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
4230
4231 sbunlock(&so->so_rcv, TRUE); /* keep socket locked */
4232 sblocked = 0;
4233
4234 error = sbwait(&so->so_rcv);
4235 if (error != 0) {
4236 goto release;
4237 }
4238 goto restart;
4239 }
4240 dontblock:
4241 m = so->so_rcv.sb_mb;
4242 if (m == NULL) {
4243 goto release;
4244 }
4245
4246 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgrcv);
4247 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
4248 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
4249 nextrecord = m->m_nextpkt;
4250
4251 if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
4252 mbuf_ref_t maddr = NULL;
4253
4254 error = soreceive_addr(p, so, NULL, &maddr, flags, &m,
4255 &nextrecord, 1);
4256 if (error == ERESTART) {
4257 goto restart;
4258 } else if (error != 0) {
4259 goto release;
4260 }
4261
4262 if (maddr != NULL) {
4263 maddr->m_nextpkt = NULL;
4264 maddr->m_next = NULL;
4265 if (maddrp != NULL) {
4266 *maddrp = maddr;
4267 maddrp = &maddr->m_nextpkt;
4268 } else {
4269 maddr->m_next = free_list;
4270 free_list = maddr;
4271 }
4272 }
4273 }
4274
4275 /*
4276 * Process one or more MT_CONTROL mbufs present before any data mbufs
4277 * in the first mbuf chain on the socket buffer.
4278 * We call into the protocol to perform externalization.
4279 */
4280 if (m != NULL && m->m_type == MT_CONTROL) {
4281 mbuf_ref_t control = NULL;
4282
4283 error = soreceive_ctl(so, &control, flags, &m, &nextrecord);
4284 if (error != 0) {
4285 goto release;
4286 }
4287 if (control != NULL) {
4288 control->m_nextpkt = NULL;
4289 control->m_next = NULL;
4290 if (controlp != NULL) {
4291 *controlp = control;
4292 controlp = &control->m_nextpkt;
4293 } else {
4294 control->m_next = free_list;
4295 free_list = control;
4296 }
4297 }
4298 }
4299
4300 /*
4301 * Link the packet to the list
4302 */
4303 if (m != NULL) {
4304 if (!m_has_mtype(m, MTF_DATA | MTF_HEADER | MTF_OOBDATA)) {
4305 panic("%s: m %p m_type %d != MT_DATA", __func__, m, m->m_type);
4306 }
4307 m->m_nextpkt = NULL;
4308 *mp = m;
4309 mp = &m->m_nextpkt;
4310 }
4311 while (m != NULL) {
4312 sbfree(&so->so_rcv, m);
4313
4314 m = m->m_next;
4315 }
4316
4317 so->so_rcv.sb_mb = nextrecord;
4318 /*
4319 * First part is an inline SB_EMPTY_FIXUP(). Second
4320 * part makes sure sb_lastrecord is up-to-date if
4321 * there is still data in the socket buffer.
4322 */
4323 if (so->so_rcv.sb_mb == NULL) {
4324 so->so_rcv.sb_mbtail = NULL;
4325 so->so_rcv.sb_lastrecord = NULL;
4326 } else if (nextrecord->m_nextpkt == NULL) {
4327 so->so_rcv.sb_lastrecord = nextrecord;
4328 }
4329 SB_MB_CHECK(&so->so_rcv);
4330
4331 SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
4332 SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
4333
4334 npkts += 1;
4335
4336 /*
4337 * We continue as long as we have fewer packets
4338 * than requested and the socket buffer is not empty
4339 */
4340 if (npkts < *pktcntp) {
4341 if (so->so_rcv.sb_mb != NULL) {
4342 goto dontblock;
4343 }
4344 if ((flags & MSG_WAITALL) != 0) {
4345 goto restart;
4346 }
4347 }
4348
4349 if (flagsp != NULL) {
4350 *flagsp |= flags;
4351 }
4352
4353 release:
4354 /*
4355 * pru_rcvd may cause more data to be received if the socket lock
4356 * is dropped so we set MSG_HAVEMORE now based on what we know.
4357 * That way the caller won't be surprised if it receives less data
4358 * than requested.
4359 */
4360 if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) {
4361 flags |= MSG_HAVEMORE;
4362 }
4363
4364 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb != NULL) {
4365 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
4366 }
4367
4368 if (sblocked) {
4369 sbunlock(&so->so_rcv, FALSE); /* will unlock socket */
4370 } else {
4371 socket_unlock(so, 1);
4372 }
4373
4374 out:
4375 *pktcntp = npkts;
4376 /*
4377 * Amortize the cost of freeing the mbufs
4378 */
4379 if (free_list != NULL) {
4380 m_freem_list(free_list);
4381 }
4382
4383 KERNEL_DEBUG(DBG_FNC_SORECEIVE_LIST | DBG_FUNC_END, error,
4384 0, 0, 0, 0);
4385 return error;
4386 }
4387
4388 static int
4389 so_statistics_event_to_nstat_event(int64_t *input_options,
4390 uint64_t *nstat_event)
4391 {
4392 int error = 0;
4393 switch (*input_options) {
4394 case SO_STATISTICS_EVENT_ENTER_CELLFALLBACK:
4395 *nstat_event = NSTAT_EVENT_SRC_ENTER_CELLFALLBACK;
4396 break;
4397 case SO_STATISTICS_EVENT_EXIT_CELLFALLBACK:
4398 *nstat_event = NSTAT_EVENT_SRC_EXIT_CELLFALLBACK;
4399 break;
4400 #if (DEBUG || DEVELOPMENT)
4401 case SO_STATISTICS_EVENT_RESERVED_1:
4402 *nstat_event = NSTAT_EVENT_SRC_RESERVED_1;
4403 break;
4404 case SO_STATISTICS_EVENT_RESERVED_2:
4405 *nstat_event = NSTAT_EVENT_SRC_RESERVED_2;
4406 break;
4407 #endif /* (DEBUG || DEVELOPMENT) */
4408 default:
4409 error = EINVAL;
4410 break;
4411 }
4412 return error;
4413 }
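/*
 * The mapping above is consumed by the SO_STATISTICS_EVENT case in
 * sosetoptlock() below, which copies in a 64-bit option value and
 * forwards the translated event to nstat_pcb_event().
 */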
4414
4415 /*
4416 * Returns: 0 Success
4417 * EINVAL
4418 * ENOTCONN
4419 * <pru_shutdown>:EINVAL
4420 * <pru_shutdown>:EADDRNOTAVAIL[TCP]
4421 * <pru_shutdown>:ENOBUFS[TCP]
4422 * <pru_shutdown>:EMSGSIZE[TCP]
4423 * <pru_shutdown>:EHOSTUNREACH[TCP]
4424 * <pru_shutdown>:ENETUNREACH[TCP]
4425 * <pru_shutdown>:ENETDOWN[TCP]
4426 * <pru_shutdown>:ENOMEM[TCP]
4427 * <pru_shutdown>:EACCES[TCP]
4428 * <pru_shutdown>:EMSGSIZE[TCP]
4429 * <pru_shutdown>:ENOBUFS[TCP]
4430 * <pru_shutdown>:???[TCP] [ignorable: mostly IPSEC/firewall/DLIL]
4431 * <pru_shutdown>:??? [other protocol families]
4432 */
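/*
 * Illustrative user-space usage (standard sockets API, not kernel code):
 *
 *	(void)shutdown(fd, SHUT_WR);
 *
 * shuts down the send side only (a half-close). Per the checks below,
 * an unknown 'how' yields EINVAL, and a socket that is neither
 * connected, connecting, nor disconnecting yields ENOTCONN.
 */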
4433 int
4434 soshutdown(struct socket *so, int how)
4435 {
4436 int error;
4437
4438 KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_START, how, 0, 0, 0, 0);
4439
4440 switch (how) {
4441 case SHUT_RD:
4442 case SHUT_WR:
4443 case SHUT_RDWR:
4444 socket_lock(so, 1);
4445 if ((so->so_state &
4446 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
4447 error = ENOTCONN;
4448 } else {
4449 error = soshutdownlock(so, how);
4450 }
4451 socket_unlock(so, 1);
4452 break;
4453 default:
4454 error = EINVAL;
4455 break;
4456 }
4457
4458 KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, how, error, 0, 0, 0);
4459
4460 return error;
4461 }
4462
4463 int
4464 soshutdownlock_final(struct socket *so, int how)
4465 {
4466 struct protosw *pr = so->so_proto;
4467 int error = 0;
4468
4469 sflt_notify(so, sock_evt_shutdown, &how);
4470
4471 if (how != SHUT_WR) {
4472 if ((so->so_state & SS_CANTRCVMORE) != 0) {
4473 /* read already shut down */
4474 error = ENOTCONN;
4475 goto done;
4476 }
4477 sorflush(so);
4478 }
4479 if (how != SHUT_RD) {
4480 if ((so->so_state & SS_CANTSENDMORE) != 0) {
4481 /* write already shut down */
4482 error = ENOTCONN;
4483 goto done;
4484 }
4485 error = (*pr->pr_usrreqs->pru_shutdown)(so);
4486 }
4487 done:
4488 KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN, how, 1, 0, 0, 0);
4489 return error;
4490 }
4491
4492 int
4493 soshutdownlock(struct socket *so, int how)
4494 {
4495 int error = 0;
4496
4497 #if CONTENT_FILTER
4498 /*
4499 * A content filter may delay the actual shutdown until it
4500 * has processed the pending data
4501 */
4502 if (so->so_flags & SOF_CONTENT_FILTER) {
4503 error = cfil_sock_shutdown(so, &how);
4504 if (error == EJUSTRETURN) {
4505 error = 0;
4506 goto done;
4507 } else if (error != 0) {
4508 goto done;
4509 }
4510 }
4511 #endif /* CONTENT_FILTER */
4512
4513 error = soshutdownlock_final(so, how);
4514
4515 done:
4516 return error;
4517 }
4518
4519 void
4520 sowflush(struct socket *so)
4521 {
4522 struct sockbuf *sb = &so->so_snd;
4523
4524 /*
4525 * Obtain lock on the socket buffer (SB_LOCK). This is required
4526 * to prevent the socket buffer from being unexpectedly altered
4527 * while it is used by another thread in socket send/receive.
4528 *
4529 * sblock() must not fail here, hence the assertion.
4530 */
4531 (void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT);
4532 VERIFY(sb->sb_flags & SB_LOCK);
4533
4534 sb->sb_flags &= ~(SB_SEL | SB_UPCALL);
4535 sb->sb_flags |= SB_DROP;
4536 sb->sb_upcall = NULL;
4537 sb->sb_upcallarg = NULL;
4538
4539 sbunlock(sb, TRUE); /* keep socket locked */
4540
4541 selthreadclear(&sb->sb_sel);
4542 sbrelease(sb);
4543 }
4544
4545 void
4546 sorflush(struct socket *so)
4547 {
4548 struct sockbuf *sb = &so->so_rcv;
4549 struct protosw *pr = so->so_proto;
4550 struct sockbuf asb;
4551 #ifdef notyet
4552 lck_mtx_t *mutex_held;
4553 /*
4554 * XXX: This code is currently commented out, because we may get here
4555 * as part of sofreelastref(), and at that time, pr_getlock() may no
4556 * longer be able to return us the lock; this will be fixed in future.
4557 */
4558 if (so->so_proto->pr_getlock != NULL) {
4559 mutex_held = (*so->so_proto->pr_getlock)(so, 0);
4560 } else {
4561 mutex_held = so->so_proto->pr_domain->dom_mtx;
4562 }
4563
4564 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
4565 #endif /* notyet */
4566
4567 sflt_notify(so, sock_evt_flush_read, NULL);
4568
4569 socantrcvmore(so);
4570
4571 /*
4572 * Obtain lock on the socket buffer (SB_LOCK). This is required
4573 * to prevent the socket buffer from being unexpectedly altered
4574 * while it is used by another thread in socket send/receive.
4575 *
4576 * sblock() must not fail here, hence the assertion.
4577 */
4578 (void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT);
4579 VERIFY(sb->sb_flags & SB_LOCK);
4580
4581 /*
4582 * Copy only the relevant fields from "sb" to "asb" which we
4583 * need for sbrelease() to function. In particular, skip
4584 * sb_sel as it contains the wait queue linkage, which would
4585 * wreak havoc if we were to issue selthreadclear() on "asb".
4586 * Make sure to not carry over SB_LOCK in "asb", as we need
4587 * to acquire it later as part of sbrelease().
4588 */
4589 bzero(&asb, sizeof(asb));
4590 asb.sb_cc = sb->sb_cc;
4591 asb.sb_hiwat = sb->sb_hiwat;
4592 asb.sb_mbcnt = sb->sb_mbcnt;
4593 asb.sb_mbmax = sb->sb_mbmax;
4594 asb.sb_ctl = sb->sb_ctl;
4595 asb.sb_lowat = sb->sb_lowat;
4596 asb.sb_mb = sb->sb_mb;
4597 asb.sb_mbtail = sb->sb_mbtail;
4598 asb.sb_lastrecord = sb->sb_lastrecord;
4599 asb.sb_so = sb->sb_so;
4600 asb.sb_flags = sb->sb_flags;
4601 asb.sb_flags &= ~(SB_LOCK | SB_SEL | SB_KNOTE | SB_UPCALL);
4602 asb.sb_flags |= SB_DROP;
4603
4604 /*
4605 * Ideally we'd bzero() these and preserve the ones we need;
4606 * but to do that we'd need to shuffle things around in the
4607 * sockbuf, and we can't do it now because there are KEXTS
4608 * that are directly referring to the socket structure.
4609 *
4610 * Setting SB_DROP acts as a barrier to prevent further appends.
4611 * Clearing SB_SEL is done for selthreadclear() below.
4612 */
4613 sb->sb_cc = 0;
4614 sb->sb_hiwat = 0;
4615 sb->sb_mbcnt = 0;
4616 sb->sb_mbmax = 0;
4617 sb->sb_ctl = 0;
4618 sb->sb_lowat = 0;
4619 sb->sb_mb = NULL;
4620 sb->sb_mbtail = NULL;
4621 sb->sb_lastrecord = NULL;
4622 sb->sb_timeo.tv_sec = 0;
4623 sb->sb_timeo.tv_usec = 0;
4624 sb->sb_upcall = NULL;
4625 sb->sb_upcallarg = NULL;
4626 sb->sb_flags &= ~(SB_SEL | SB_UPCALL);
4627 sb->sb_flags |= SB_DROP;
4628
4629 sbunlock(sb, TRUE); /* keep socket locked */
4630
4631 /*
4632 * Note that selthreadclear() is called on the original "sb" and
4633 * not the local "asb" because of the way wait queue linkage is
4634 * implemented. Given that selwakeup() may be triggered, SB_SEL
4635 * should no longer be set (cleared above.)
4636 */
4637 selthreadclear(&sb->sb_sel);
4638
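/*
 * If the protocol can carry access rights (PR_RIGHTS, e.g. descriptors
 * passed over local-domain sockets), let the domain dispose of any
 * rights still queued on the detached chain before it is released.
 */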
4639 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) {
4640 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
4641 }
4642
4643 sbrelease(&asb);
4644 }
4645
4646 /*
4647 * Perhaps this routine, and sooptcopyout(), below, ought to come in
4648 * an additional variant to handle the case where the option value needs
4649 * to be some kind of integer, but not a specific size.
4650 * In addition to their use here, these functions are also called by the
4651 * protocol-level pr_ctloutput() routines.
4652 *
4653 * Returns: 0 Success
4654 * EINVAL
4655 * copyin:EFAULT
4656 */
4657 int
4658 sooptcopyin(struct sockopt *sopt, void *__sized_by(len) buf, size_t len, size_t minlen)
4659 {
4660 size_t valsize;
4661
4662 /*
4663 * If the user gives us more than we wanted, we ignore it,
4664 * but if we don't get the minimum length the caller
4665 * wants, we return EINVAL. On success, sopt->sopt_valsize
4666 * is set to however much we actually retrieved.
4667 */
4668 if ((valsize = sopt->sopt_valsize) < minlen) {
4669 return EINVAL;
4670 }
4671 if (valsize > len) {
4672 sopt->sopt_valsize = valsize = len;
4673 }
4674
4675 if (sopt->sopt_p != kernproc) {
4676 return copyin(sopt->sopt_val, buf, valsize);
4677 }
4678
4679 caddr_t tmp = __unsafe_forge_bidi_indexable(caddr_t,
4680 CAST_DOWN(caddr_t, sopt->sopt_val),
4681 valsize);
4682 bcopy(tmp, buf, valsize);
4683
4684 return 0;
4685 }
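/*
 * Typical in-kernel caller pattern, as used by the socket-option
 * handlers later in this file (minimal sketch):
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error != 0)
 *		goto out;
 */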
4686
4687 /*
4688 * sooptcopyin_timeval
4689 * Copy in a timeval value into tv_p, taking into account whether
4690 * the calling process is 64-bit or 32-bit. Moved the sanity checking
4691 * code here so that we can verify the 64-bit tv_sec value before we lose
4692 * the top 32-bits assigning tv64.tv_sec to tv_p->tv_sec.
4693 */
4694 static int
4695 sooptcopyin_timeval(struct sockopt *sopt, struct timeval *tv_p)
4696 {
4697 int error;
4698
4699 if (proc_is64bit(sopt->sopt_p)) {
4700 struct user64_timeval tv64;
4701
4702 if (sopt->sopt_valsize < sizeof(tv64)) {
4703 return EINVAL;
4704 }
4705
4706 sopt->sopt_valsize = sizeof(tv64);
4707 if (sopt->sopt_p != kernproc) {
4708 error = copyin(sopt->sopt_val, &tv64, sizeof(tv64));
4709 if (error != 0) {
4710 return error;
4711 }
4712 } else {
4713 caddr_t tmp = __unsafe_forge_bidi_indexable(caddr_t,
4714 CAST_DOWN(caddr_t, sopt->sopt_val),
4715 sizeof(tv64));
4716 bcopy(tmp, &tv64, sizeof(tv64));
4717 }
4718 if (tv64.tv_sec < 0 || tv64.tv_sec > LONG_MAX ||
4719 tv64.tv_usec < 0 || tv64.tv_usec >= 1000000) {
4720 return EDOM;
4721 }
4722
4723 tv_p->tv_sec = (__darwin_time_t)tv64.tv_sec;
4724 tv_p->tv_usec = tv64.tv_usec;
4725 } else {
4726 struct user32_timeval tv32;
4727
4728 if (sopt->sopt_valsize < sizeof(tv32)) {
4729 return EINVAL;
4730 }
4731
4732 sopt->sopt_valsize = sizeof(tv32);
4733 if (sopt->sopt_p != kernproc) {
4734 error = copyin(sopt->sopt_val, &tv32, sizeof(tv32));
4735 if (error != 0) {
4736 return error;
4737 }
4738 } else {
4739 caddr_t tmp = __unsafe_forge_bidi_indexable(caddr_t,
4740 CAST_DOWN(caddr_t, sopt->sopt_val),
4741 sizeof(tv32));
4742 bcopy(tmp, &tv32, sizeof(tv32));
4743 }
4744 #ifndef __LP64__
4745 /*
4746 * K64todo "comparison is always false due to
4747 * limited range of data type"
4748 */
4749 if (tv32.tv_sec < 0 || tv32.tv_sec > LONG_MAX ||
4750 tv32.tv_usec < 0 || tv32.tv_usec >= 1000000) {
4751 return EDOM;
4752 }
4753 #endif
4754 tv_p->tv_sec = tv32.tv_sec;
4755 tv_p->tv_usec = tv32.tv_usec;
4756 }
4757 return 0;
4758 }
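/*
 * From user space the corresponding call would look something like
 * (illustrative only; the values are assumptions):
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * The 64-bit/32-bit split above exists because the user-visible
 * timeval layout differs between those process ABIs.
 */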
4759
4760 int
4761 sooptcopyin_bindtodevice(struct sockopt *sopt, char * __sized_by(bufsize) buf, size_t bufsize)
4762 {
4763 #define MIN_BINDTODEVICE_NAME_SIZE 2
4764 size_t maxlen = bufsize - 1; /* the max string length that fits in the buffer */
4765
4766 if (bufsize < MIN_BINDTODEVICE_NAME_SIZE) {
4767 #if DEBUG || DEVELOPMENT
4768 os_log(OS_LOG_DEFAULT, "%s: bufsize %lu < MIN_BINDTODEVICE_NAME_SIZE %d",
4769 __func__, bufsize, MIN_BINDTODEVICE_NAME_SIZE);
4770 #endif /* DEBUG || DEVELOPMENT */
4771 return EINVAL;
4772 }
4773
4774 memset(buf, 0, bufsize);
4775
4776 /*
4777 * bufsize includes the end-of-string NUL because of the uncertainty whether
4778 * interface names are passed as strings or byte buffers.
4779 * If the user gives us more than the max string length, return EINVAL.
4780 * On success, sopt->sopt_valsize is not modified.
4781 */
4782 maxlen = bufsize - 1;
4783 if (sopt->sopt_valsize > maxlen) {
4784 os_log(OS_LOG_DEFAULT, "%s: sopt_valsize %lu > maxlen %lu",
4785 __func__, sopt->sopt_valsize, maxlen);
4786 return EINVAL;
4787 }
4788
4789 if (sopt->sopt_p != kernproc) {
4790 return copyin(sopt->sopt_val, buf, sopt->sopt_valsize);
4791 } else {
4792 caddr_t tmp = __unsafe_forge_bidi_indexable(caddr_t,
4793 CAST_DOWN(caddr_t, sopt->sopt_val),
4794 sopt->sopt_valsize);
4795 bcopy(tmp, buf, sopt->sopt_valsize);
4796 }
4797
4798 return 0;
4799 #undef MIN_BINDTODEVICE_NAME_SIZE
4800 }
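/*
 * Illustrative user-space usage, assuming the conventional string form
 * of the option value (the interface name "en0" is just an example):
 *
 *	const char *ifname = "en0";
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
 *	    (socklen_t)strlen(ifname));
 *
 * Because the buffer is zero-filled above, the name may be passed with
 * or without its terminating NUL, as long as it fits in bufsize - 1
 * bytes. Note that SOL_SOCKET/SO_BINDTODEVICE requests are forwarded
 * to the protocol's pr_ctloutput by sosetoptlock() and sogetoptlock()
 * below; this helper only copies the name in safely.
 */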
4801
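/*
 * soopt_cred_check: check a privilege against the socket's effective
 * credential. If the socket is delegated (SOF_DELEGATED) and
 * ignore_delegate is false, the delegated process's credential is used;
 * otherwise the socket's own credential. When allow_root is true, a
 * root-owned credential passes without consulting priv_check_cred().
 */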
4802 int
4803 soopt_cred_check(struct socket *so, int priv, boolean_t allow_root,
4804 boolean_t ignore_delegate)
4805 {
4806 kauth_cred_t cred = NULL;
4807 proc_t ep = PROC_NULL;
4808 uid_t uid;
4809 int error = 0;
4810
4811 if (ignore_delegate == false && so->so_flags & SOF_DELEGATED) {
4812 ep = proc_find(so->e_pid);
4813 if (ep) {
4814 cred = kauth_cred_proc_ref(ep);
4815 }
4816 }
4817
4818 uid = kauth_cred_getuid(cred ? cred : so->so_cred);
4819
4820 /* uid is 0 for root */
4821 if (uid != 0 || !allow_root) {
4822 error = priv_check_cred(cred ? cred : so->so_cred, priv, 0);
4823 }
4824 if (cred) {
4825 kauth_cred_unref(&cred);
4826 }
4827 if (ep != PROC_NULL) {
4828 proc_rele(ep);
4829 }
4830
4831 return error;
4832 }
4833
4834 /*
4835 * Returns: 0 Success
4836 * EINVAL
4837 * ENOPROTOOPT
4838 * ENOBUFS
4839 * EDOM
4840 * sooptcopyin:EINVAL
4841 * sooptcopyin:EFAULT
4842 * sooptcopyin_timeval:EINVAL
4843 * sooptcopyin_timeval:EFAULT
4844 * sooptcopyin_timeval:EDOM
4845 * <pr_ctloutput>:EOPNOTSUPP[AF_UNIX]
4846 * <pr_ctloutput>:???
4847 * sflt_attach_private:??? [whatever a filter author chooses]
4848 * <sf_setoption>:??? [whatever a filter author chooses]
4849 *
4850 * Notes: Other <pr_ctloutput> returns depend on the protocol family; all
4851 * <sf_setoption> returns depend on what the filter author causes
4852 * their filter to return.
4853 */
4854 int
4855 sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
4856 {
4857 int error, optval;
4858 int64_t long_optval;
4859 struct linger l;
4860 struct timeval tv;
4861
4862 if (sopt->sopt_dir != SOPT_SET) {
4863 sopt->sopt_dir = SOPT_SET;
4864 }
4865
4866 if (dolock) {
4867 socket_lock(so, 1);
4868 }
4869
4870 if ((so->so_state & (SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
4871 (SS_CANTRCVMORE | SS_CANTSENDMORE) &&
4872 (so->so_flags & SOF_NPX_SETOPTSHUT) == 0) {
4873 /* the socket has been shutdown, no more sockopt's */
4874 error = EINVAL;
4875 goto out;
4876 }
4877
4878 error = sflt_setsockopt(so, sopt);
4879 if (error != 0) {
4880 if (error == EJUSTRETURN) {
4881 error = 0;
4882 }
4883 goto out;
4884 }
4885
4886 if (sopt->sopt_level != SOL_SOCKET || sopt->sopt_name == SO_BINDTODEVICE) {
4887 if (so->so_proto != NULL &&
4888 so->so_proto->pr_ctloutput != NULL) {
4889 error = (*so->so_proto->pr_ctloutput)(so, sopt);
4890 goto out;
4891 }
4892 error = ENOPROTOOPT;
4893 } else {
4894 /*
4895 * Allow socket-level (SOL_SOCKET) options to be filtered by
4896 * the protocol layer, if needed. A zero value returned from
4897 * the handler means use default socket-level processing as
4898 * done by the rest of this routine. Otherwise, any other
4899 * return value indicates that the option is unsupported.
4900 */
4901 if (so->so_proto != NULL && (error = so->so_proto->pr_usrreqs->
4902 pru_socheckopt(so, sopt)) != 0) {
4903 goto out;
4904 }
4905
4906 error = 0;
4907 switch (sopt->sopt_name) {
4908 case SO_LINGER:
4909 case SO_LINGER_SEC: {
4910 error = sooptcopyin(sopt, &l, sizeof(l), sizeof(l));
4911 if (error != 0) {
4912 goto out;
4913 }
4914 /* Make sure to use sane values */
4915 if (sopt->sopt_name == SO_LINGER) {
4916 so->so_linger = (short)l.l_linger;
4917 } else {
4918 so->so_linger = (short)((long)l.l_linger * hz);
4919 }
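/*
 * so_linger is kept internally in clock ticks: SO_LINGER stores the
 * caller's l_linger value as-is, while SO_LINGER_SEC treats it as
 * seconds and multiplies by hz; sogetoptlock() below performs the
 * inverse conversion. A minimal user-space sketch using the seconds
 * variant (the values are examples):
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER_SEC, &l, sizeof(l));
 */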
4920 if (l.l_onoff != 0) {
4921 so->so_options |= SO_LINGER;
4922 } else {
4923 so->so_options &= ~SO_LINGER;
4924 }
4925 break;
4926 }
4927 case SO_DEBUG:
4928 case SO_KEEPALIVE:
4929 case SO_DONTROUTE:
4930 case SO_USELOOPBACK:
4931 case SO_BROADCAST:
4932 case SO_REUSEADDR:
4933 case SO_REUSEPORT:
4934 case SO_OOBINLINE:
4935 case SO_TIMESTAMP:
4936 case SO_TIMESTAMP_MONOTONIC:
4937 case SO_TIMESTAMP_CONTINUOUS:
4938 case SO_DONTTRUNC:
4939 case SO_WANTMORE:
4940 case SO_WANTOOBFLAG:
4941 case SO_NOWAKEFROMSLEEP:
4942 case SO_NOAPNFALLBK:
4943 error = sooptcopyin(sopt, &optval, sizeof(optval),
4944 sizeof(optval));
4945 if (error != 0) {
4946 goto out;
4947 }
4948 if (optval) {
4949 so->so_options |= sopt->sopt_name;
4950 } else {
4951 so->so_options &= ~sopt->sopt_name;
4952 }
4953 #if SKYWALK
4954 inp_update_netns_flags(so);
4955 #endif /* SKYWALK */
4956 break;
4957
4958 case SO_SNDBUF:
4959 case SO_RCVBUF:
4960 case SO_SNDLOWAT:
4961 case SO_RCVLOWAT:
4962 error = sooptcopyin(sopt, &optval, sizeof(optval),
4963 sizeof(optval));
4964 if (error != 0) {
4965 goto out;
4966 }
4967
4968 /*
4969 * Values < 1 make no sense for any of these
4970 * options, so disallow them.
4971 */
4972 if (optval < 1) {
4973 error = EINVAL;
4974 goto out;
4975 }
4976
4977 switch (sopt->sopt_name) {
4978 case SO_SNDBUF:
4979 case SO_RCVBUF: {
4980 struct sockbuf *sb =
4981 (sopt->sopt_name == SO_SNDBUF) ?
4982 &so->so_snd : &so->so_rcv;
4983 if (sbreserve(sb, (u_int32_t)optval) == 0) {
4984 error = ENOBUFS;
4985 goto out;
4986 }
4987 sb->sb_flags |= SB_USRSIZE;
4988 sb->sb_flags &= ~SB_AUTOSIZE;
4989 sb->sb_idealsize = (u_int32_t)optval;
4990 break;
4991 }
4992 /*
4993 * Make sure the low-water is never greater than
4994 * the high-water.
4995 */
4996 case SO_SNDLOWAT: {
4997 int space = sbspace(&so->so_snd);
4998 uint32_t hiwat = so->so_snd.sb_hiwat;
4999
5000 if (so->so_snd.sb_flags & SB_UNIX) {
5001 struct unpcb *unp =
5002 (struct unpcb *)(so->so_pcb);
5003 if (unp != NULL &&
5004 unp->unp_conn != NULL) {
5005 struct socket *so2 = unp->unp_conn->unp_socket;
5006 hiwat += unp->unp_conn->unp_cc;
5007 space = sbspace(&so2->so_rcv);
5008 }
5009 }
5010
5011 so->so_snd.sb_lowat =
5012 (optval > hiwat) ?
5013 hiwat : optval;
5014
5015 if (space >= so->so_snd.sb_lowat) {
5016 sowwakeup(so);
5017 }
5018 break;
5019 }
5020 case SO_RCVLOWAT: {
5021 int64_t data_len;
5022 so->so_rcv.sb_lowat =
5023 (optval > so->so_rcv.sb_hiwat) ?
5024 so->so_rcv.sb_hiwat : optval;
5025 if (so->so_rcv.sb_flags & SB_UNIX) {
5026 struct unpcb *unp =
5027 (struct unpcb *)(so->so_pcb);
5028 if (unp != NULL &&
5029 unp->unp_conn != NULL) {
5030 struct socket *so2 = unp->unp_conn->unp_socket;
5031 data_len = so2->so_snd.sb_cc
5032 - so2->so_snd.sb_ctl;
5033 } else {
5034 data_len = so->so_rcv.sb_cc
5035 - so->so_rcv.sb_ctl;
5036 }
5037 } else {
5038 data_len = so->so_rcv.sb_cc
5039 - so->so_rcv.sb_ctl;
5040 }
5041
5042 if (data_len >= so->so_rcv.sb_lowat) {
5043 sorwakeup(so);
5044 }
5045 break;
5046 }
5047 }
5048 break;
5049
5050 case SO_SNDTIMEO:
5051 case SO_RCVTIMEO:
5052 error = sooptcopyin_timeval(sopt, &tv);
5053 if (error != 0) {
5054 goto out;
5055 }
5056
5057 switch (sopt->sopt_name) {
5058 case SO_SNDTIMEO:
5059 so->so_snd.sb_timeo = tv;
5060 break;
5061 case SO_RCVTIMEO:
5062 so->so_rcv.sb_timeo = tv;
5063 break;
5064 }
5065 break;
5066
5067 case SO_NKE: {
5068 struct so_nke nke;
5069
5070 error = sooptcopyin(sopt, &nke, sizeof(nke),
5071 sizeof(nke));
5072 if (error != 0) {
5073 goto out;
5074 }
5075
5076 error = sflt_attach_internal(so, nke.nke_handle);
5077 break;
5078 }
5079
5080 case SO_NOSIGPIPE:
5081 error = sooptcopyin(sopt, &optval, sizeof(optval),
5082 sizeof(optval));
5083 if (error != 0) {
5084 goto out;
5085 }
5086 if (optval != 0) {
5087 so->so_flags |= SOF_NOSIGPIPE;
5088 } else {
5089 so->so_flags &= ~SOF_NOSIGPIPE;
5090 }
5091 break;
5092
5093 case SO_NOADDRERR:
5094 error = sooptcopyin(sopt, &optval, sizeof(optval),
5095 sizeof(optval));
5096 if (error != 0) {
5097 goto out;
5098 }
5099 if (optval != 0) {
5100 so->so_flags |= SOF_NOADDRAVAIL;
5101 } else {
5102 so->so_flags &= ~SOF_NOADDRAVAIL;
5103 }
5104 break;
5105
5106 case SO_REUSESHAREUID:
5107 error = sooptcopyin(sopt, &optval, sizeof(optval),
5108 sizeof(optval));
5109 if (error != 0) {
5110 goto out;
5111 }
5112 if (optval != 0) {
5113 so->so_flags |= SOF_REUSESHAREUID;
5114 } else {
5115 so->so_flags &= ~SOF_REUSESHAREUID;
5116 }
5117 break;
5118
5119 case SO_NOTIFYCONFLICT:
5120 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
5121 error = EPERM;
5122 goto out;
5123 }
5124 error = sooptcopyin(sopt, &optval, sizeof(optval),
5125 sizeof(optval));
5126 if (error != 0) {
5127 goto out;
5128 }
5129 if (optval != 0) {
5130 so->so_flags |= SOF_NOTIFYCONFLICT;
5131 } else {
5132 so->so_flags &= ~SOF_NOTIFYCONFLICT;
5133 }
5134 break;
5135
5136 case SO_RESTRICTIONS:
5137 error = sooptcopyin(sopt, &optval, sizeof(optval),
5138 sizeof(optval));
5139 if (error != 0) {
5140 goto out;
5141 }
5142
5143 error = so_set_restrictions(so, optval);
5144 break;
5145
5146 case SO_AWDL_UNRESTRICTED:
5147 if (SOCK_DOM(so) != PF_INET &&
5148 SOCK_DOM(so) != PF_INET6) {
5149 error = EOPNOTSUPP;
5150 goto out;
5151 }
5152 error = sooptcopyin(sopt, &optval, sizeof(optval),
5153 sizeof(optval));
5154 if (error != 0) {
5155 goto out;
5156 }
5157 if (optval != 0) {
5158 error = soopt_cred_check(so,
5159 PRIV_NET_RESTRICTED_AWDL, false, false);
5160 if (error == 0) {
5161 inp_set_awdl_unrestricted(
5162 sotoinpcb(so));
5163 }
5164 } else {
5165 inp_clear_awdl_unrestricted(sotoinpcb(so));
5166 }
5167 break;
5168 case SO_INTCOPROC_ALLOW:
5169 if (SOCK_DOM(so) != PF_INET6) {
5170 error = EOPNOTSUPP;
5171 goto out;
5172 }
5173 error = sooptcopyin(sopt, &optval, sizeof(optval),
5174 sizeof(optval));
5175 if (error != 0) {
5176 goto out;
5177 }
5178 if (optval != 0 &&
5179 inp_get_intcoproc_allowed(sotoinpcb(so)) == FALSE) {
5180 error = soopt_cred_check(so,
5181 PRIV_NET_RESTRICTED_INTCOPROC, false, false);
5182 if (error == 0) {
5183 inp_set_intcoproc_allowed(
5184 sotoinpcb(so));
5185 }
5186 } else if (optval == 0) {
5187 inp_clear_intcoproc_allowed(sotoinpcb(so));
5188 }
5189 break;
5190
5191 case SO_LABEL:
5192 error = EOPNOTSUPP;
5193 break;
5194
5195 case SO_UPCALLCLOSEWAIT:
5196 error = sooptcopyin(sopt, &optval, sizeof(optval),
5197 sizeof(optval));
5198 if (error != 0) {
5199 goto out;
5200 }
5201 if (optval != 0) {
5202 so->so_flags |= SOF_UPCALLCLOSEWAIT;
5203 } else {
5204 so->so_flags &= ~SOF_UPCALLCLOSEWAIT;
5205 }
5206 break;
5207
5208 case SO_RANDOMPORT:
5209 error = sooptcopyin(sopt, &optval, sizeof(optval),
5210 sizeof(optval));
5211 if (error != 0) {
5212 goto out;
5213 }
5214 if (optval != 0) {
5215 so->so_flags |= SOF_BINDRANDOMPORT;
5216 } else {
5217 so->so_flags &= ~SOF_BINDRANDOMPORT;
5218 }
5219 break;
5220
5221 case SO_NP_EXTENSIONS: {
5222 struct so_np_extensions sonpx;
5223
5224 error = sooptcopyin(sopt, &sonpx, sizeof(sonpx),
5225 sizeof(sonpx));
5226 if (error != 0) {
5227 goto out;
5228 }
5229 if (sonpx.npx_mask & ~SONPX_MASK_VALID) {
5230 error = EINVAL;
5231 goto out;
5232 }
5233 /*
5234 * Only one bit defined for now
5235 */
5236 if ((sonpx.npx_mask & SONPX_SETOPTSHUT)) {
5237 if ((sonpx.npx_flags & SONPX_SETOPTSHUT)) {
5238 so->so_flags |= SOF_NPX_SETOPTSHUT;
5239 } else {
5240 so->so_flags &= ~SOF_NPX_SETOPTSHUT;
5241 }
5242 }
5243 break;
5244 }
5245
5246 case SO_TRAFFIC_CLASS: {
5247 error = sooptcopyin(sopt, &optval, sizeof(optval),
5248 sizeof(optval));
5249 if (error != 0) {
5250 goto out;
5251 }
5252 if (optval >= SO_TC_NET_SERVICE_OFFSET) {
5253 int netsvc = optval - SO_TC_NET_SERVICE_OFFSET;
5254 error = so_set_net_service_type(so, netsvc);
5255 goto out;
5256 }
5257 error = so_set_traffic_class(so, optval);
5258 if (error != 0) {
5259 goto out;
5260 }
5261 so->so_flags1 &= ~SOF1_TC_NET_SERV_TYPE;
5262 so->so_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
5263 break;
5264 }
5265
5266 case SO_RECV_TRAFFIC_CLASS: {
5267 error = sooptcopyin(sopt, &optval, sizeof(optval),
5268 sizeof(optval));
5269 if (error != 0) {
5270 goto out;
5271 }
5272 if (optval == 0) {
5273 so->so_flags &= ~SOF_RECV_TRAFFIC_CLASS;
5274 } else {
5275 so->so_flags |= SOF_RECV_TRAFFIC_CLASS;
5276 }
5277 break;
5278 }
5279
5280 #if (DEVELOPMENT || DEBUG)
5281 case SO_TRAFFIC_CLASS_DBG: {
5282 struct so_tcdbg so_tcdbg;
5283
5284 error = sooptcopyin(sopt, &so_tcdbg,
5285 sizeof(struct so_tcdbg), sizeof(struct so_tcdbg));
5286 if (error != 0) {
5287 goto out;
5288 }
5289 error = so_set_tcdbg(so, &so_tcdbg);
5290 if (error != 0) {
5291 goto out;
5292 }
5293 break;
5294 }
5295 #endif /* (DEVELOPMENT || DEBUG) */
5296
5297 case SO_PRIVILEGED_TRAFFIC_CLASS:
5298 error = priv_check_cred(kauth_cred_get(),
5299 PRIV_NET_PRIVILEGED_TRAFFIC_CLASS, 0);
5300 if (error != 0) {
5301 goto out;
5302 }
5303 error = sooptcopyin(sopt, &optval, sizeof(optval),
5304 sizeof(optval));
5305 if (error != 0) {
5306 goto out;
5307 }
5308 if (optval == 0) {
5309 so->so_flags &= ~SOF_PRIVILEGED_TRAFFIC_CLASS;
5310 } else {
5311 so->so_flags |= SOF_PRIVILEGED_TRAFFIC_CLASS;
5312 }
5313 break;
5314
5315 #if (DEVELOPMENT || DEBUG)
5316 case SO_DEFUNCTIT:
5317 error = sosetdefunct(current_proc(), so, 0, FALSE);
5318 if (error == 0) {
5319 error = sodefunct(current_proc(), so, 0);
5320 }
5321
5322 break;
5323 #endif /* (DEVELOPMENT || DEBUG) */
5324
5325 case SO_DEFUNCTOK:
5326 error = sooptcopyin(sopt, &optval, sizeof(optval),
5327 sizeof(optval));
5328 if (error != 0 || (so->so_flags & SOF_DEFUNCT)) {
5329 if (error == 0) {
5330 error = EBADF;
5331 }
5332 goto out;
5333 }
5334 /*
5335 * Any process can set SO_DEFUNCTOK (clear
5336 * SOF_NODEFUNCT), but only root can clear
5337 * SO_DEFUNCTOK (set SOF_NODEFUNCT).
5338 */
5339 if (optval == 0 &&
5340 kauth_cred_issuser(kauth_cred_get()) == 0) {
5341 error = EPERM;
5342 goto out;
5343 }
5344 if (optval) {
5345 so->so_flags &= ~SOF_NODEFUNCT;
5346 } else {
5347 so->so_flags |= SOF_NODEFUNCT;
5348 }
5349
5350 if (SOCK_DOM(so) == PF_INET ||
5351 SOCK_DOM(so) == PF_INET6) {
5352 char s[MAX_IPv6_STR_LEN];
5353 char d[MAX_IPv6_STR_LEN];
5354 struct inpcb *inp = sotoinpcb(so);
5355
5356 SODEFUNCTLOG("%s[%d, %s]: so 0x%llu "
5357 "[%s %s:%d -> %s:%d] is now marked "
5358 "as %seligible for "
5359 "defunct\n", __func__, proc_selfpid(),
5360 proc_best_name(current_proc()),
5361 so->so_gencnt,
5362 (SOCK_TYPE(so) == SOCK_STREAM) ?
5363 "TCP" : "UDP", inet_ntop(SOCK_DOM(so),
5364 ((SOCK_DOM(so) == PF_INET) ?
5365 (void *)&inp->inp_laddr.s_addr :
5366 (void *)&inp->in6p_laddr), s, sizeof(s)),
5367 ntohs(inp->in6p_lport),
5368 inet_ntop(SOCK_DOM(so),
5369 (SOCK_DOM(so) == PF_INET) ?
5370 (void *)&inp->inp_faddr.s_addr :
5371 (void *)&inp->in6p_faddr, d, sizeof(d)),
5372 ntohs(inp->in6p_fport),
5373 (so->so_flags & SOF_NODEFUNCT) ?
5374 "not " : "");
5375 } else {
5376 SODEFUNCTLOG("%s[%d, %s]: so 0x%llu [%d,%d] "
5377 "is now marked as %seligible for "
5378 "defunct\n",
5379 __func__, proc_selfpid(),
5380 proc_best_name(current_proc()),
5381 so->so_gencnt,
5382 SOCK_DOM(so), SOCK_TYPE(so),
5383 (so->so_flags & SOF_NODEFUNCT) ?
5384 "not " : "");
5385 }
5386 break;
5387
5388 case SO_ISDEFUNCT:
5389 /* This option is not settable */
5390 error = EINVAL;
5391 break;
5392
5393 case SO_OPPORTUNISTIC:
5394 error = sooptcopyin(sopt, &optval, sizeof(optval),
5395 sizeof(optval));
5396 if (error == 0) {
5397 error = so_set_opportunistic(so, optval);
5398 }
5399 break;
5400
5401 case SO_FLUSH:
5402 /* This option is handled by lower layer(s) */
5403 error = 0;
5404 break;
5405
5406 case SO_RECV_ANYIF:
5407 error = sooptcopyin(sopt, &optval, sizeof(optval),
5408 sizeof(optval));
5409 if (error == 0) {
5410 error = so_set_recv_anyif(so, optval);
5411 }
5412 break;
5413
5414 case SO_TRAFFIC_MGT_BACKGROUND: {
5415 /* This option is handled by lower layer(s) */
5416 error = 0;
5417 break;
5418 }
5419
5420 #if FLOW_DIVERT
5421 case SO_FLOW_DIVERT_TOKEN:
5422 error = flow_divert_token_set(so, sopt);
5423 break;
5424 #endif /* FLOW_DIVERT */
5425
5426
5427 case SO_DELEGATED:
5428 if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
5429 sizeof(optval))) != 0) {
5430 break;
5431 }
5432
5433 error = so_set_effective_pid(so, optval, sopt->sopt_p, true);
5434 break;
5435
5436 case SO_DELEGATED_UUID: {
5437 uuid_t euuid;
5438
5439 if ((error = sooptcopyin(sopt, &euuid, sizeof(euuid),
5440 sizeof(euuid))) != 0) {
5441 break;
5442 }
5443
5444 error = so_set_effective_uuid(so, euuid, sopt->sopt_p, true);
5445 break;
5446 }
5447
5448 #if NECP
5449 case SO_NECP_ATTRIBUTES:
5450 if (SOCK_DOM(so) == PF_MULTIPATH) {
5451 /* Handled by MPTCP itself */
5452 break;
5453 }
5454
5455 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
5456 error = EINVAL;
5457 goto out;
5458 }
5459
5460 error = necp_set_socket_attributes(&sotoinpcb(so)->inp_necp_attributes, sopt);
5461 break;
5462
5463 case SO_NECP_CLIENTUUID: {
5464 if (SOCK_DOM(so) == PF_MULTIPATH) {
5465 /* Handled by MPTCP itself */
5466 break;
5467 }
5468
5469 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
5470 error = EINVAL;
5471 goto out;
5472 }
5473
5474 struct inpcb *inp = sotoinpcb(so);
5475 if (!uuid_is_null(inp->necp_client_uuid)) {
5476 // Clear out the old client UUID if present
5477 necp_inpcb_remove_cb(inp);
5478 }
5479
5480 error = sooptcopyin(sopt, &inp->necp_client_uuid,
5481 sizeof(uuid_t), sizeof(uuid_t));
5482 if (error != 0) {
5483 goto out;
5484 }
5485
5486 if (uuid_is_null(inp->necp_client_uuid)) {
5487 error = EINVAL;
5488 goto out;
5489 }
5490
5491 pid_t current_pid = proc_pid(current_proc());
5492 error = necp_client_register_socket_flow(current_pid,
5493 inp->necp_client_uuid, inp);
5494 if (error != 0) {
5495 uuid_clear(inp->necp_client_uuid);
5496 goto out;
5497 }
5498
5499 if (inp->inp_lport != 0) {
5500 // There is a bound local port, so this is not
5501 // a fresh socket. Assign to the client.
5502 necp_client_assign_from_socket(current_pid, inp->necp_client_uuid, inp);
5503 }
5504
5505 break;
5506 }
5507 case SO_NECP_LISTENUUID: {
5508 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
5509 error = EINVAL;
5510 goto out;
5511 }
5512
5513 struct inpcb *inp = sotoinpcb(so);
5514 if (!uuid_is_null(inp->necp_client_uuid)) {
5515 error = EINVAL;
5516 goto out;
5517 }
5518
5519 error = sooptcopyin(sopt, &inp->necp_client_uuid,
5520 sizeof(uuid_t), sizeof(uuid_t));
5521 if (error != 0) {
5522 goto out;
5523 }
5524
5525 if (uuid_is_null(inp->necp_client_uuid)) {
5526 error = EINVAL;
5527 goto out;
5528 }
5529
5530 error = necp_client_register_socket_listener(proc_pid(current_proc()),
5531 inp->necp_client_uuid, inp);
5532 if (error != 0) {
5533 uuid_clear(inp->necp_client_uuid);
5534 goto out;
5535 }
5536
5537 // Mark that the port registration is held by NECP
5538 inp->inp_flags2 |= INP2_EXTERNAL_PORT;
5539
5540 break;
5541 }
5542
5543 case SO_RESOLVER_SIGNATURE: {
5544 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
5545 error = EINVAL;
5546 goto out;
5547 }
5548 error = necp_set_socket_resolver_signature(sotoinpcb(so), sopt);
5549 break;
5550 }
5551 #endif /* NECP */
5552
5553 case SO_EXTENDED_BK_IDLE:
5554 error = sooptcopyin(sopt, &optval, sizeof(optval),
5555 sizeof(optval));
5556 if (error == 0) {
5557 error = so_set_extended_bk_idle(so, optval);
5558 }
5559 break;
5560
5561 case SO_MARK_CELLFALLBACK:
5562 error = sooptcopyin(sopt, &optval, sizeof(optval),
5563 sizeof(optval));
5564 if (error != 0) {
5565 goto out;
5566 }
5567 if (optval < 0) {
5568 error = EINVAL;
5569 goto out;
5570 }
5571 if (optval == 0) {
5572 so->so_flags1 &= ~SOF1_CELLFALLBACK;
5573 } else {
5574 so->so_flags1 |= SOF1_CELLFALLBACK;
5575 }
5576 break;
5577
5578 case SO_MARK_CELLFALLBACK_UUID:
5579 {
5580 struct so_mark_cellfallback_uuid_args args;
5581
5582 error = sooptcopyin(sopt, &args, sizeof(args),
5583 sizeof(args));
5584 if (error != 0) {
5585 goto out;
5586 }
5587 error = nstat_userland_mark_rnf_override(args.flow_uuid,
5588 args.flow_cellfallback);
5589 break;
5590 }
5591
5592 case SO_FALLBACK_MODE:
5593 error = sooptcopyin(sopt, &optval, sizeof(optval),
5594 sizeof(optval));
5595 if (error != 0) {
5596 goto out;
5597 }
5598 if (optval < SO_FALLBACK_MODE_NONE ||
5599 optval > SO_FALLBACK_MODE_PREFER) {
5600 error = EINVAL;
5601 goto out;
5602 }
5603 so->so_fallback_mode = (u_int8_t)optval;
5604 break;
5605
5606 case SO_MARK_KNOWN_TRACKER: {
5607 error = sooptcopyin(sopt, &optval, sizeof(optval),
5608 sizeof(optval));
5609 if (error != 0) {
5610 goto out;
5611 }
5612 if (optval < 0) {
5613 error = EINVAL;
5614 goto out;
5615 }
5616 if (optval == 0) {
5617 so->so_flags1 &= ~SOF1_KNOWN_TRACKER;
5618 } else {
5619 so->so_flags1 |= SOF1_KNOWN_TRACKER;
5620 }
5621 break;
5622 }
5623
5624 case SO_MARK_KNOWN_TRACKER_NON_APP_INITIATED: {
5625 error = sooptcopyin(sopt, &optval, sizeof(optval),
5626 sizeof(optval));
5627 if (error != 0) {
5628 goto out;
5629 }
5630 if (optval < 0) {
5631 error = EINVAL;
5632 goto out;
5633 }
5634 if (optval == 0) {
5635 so->so_flags1 &= ~SOF1_TRACKER_NON_APP_INITIATED;
5636 } else {
5637 so->so_flags1 |= SOF1_TRACKER_NON_APP_INITIATED;
5638 }
5639 break;
5640 }
5641
5642 case SO_MARK_APPROVED_APP_DOMAIN: {
5643 error = sooptcopyin(sopt, &optval, sizeof(optval),
5644 sizeof(optval));
5645 if (error != 0) {
5646 goto out;
5647 }
5648 if (optval < 0) {
5649 error = EINVAL;
5650 goto out;
5651 }
5652 if (optval == 0) {
5653 so->so_flags1 &= ~SOF1_APPROVED_APP_DOMAIN;
5654 } else {
5655 so->so_flags1 |= SOF1_APPROVED_APP_DOMAIN;
5656 }
5657 break;
5658 }
5659
5660 case SO_STATISTICS_EVENT:
5661 error = sooptcopyin(sopt, &long_optval,
5662 sizeof(long_optval), sizeof(long_optval));
5663 if (error != 0) {
5664 goto out;
5665 }
5666 u_int64_t nstat_event = 0;
5667 error = so_statistics_event_to_nstat_event(
5668 &long_optval, &nstat_event);
5669 if (error != 0) {
5670 goto out;
5671 }
5672 nstat_pcb_event(sotoinpcb(so), nstat_event);
5673 break;
5674
5675 case SO_NET_SERVICE_TYPE: {
5676 error = sooptcopyin(sopt, &optval, sizeof(optval),
5677 sizeof(optval));
5678 if (error != 0) {
5679 goto out;
5680 }
5681 error = so_set_net_service_type(so, optval);
5682 break;
5683 }
5684
5685 case SO_QOSMARKING_POLICY_OVERRIDE:
5686 error = priv_check_cred(kauth_cred_get(),
5687 PRIV_NET_QOSMARKING_POLICY_OVERRIDE, 0);
5688 if (error != 0) {
5689 goto out;
5690 }
5691 error = sooptcopyin(sopt, &optval, sizeof(optval),
5692 sizeof(optval));
5693 if (error != 0) {
5694 goto out;
5695 }
5696 if (optval == 0) {
5697 so->so_flags1 &= ~SOF1_QOSMARKING_POLICY_OVERRIDE;
5698 } else {
5699 so->so_flags1 |= SOF1_QOSMARKING_POLICY_OVERRIDE;
5700 }
5701 break;
5702
5703 case SO_MPKL_SEND_INFO: {
5704 struct so_mpkl_send_info so_mpkl_send_info;
5705
5706 error = sooptcopyin(sopt, &so_mpkl_send_info,
5707 sizeof(struct so_mpkl_send_info), sizeof(struct so_mpkl_send_info));
5708 if (error != 0) {
5709 goto out;
5710 }
5711 uuid_copy(so->so_mpkl_send_uuid, so_mpkl_send_info.mpkl_uuid);
5712 so->so_mpkl_send_proto = so_mpkl_send_info.mpkl_proto;
5713
5714 if (uuid_is_null(so->so_mpkl_send_uuid) && so->so_mpkl_send_proto == 0) {
5715 so->so_flags1 &= ~SOF1_MPKL_SEND_INFO;
5716 } else {
5717 so->so_flags1 |= SOF1_MPKL_SEND_INFO;
5718 }
5719 break;
5720 }
5721 case SO_WANT_KEV_SOCKET_CLOSED: {
5722 error = sooptcopyin(sopt, &optval, sizeof(optval),
5723 sizeof(optval));
5724 if (error != 0) {
5725 goto out;
5726 }
5727 if (optval == 0) {
5728 so->so_flags1 &= ~SOF1_WANT_KEV_SOCK_CLOSED;
5729 } else {
5730 so->so_flags1 |= SOF1_WANT_KEV_SOCK_CLOSED;
5731 }
5732 break;
5733 }
5734 case SO_MARK_WAKE_PKT: {
5735 error = sooptcopyin(sopt, &optval, sizeof(optval),
5736 sizeof(optval));
5737 if (error != 0) {
5738 goto out;
5739 }
5740 if (optval == 0) {
5741 so->so_flags &= ~SOF_MARK_WAKE_PKT;
5742 } else {
5743 so->so_flags |= SOF_MARK_WAKE_PKT;
5744 }
5745 break;
5746 }
5747 case SO_RECV_WAKE_PKT: {
5748 error = sooptcopyin(sopt, &optval, sizeof(optval),
5749 sizeof(optval));
5750 if (error != 0) {
5751 goto out;
5752 }
5753 if (optval == 0) {
5754 so->so_flags &= ~SOF_RECV_WAKE_PKT;
5755 } else {
5756 so->so_flags |= SOF_RECV_WAKE_PKT;
5757 }
5758 break;
5759 }
5760 case SO_APPLICATION_ID: {
5761 so_application_id_t application_id = { 0 };
5762
5763 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
5764 error = EINVAL;
5765 goto out;
5766 }
5767 error = sooptcopyin(sopt, &application_id, sizeof(application_id),
5768 sizeof(application_id));
5769 if (error != 0) {
5770 goto out;
5771 }
5772
5773 // The user needs to match
5774 if (kauth_cred_getuid(so->so_cred) != application_id.uid) {
5775 error = EINVAL;
5776 printf("setsockopt: SO_APPLICATION_ID - wrong uid");
5777 goto out;
5778 }
5779 error = so_set_effective_uuid(so, application_id.effective_uuid, sopt->sopt_p, true);
5780 if (error != 0) {
5781 printf("setsockopt: SO_APPLICATION_ID - failed to set e_uuid");
5782 goto out;
5783 }
5784 if (application_id.persona_id != PERSONA_ID_NONE) {
5785 so->so_persona_id = application_id.persona_id;
5786 }
5787 break;
5788 }
5789 case SO_MARK_DOMAIN_INFO_SILENT:
5790 error = sooptcopyin(sopt, &optval, sizeof(optval),
5791 sizeof(optval));
5792 if (error != 0) {
5793 goto out;
5794 }
5795 if (optval < 0) {
5796 error = EINVAL;
5797 goto out;
5798 }
5799 if (optval == 0) {
5800 so->so_flags1 &= ~SOF1_DOMAIN_INFO_SILENT;
5801 } else {
5802 so->so_flags1 |= SOF1_DOMAIN_INFO_SILENT;
5803 }
5804 break;
5805
5806 default:
5807 error = ENOPROTOOPT;
5808 break;
5809 }
5810 if (error == 0 && so->so_proto != NULL &&
5811 so->so_proto->pr_ctloutput != NULL) {
5812 (void) so->so_proto->pr_ctloutput(so, sopt);
5813 }
5814 }
5815 out:
5816 if (dolock) {
5817 socket_unlock(so, 1);
5818 }
5819 return error;
5820 }
5821
5822 /* Helper routines for getsockopt */
5823 int
5824 sooptcopyout(struct sockopt *sopt, void *__sized_by(len) buf, size_t len)
5825 {
5826 int error;
5827 size_t valsize;
5828
5829 error = 0;
5830
5831 /*
5832 * Documented get behavior is that we always return a value,
5833 * possibly truncated to fit in the user's buffer.
5834 * Traditional behavior is that we always tell the user
5835 * precisely how much we copied, rather than something useful
5836 * like the total amount we had available for her.
5837 * Note that this interface is not idempotent; the entire answer must
5838 * be generated ahead of time.
5839 */
5840 valsize = MIN(len, sopt->sopt_valsize);
5841 sopt->sopt_valsize = valsize;
5842 if (sopt->sopt_valsize != 0 && sopt->sopt_val != USER_ADDR_NULL) {
5843 if (sopt->sopt_p != kernproc) {
5844 error = copyout(buf, sopt->sopt_val, valsize);
5845 } else {
5846 caddr_t tmp = __unsafe_forge_bidi_indexable(caddr_t,
5847 CAST_DOWN(caddr_t, sopt->sopt_val),
5848 valsize);
5849 bcopy(buf, tmp, valsize);
5850 }
5851 }
5852 return error;
5853 }
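/*
 * Typical in-kernel caller pattern, mirroring sooptcopyin() (minimal
 * sketch; see e.g. the integer cases in sogetoptlock() below):
 *
 *	int optval = so->so_type;
 *
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
 */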
5854
5855 static int
5856 sooptcopyout_timeval(struct sockopt *sopt, const struct timeval *tv_p)
5857 {
5858 int error;
5859 size_t len;
5860 struct user64_timeval tv64 = {};
5861 struct user32_timeval tv32 = {};
5862 const void * val;
5863 size_t valsize;
5864
5865 error = 0;
5866 if (proc_is64bit(sopt->sopt_p)) {
5867 len = sizeof(tv64);
5868 tv64.tv_sec = tv_p->tv_sec;
5869 tv64.tv_usec = tv_p->tv_usec;
5870 val = &tv64;
5871 } else {
5872 len = sizeof(tv32);
5873 tv32.tv_sec = (user32_time_t)tv_p->tv_sec;
5874 tv32.tv_usec = tv_p->tv_usec;
5875 val = &tv32;
5876 }
5877 valsize = MIN(len, sopt->sopt_valsize);
5878 sopt->sopt_valsize = valsize;
5879 if (sopt->sopt_val != USER_ADDR_NULL) {
5880 if (sopt->sopt_p != kernproc) {
5881 error = copyout(val, sopt->sopt_val, valsize);
5882 } else {
5883 caddr_t tmp = __unsafe_forge_bidi_indexable(caddr_t,
5884 CAST_DOWN(caddr_t, sopt->sopt_val),
5885 valsize);
5886 bcopy(val, tmp, valsize);
5887 }
5888 }
5889 return error;
5890 }
5891
5892 /*
5893 * Return: 0 Success
5894 * ENOPROTOOPT
5895 * <pr_ctloutput>:EOPNOTSUPP[AF_UNIX]
5896 * <pr_ctloutput>:???
5897 * <sf_getoption>:???
5898 */
5899 int
5900 sogetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
5901 {
5902 int error, optval;
5903 struct linger l;
5904 struct timeval tv;
5905
5906 if (sopt->sopt_dir != SOPT_GET) {
5907 sopt->sopt_dir = SOPT_GET;
5908 }
5909
5910 if (dolock) {
5911 socket_lock(so, 1);
5912 }
5913
5914 error = sflt_getsockopt(so, sopt);
5915 if (error != 0) {
5916 if (error == EJUSTRETURN) {
5917 error = 0;
5918 }
5919 goto out;
5920 }
5921
5922 if (sopt->sopt_level != SOL_SOCKET || sopt->sopt_name == SO_BINDTODEVICE) {
5923 if (so->so_proto != NULL &&
5924 so->so_proto->pr_ctloutput != NULL) {
5925 error = (*so->so_proto->pr_ctloutput)(so, sopt);
5926 goto out;
5927 }
5928 error = ENOPROTOOPT;
5929 } else {
5930 /*
5931 * Allow socket-level (SOL_SOCKET) options to be filtered by
5932 * the protocol layer, if needed. A zero value returned from
5933 * the handler means use default socket-level processing as
5934 * done by the rest of this routine. Otherwise, any other
5935 * return value indicates that the option is unsupported.
5936 */
5937 if (so->so_proto != NULL && (error = so->so_proto->pr_usrreqs->
5938 pru_socheckopt(so, sopt)) != 0) {
5939 goto out;
5940 }
5941
5942 error = 0;
5943 switch (sopt->sopt_name) {
5944 case SO_LINGER:
5945 case SO_LINGER_SEC:
5946 l.l_onoff = ((so->so_options & SO_LINGER) ? 1 : 0);
5947 l.l_linger = (sopt->sopt_name == SO_LINGER) ?
5948 so->so_linger : so->so_linger / hz;
5949 error = sooptcopyout(sopt, &l, sizeof(l));
5950 break;
5951
5952 case SO_USELOOPBACK:
5953 case SO_DONTROUTE:
5954 case SO_DEBUG:
5955 case SO_KEEPALIVE:
5956 case SO_REUSEADDR:
5957 case SO_REUSEPORT:
5958 case SO_BROADCAST:
5959 case SO_OOBINLINE:
5960 case SO_TIMESTAMP:
5961 case SO_TIMESTAMP_MONOTONIC:
5962 case SO_TIMESTAMP_CONTINUOUS:
5963 case SO_DONTTRUNC:
5964 case SO_WANTMORE:
5965 case SO_WANTOOBFLAG:
5966 case SO_NOWAKEFROMSLEEP:
5967 case SO_NOAPNFALLBK:
5968 optval = so->so_options & sopt->sopt_name;
5969 integer:
5970 error = sooptcopyout(sopt, &optval, sizeof(optval));
5971 break;
5972
5973 case SO_TYPE:
5974 optval = so->so_type;
5975 goto integer;
5976
5977 case SO_NREAD:
5978 if (so->so_proto->pr_flags & PR_ATOMIC) {
5979 int pkt_total;
5980 struct mbuf *m1;
5981
5982 pkt_total = 0;
5983 m1 = so->so_rcv.sb_mb;
5984 while (m1 != NULL) {
5985 if (m_has_mtype(m1, MTF_DATA | MTF_HEADER | MTF_OOBDATA)) {
5986 pkt_total += m1->m_len;
5987 }
5988 m1 = m1->m_next;
5989 }
5990 optval = pkt_total;
5991 } else {
5992 optval = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
5993 }
5994 goto integer;
5995
5996 case SO_NUMRCVPKT:
5997 if (so->so_proto->pr_flags & PR_ATOMIC) {
5998 int cnt = 0;
5999 struct mbuf *m1;
6000
6001 m1 = so->so_rcv.sb_mb;
6002 while (m1 != NULL) {
6003 cnt += 1;
6004 m1 = m1->m_nextpkt;
6005 }
6006 optval = cnt;
6007 goto integer;
6008 } else {
6009 error = ENOPROTOOPT;
6010 break;
6011 }
6012
6013 case SO_NWRITE:
6014 optval = so->so_snd.sb_cc;
6015 goto integer;
6016
6017 case SO_ERROR:
6018 optval = so->so_error;
6019 so->so_error = 0;
6020 goto integer;
6021
6022 case SO_SNDBUF: {
6023 u_int32_t hiwat = so->so_snd.sb_hiwat;
6024
6025 if (so->so_snd.sb_flags & SB_UNIX) {
6026 struct unpcb *unp =
6027 (struct unpcb *)(so->so_pcb);
6028 if (unp != NULL && unp->unp_conn != NULL) {
6029 hiwat += unp->unp_conn->unp_cc;
6030 }
6031 }
6032
6033 optval = hiwat;
6034 goto integer;
6035 }
6036 case SO_RCVBUF:
6037 optval = so->so_rcv.sb_hiwat;
6038 goto integer;
6039
6040 case SO_SNDLOWAT:
6041 optval = so->so_snd.sb_lowat;
6042 goto integer;
6043
6044 case SO_RCVLOWAT:
6045 optval = so->so_rcv.sb_lowat;
6046 goto integer;
6047
6048 case SO_SNDTIMEO:
6049 case SO_RCVTIMEO:
6050 tv = (sopt->sopt_name == SO_SNDTIMEO ?
6051 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
6052
6053 error = sooptcopyout_timeval(sopt, &tv);
6054 break;
6055
6056 case SO_NOSIGPIPE:
6057 optval = (so->so_flags & SOF_NOSIGPIPE);
6058 goto integer;
6059
6060 case SO_NOADDRERR:
6061 optval = (so->so_flags & SOF_NOADDRAVAIL);
6062 goto integer;
6063
6064 case SO_REUSESHAREUID:
6065 optval = (so->so_flags & SOF_REUSESHAREUID);
6066 goto integer;
6067
6068
6069 case SO_NOTIFYCONFLICT:
6070 optval = (so->so_flags & SOF_NOTIFYCONFLICT);
6071 goto integer;
6072
6073 case SO_RESTRICTIONS:
6074 optval = so_get_restrictions(so);
6075 goto integer;
6076
6077 case SO_AWDL_UNRESTRICTED:
6078 if (SOCK_DOM(so) == PF_INET ||
6079 SOCK_DOM(so) == PF_INET6) {
6080 optval = inp_get_awdl_unrestricted(
6081 sotoinpcb(so));
6082 goto integer;
6083 } else {
6084 error = EOPNOTSUPP;
6085 }
6086 break;
6087
6088 case SO_INTCOPROC_ALLOW:
6089 if (SOCK_DOM(so) == PF_INET6) {
6090 optval = inp_get_intcoproc_allowed(
6091 sotoinpcb(so));
6092 goto integer;
6093 } else {
6094 error = EOPNOTSUPP;
6095 }
6096 break;
6097
6098 case SO_LABEL:
6099 error = EOPNOTSUPP;
6100 break;
6101
6102 case SO_PEERLABEL:
6103 error = EOPNOTSUPP;
6104 break;
6105
6106 #ifdef __APPLE_API_PRIVATE
6107 case SO_UPCALLCLOSEWAIT:
6108 optval = (so->so_flags & SOF_UPCALLCLOSEWAIT);
6109 goto integer;
6110 #endif
6111 case SO_RANDOMPORT:
6112 optval = (so->so_flags & SOF_BINDRANDOMPORT);
6113 goto integer;
6114
6115 case SO_NP_EXTENSIONS: {
6116 struct so_np_extensions sonpx = {};
6117
6118 sonpx.npx_flags = (so->so_flags & SOF_NPX_SETOPTSHUT) ?
6119 SONPX_SETOPTSHUT : 0;
6120 sonpx.npx_mask = SONPX_MASK_VALID;
6121
6122 error = sooptcopyout(sopt, &sonpx,
6123 sizeof(struct so_np_extensions));
6124 break;
6125 }
6126
6127 case SO_TRAFFIC_CLASS:
6128 optval = so->so_traffic_class;
6129 goto integer;
6130
6131 case SO_RECV_TRAFFIC_CLASS:
6132 optval = (so->so_flags & SOF_RECV_TRAFFIC_CLASS);
6133 goto integer;
6134
6135 #if (DEVELOPMENT || DEBUG)
6136 case SO_TRAFFIC_CLASS_DBG:
6137 error = sogetopt_tcdbg(so, sopt);
6138 break;
6139 #endif /* (DEVELOPMENT || DEBUG) */
6140
6141 case SO_PRIVILEGED_TRAFFIC_CLASS:
6142 optval = (so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS);
6143 goto integer;
6144
6145 case SO_DEFUNCTOK:
6146 optval = !(so->so_flags & SOF_NODEFUNCT);
6147 goto integer;
6148
6149 case SO_ISDEFUNCT:
6150 optval = (so->so_flags & SOF_DEFUNCT);
6151 goto integer;
6152
6153 case SO_OPPORTUNISTIC:
6154 optval = so_get_opportunistic(so);
6155 goto integer;
6156
6157 case SO_FLUSH:
6158 /* This option is not gettable */
6159 error = EINVAL;
6160 break;
6161
6162 case SO_RECV_ANYIF:
6163 optval = so_get_recv_anyif(so);
6164 goto integer;
6165
6166 case SO_TRAFFIC_MGT_BACKGROUND:
6167 /* This option is handled by lower layer(s) */
6168 if (so->so_proto != NULL &&
6169 so->so_proto->pr_ctloutput != NULL) {
6170 (void) so->so_proto->pr_ctloutput(so, sopt);
6171 }
6172 break;
6173
6174 #if FLOW_DIVERT
6175 case SO_FLOW_DIVERT_TOKEN:
6176 error = flow_divert_token_get(so, sopt);
6177 break;
6178 #endif /* FLOW_DIVERT */
6179
6180 #if NECP
6181 case SO_NECP_ATTRIBUTES:
6182 if (SOCK_DOM(so) == PF_MULTIPATH) {
6183 /* Handled by MPTCP itself */
6184 break;
6185 }
6186
6187 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
6188 error = EINVAL;
6189 goto out;
6190 }
6191
6192 error = necp_get_socket_attributes(&sotoinpcb(so)->inp_necp_attributes, sopt);
6193 break;
6194
6195 case SO_NECP_CLIENTUUID: {
6196 uuid_t *ncu;
6197
6198 if (SOCK_DOM(so) == PF_MULTIPATH) {
6199 ncu = &mpsotomppcb(so)->necp_client_uuid;
6200 } else if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
6201 ncu = &sotoinpcb(so)->necp_client_uuid;
6202 } else {
6203 error = EINVAL;
6204 goto out;
6205 }
6206
6207 error = sooptcopyout(sopt, ncu, sizeof(uuid_t));
6208 break;
6209 }
6210
6211 case SO_NECP_LISTENUUID: {
6212 uuid_t *nlu;
6213
6214 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
6215 if (sotoinpcb(so)->inp_flags2 & INP2_EXTERNAL_PORT) {
6216 nlu = &sotoinpcb(so)->necp_client_uuid;
6217 } else {
6218 error = ENOENT;
6219 goto out;
6220 }
6221 } else {
6222 error = EINVAL;
6223 goto out;
6224 }
6225
6226 error = sooptcopyout(sopt, nlu, sizeof(uuid_t));
6227 break;
6228 }
6229
6230 case SO_RESOLVER_SIGNATURE: {
6231 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
6232 error = EINVAL;
6233 goto out;
6234 }
6235 error = necp_get_socket_resolver_signature(sotoinpcb(so), sopt);
6236 break;
6237 }
6238
6239 #endif /* NECP */
6240
6241 #if CONTENT_FILTER
6242 case SO_CFIL_SOCK_ID: {
6243 cfil_sock_id_t sock_id;
6244
6245 sock_id = cfil_sock_id_from_socket(so);
6246
6247 error = sooptcopyout(sopt, &sock_id,
6248 sizeof(cfil_sock_id_t));
6249 break;
6250 }
6251 #endif /* CONTENT_FILTER */
6252
6253 case SO_EXTENDED_BK_IDLE:
6254 optval = (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED);
6255 goto integer;
6256 case SO_MARK_CELLFALLBACK:
6257 optval = ((so->so_flags1 & SOF1_CELLFALLBACK) > 0)
6258 ? 1 : 0;
6259 goto integer;
6260 case SO_FALLBACK_MODE:
6261 optval = so->so_fallback_mode;
6262 goto integer;
6263 case SO_MARK_KNOWN_TRACKER: {
6264 optval = ((so->so_flags1 & SOF1_KNOWN_TRACKER) > 0)
6265 ? 1 : 0;
6266 goto integer;
6267 }
6268 case SO_MARK_KNOWN_TRACKER_NON_APP_INITIATED: {
6269 optval = ((so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED) > 0)
6270 ? 1 : 0;
6271 goto integer;
6272 }
6273 case SO_MARK_APPROVED_APP_DOMAIN: {
6274 optval = ((so->so_flags1 & SOF1_APPROVED_APP_DOMAIN) > 0)
6275 ? 1 : 0;
6276 goto integer;
6277 }
6278 case SO_NET_SERVICE_TYPE: {
6279 if ((so->so_flags1 & SOF1_TC_NET_SERV_TYPE)) {
6280 optval = so->so_netsvctype;
6281 } else {
6282 optval = NET_SERVICE_TYPE_BE;
6283 }
6284 goto integer;
6285 }
6286 case SO_NETSVC_MARKING_LEVEL:
6287 optval = so_get_netsvc_marking_level(so);
6288 goto integer;
6289
6290 case SO_MPKL_SEND_INFO: {
6291 struct so_mpkl_send_info so_mpkl_send_info;
6292
6293 uuid_copy(so_mpkl_send_info.mpkl_uuid, so->so_mpkl_send_uuid);
6294 so_mpkl_send_info.mpkl_proto = so->so_mpkl_send_proto;
6295 error = sooptcopyout(sopt, &so_mpkl_send_info,
6296 sizeof(struct so_mpkl_send_info));
6297 break;
6298 }
6299 case SO_MARK_WAKE_PKT:
6300 optval = (so->so_flags & SOF_MARK_WAKE_PKT);
6301 goto integer;
6302 case SO_RECV_WAKE_PKT:
6303 optval = (so->so_flags & SOF_RECV_WAKE_PKT);
6304 goto integer;
6305 case SO_APPLICATION_ID: {
6306 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
6307 error = EINVAL;
6308 goto out;
6309 }
6310 so_application_id_t application_id = { 0 };
6311 application_id.uid = kauth_cred_getuid(so->so_cred);
6312 uuid_copy(application_id.effective_uuid, !uuid_is_null(so->e_uuid) ? so->e_uuid : so->last_uuid);
6313 application_id.persona_id = so->so_persona_id;
6314 error = sooptcopyout(sopt, &application_id, sizeof(so_application_id_t));
6315 break;
6316 }
6317 case SO_MARK_DOMAIN_INFO_SILENT:
6318 optval = ((so->so_flags1 & SOF1_DOMAIN_INFO_SILENT) > 0)
6319 ? 1 : 0;
6320 goto integer;
6321 default:
6322 error = ENOPROTOOPT;
6323 break;
6324 }
6325 }
6326 out:
6327 if (dolock) {
6328 socket_unlock(so, 1);
6329 }
6330 return error;
6331 }
6332
6333 /*
6334 * The size limits on our soopt_getm is different from that on FreeBSD.
6335 * We limit the size of options to MCLBYTES. This will have to change
6336 * if we need to define options that need more space than MCLBYTES.
6337 */
6338 int
6339 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
6340 {
6341 struct mbuf *m, *m_prev;
6342 int sopt_size = (int)sopt->sopt_valsize;
6343 int how;
6344
6345 if (sopt_size <= 0 || sopt_size > MCLBYTES) {
6346 return EMSGSIZE;
6347 }
6348
6349 how = sopt->sopt_p != kernproc ? M_WAIT : M_DONTWAIT;
6350 MGET(m, how, MT_DATA);
6351 if (m == NULL) {
6352 return ENOBUFS;
6353 }
6354 if (sopt_size > MLEN) {
6355 MCLGET(m, how);
6356 if ((m->m_flags & M_EXT) == 0) {
6357 m_free(m);
6358 return ENOBUFS;
6359 }
6360 m->m_len = min(MCLBYTES, sopt_size);
6361 } else {
6362 m->m_len = min(MLEN, sopt_size);
6363 }
6364 sopt_size -= m->m_len;
6365 *mp = m;
6366 m_prev = m;
6367
6368 while (sopt_size > 0) {
6369 MGET(m, how, MT_DATA);
6370 if (m == NULL) {
6371 m_freem(*mp);
6372 return ENOBUFS;
6373 }
6374 if (sopt_size > MLEN) {
6375 MCLGET(m, how);
6376 if ((m->m_flags & M_EXT) == 0) {
6377 m_freem(*mp);
6378 m_freem(m);
6379 return ENOBUFS;
6380 }
6381 m->m_len = min(MCLBYTES, sopt_size);
6382 } else {
6383 m->m_len = min(MLEN, sopt_size);
6384 }
6385 sopt_size -= m->m_len;
6386 m_prev->m_next = m;
6387 m_prev = m;
6388 }
6389 return 0;
6390 }
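/*
 * Worked example (sizes are illustrative assumptions): with MLEN around
 * 256 and MCLBYTES == 2048, a 1500-byte option exceeds MLEN, so a cluster
 * is attached and the whole value fits in the first mbuf (m_len == 1500),
 * leaving sopt_size at 0.  Since the total is capped at MCLBYTES above,
 * the continuation loop is only reached for values that do not fit in a
 * single allocation.
 */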
6391
6392 /* copyin sopt data into mbuf chain */
6393 int
6394 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
6395 {
6396 struct mbuf *m0 = m;
6397
6398 if (sopt->sopt_val == USER_ADDR_NULL) {
6399 return 0;
6400 }
6401 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
6402 if (sopt->sopt_p != kernproc) {
6403 int error;
6404
6405 error = copyin(sopt->sopt_val, mtod(m, char *),
6406 m->m_len);
6407 if (error != 0) {
6408 m_freem(m0);
6409 return error;
6410 }
6411 } else {
6412 caddr_t tmp = __unsafe_forge_bidi_indexable(caddr_t,
6413 CAST_DOWN(caddr_t, sopt->sopt_val),
6414 m->m_len);
6415 bcopy(tmp, mtod(m, char *), m->m_len);
6416 }
6417 sopt->sopt_valsize -= m->m_len;
6418 sopt->sopt_val += m->m_len;
6419 m = m->m_next;
6420 }
6421 /* enough space should have been allocated at ip6_sooptmcopyin() */
6422 if (m != NULL) {
6423 panic("soopt_mcopyin");
6424 /* NOTREACHED */
6425 }
6426 return 0;
6427 }
6428
6429 /* copyout mbuf chain data into soopt */
6430 int
6431 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
6432 {
6433 struct mbuf *m0 = m;
6434 size_t valsize = 0;
6435
6436 if (sopt->sopt_val == USER_ADDR_NULL) {
6437 return 0;
6438 }
6439 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
6440 if (sopt->sopt_p != kernproc) {
6441 int error;
6442
6443 error = copyout(mtod(m, char *), sopt->sopt_val,
6444 m->m_len);
6445 if (error != 0) {
6446 m_freem(m0);
6447 return error;
6448 }
6449 } else {
6450 caddr_t tmp = __unsafe_forge_bidi_indexable(caddr_t,
6451 CAST_DOWN(caddr_t, sopt->sopt_val),
6452 m->m_len);
6453
6454 bcopy(mtod(m, char *), tmp, m->m_len);
6455 }
6456 sopt->sopt_valsize -= m->m_len;
6457 sopt->sopt_val += m->m_len;
6458 valsize += m->m_len;
6459 m = m->m_next;
6460 }
6461 if (m != NULL) {
6462 /* enough soopt buffer should be given from user-land */
6463 m_freem(m0);
6464 return EINVAL;
6465 }
6466 sopt->sopt_valsize = valsize;
6467 return 0;
6468 }
6469
6470 void
6471 sohasoutofband(struct socket *so)
6472 {
6473 if (so->so_pgid < 0) {
6474 gsignal(-so->so_pgid, SIGURG);
6475 } else if (so->so_pgid > 0) {
6476 proc_signal(so->so_pgid, SIGURG);
6477 }
6478 selwakeup(&so->so_rcv.sb_sel);
6479 if (so->so_rcv.sb_flags & SB_KNOTE) {
6480 KNOTE(&so->so_rcv.sb_sel.si_note,
6481 (NOTE_OOB | SO_FILT_HINT_LOCKED));
6482 }
6483 }
6484
6485 int
6486 sopoll(struct socket *so, int events, kauth_cred_t cred, void * wql)
6487 {
6488 #pragma unused(cred)
6489 struct proc *p = current_proc();
6490 int revents = 0;
6491
6492 socket_lock(so, 1);
6493 so_update_last_owner_locked(so, PROC_NULL);
6494 so_update_policy(so);
6495
6496 if (events & (POLLIN | POLLRDNORM)) {
6497 if (soreadable(so)) {
6498 revents |= events & (POLLIN | POLLRDNORM);
6499 }
6500 }
6501
6502 if (events & (POLLOUT | POLLWRNORM)) {
6503 if (sowriteable(so)) {
6504 revents |= events & (POLLOUT | POLLWRNORM);
6505 }
6506 }
6507
6508 if (events & (POLLPRI | POLLRDBAND)) {
6509 if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
6510 revents |= events & (POLLPRI | POLLRDBAND);
6511 }
6512 }
6513
6514 if (revents == 0) {
6515 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
6516 /*
6517 * Darwin sets the flag first,
6518 * BSD calls selrecord first
6519 */
6520 so->so_rcv.sb_flags |= SB_SEL;
6521 selrecord(p, &so->so_rcv.sb_sel, wql);
6522 }
6523
6524 if (events & (POLLOUT | POLLWRNORM)) {
6525 /*
6526 * Darwin sets the flag first,
6527 * BSD calls selrecord first
6528 */
6529 so->so_snd.sb_flags |= SB_SEL;
6530 selrecord(p, &so->so_snd.sb_sel, wql);
6531 }
6532 }
6533
6534 socket_unlock(so, 1);
6535 return revents;
6536 }
6537
6538 int
6539 soo_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev)
6540 {
6541 struct socket *so = (struct socket *)fp_get_data(fp);
6542 int result;
6543
6544 socket_lock(so, 1);
6545 so_update_last_owner_locked(so, PROC_NULL);
6546 so_update_policy(so);
6547
6548 switch (kn->kn_filter) {
6549 case EVFILT_READ:
6550 kn->kn_filtid = EVFILTID_SOREAD;
6551 break;
6552 case EVFILT_WRITE:
6553 kn->kn_filtid = EVFILTID_SOWRITE;
6554 break;
6555 case EVFILT_SOCK:
6556 kn->kn_filtid = EVFILTID_SCK;
6557 break;
6558 case EVFILT_EXCEPT:
6559 kn->kn_filtid = EVFILTID_SOEXCEPT;
6560 break;
6561 default:
6562 socket_unlock(so, 1);
6563 knote_set_error(kn, EINVAL);
6564 return 0;
6565 }
6566
6567 /*
6568 * call the appropriate sub-filter attach
6569 * with the socket still locked
6570 */
6571 result = knote_fops(kn)->f_attach(kn, kev);
6572
6573 socket_unlock(so, 1);
6574
6575 return result;
6576 }
6577
6578 static int
6579 filt_soread_common(struct knote *kn, struct kevent_qos_s *kev, struct socket *so)
6580 {
6581 int retval = 0;
6582 int64_t data = 0;
6583
6584 if (so->so_options & SO_ACCEPTCONN) {
6585 /*
6586 * Radar 6615193 handle the listen case dynamically
6587 * for kqueue read filter. This allows to call listen()
6588 * after registering the kqueue EVFILT_READ.
6589 */
6590
6591 retval = !TAILQ_EMPTY(&so->so_comp);
6592 data = so->so_qlen;
6593 goto out;
6594 }
6595
6596 /* socket isn't a listener */
6597 /*
6598 * NOTE_LOWAT specifies new low water mark in data, i.e.
6599 * the bytes of protocol data. We therefore exclude any
6600 * control bytes.
6601 */
6602 data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
6603
6604 if (kn->kn_sfflags & NOTE_OOB) {
6605 if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
6606 kn->kn_fflags |= NOTE_OOB;
6607 data -= so->so_oobmark;
6608 retval = 1;
6609 goto out;
6610 }
6611 }
6612
6613 if ((so->so_state & SS_CANTRCVMORE)
6614 #if CONTENT_FILTER
6615 && cfil_sock_data_pending(&so->so_rcv) == 0
6616 #endif /* CONTENT_FILTER */
6617 ) {
6618 kn->kn_flags |= EV_EOF;
6619 kn->kn_fflags = so->so_error;
6620 retval = 1;
6621 goto out;
6622 }
6623
6624 if (so->so_error) { /* temporary udp error */
6625 retval = 1;
6626 goto out;
6627 }
6628
6629 int64_t lowwat = so->so_rcv.sb_lowat;
6630 /*
6631 * Ensure that when NOTE_LOWAT is used, the derived
6632 * low water mark is bounded by socket's rcv buf's
6633 * high and low water mark values.
6634 */
6635 if (kn->kn_sfflags & NOTE_LOWAT) {
6636 if (kn->kn_sdata > so->so_rcv.sb_hiwat) {
6637 lowwat = so->so_rcv.sb_hiwat;
6638 } else if (kn->kn_sdata > lowwat) {
6639 lowwat = kn->kn_sdata;
6640 }
6641 }
6642
6643 /*
6644 * While the `data` field is the amount of data to read,
6645 * 0-sized packets need to wake up the kqueue, see 58140856,
6646 * so we need to take control bytes into account too.
6647 */
6648 retval = (so->so_rcv.sb_cc >= lowwat);
6649
6650 out:
6651 if (retval && kev) {
6652 knote_fill_kevent(kn, kev, data);
6653 }
6654 return retval;
6655 }
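/*
 * Hedged userland sketch (assumption: a plain kqueue client; `fd' is an
 * already-connected socket).  It illustrates the NOTE_LOWAT clamping done
 * above: a request larger than the receive buffer's high water mark is
 * reduced to sb_hiwat, so the knote still fires once the buffer fills.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 64 * 1024, NULL);
 *	(void) kevent(kq, &kev, 1, NULL, 0, NULL);
 */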
6656
6657 static int
6658 filt_sorattach(struct knote *kn, __unused struct kevent_qos_s *kev)
6659 {
6660 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6661
6662 /* socket locked */
6663
6664 /*
6665 * If the caller explicitly asked for OOB results (e.g. poll())
6666 * from EVFILT_READ, then save that off in the hookid field
6667 * and reserve the kn_flags EV_OOBAND bit for output only.
6668 */
6669 if (kn->kn_filter == EVFILT_READ &&
6670 kn->kn_flags & EV_OOBAND) {
6671 kn->kn_flags &= ~EV_OOBAND;
6672 kn->kn_hook32 = EV_OOBAND;
6673 } else {
6674 kn->kn_hook32 = 0;
6675 }
6676 if (KNOTE_ATTACH(&so->so_rcv.sb_sel.si_note, kn)) {
6677 so->so_rcv.sb_flags |= SB_KNOTE;
6678 }
6679
6680 /* indicate if event is already fired */
6681 return filt_soread_common(kn, NULL, so);
6682 }
6683
6684 static void
6685 filt_sordetach(struct knote *kn)
6686 {
6687 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6688
6689 socket_lock(so, 1);
6690 if (so->so_rcv.sb_flags & SB_KNOTE) {
6691 if (KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn)) {
6692 so->so_rcv.sb_flags &= ~SB_KNOTE;
6693 }
6694 }
6695 socket_unlock(so, 1);
6696 }
6697
6698 /*ARGSUSED*/
6699 static int
6700 filt_soread(struct knote *kn, long hint)
6701 {
6702 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6703 int retval;
6704
6705 if ((hint & SO_FILT_HINT_LOCKED) == 0) {
6706 socket_lock(so, 1);
6707 }
6708
6709 retval = filt_soread_common(kn, NULL, so);
6710
6711 if ((hint & SO_FILT_HINT_LOCKED) == 0) {
6712 socket_unlock(so, 1);
6713 }
6714
6715 return retval;
6716 }
6717
6718 static int
6719 filt_sortouch(struct knote *kn, struct kevent_qos_s *kev)
6720 {
6721 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6722 int retval;
6723
6724 socket_lock(so, 1);
6725
6726 /* save off the new input fflags and data */
6727 kn->kn_sfflags = kev->fflags;
6728 kn->kn_sdata = kev->data;
6729
6730 /* determine if changes result in fired events */
6731 retval = filt_soread_common(kn, NULL, so);
6732
6733 socket_unlock(so, 1);
6734
6735 return retval;
6736 }
6737
6738 static int
6739 filt_sorprocess(struct knote *kn, struct kevent_qos_s *kev)
6740 {
6741 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6742 int retval;
6743
6744 socket_lock(so, 1);
6745 retval = filt_soread_common(kn, kev, so);
6746 socket_unlock(so, 1);
6747
6748 return retval;
6749 }
6750
6751 int
6752 so_wait_for_if_feedback(struct socket *so)
6753 {
6754 if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
6755 (so->so_state & SS_ISCONNECTED)) {
6756 struct inpcb *inp = sotoinpcb(so);
6757 if (INP_WAIT_FOR_IF_FEEDBACK(inp)) {
6758 return 1;
6759 }
6760 }
6761 return 0;
6762 }
6763
6764 static int
6765 filt_sowrite_common(struct knote *kn, struct kevent_qos_s *kev, struct socket *so)
6766 {
6767 int ret = 0;
6768 int64_t data = sbspace(&so->so_snd);
6769
6770 if (so->so_state & SS_CANTSENDMORE) {
6771 kn->kn_flags |= EV_EOF;
6772 kn->kn_fflags = so->so_error;
6773 ret = 1;
6774 goto out;
6775 }
6776
6777 if (so->so_error) { /* temporary udp error */
6778 ret = 1;
6779 goto out;
6780 }
6781
6782 if (!socanwrite(so)) {
6783 ret = 0;
6784 goto out;
6785 }
6786
6787 if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
6788 ret = 1;
6789 goto out;
6790 }
6791
6792 int64_t lowwat = so->so_snd.sb_lowat;
6793 const int64_t hiwat = so->so_snd.sb_hiwat;
6794 /*
6795 * Deal with connected UNIX domain sockets which
6796 * rely on the fact that the sender's socket buffer is
6797 * actually the receiver's socket buffer.
6798 */
6799 if (SOCK_DOM(so) == PF_LOCAL) {
6800 struct unpcb *unp = sotounpcb(so);
6801 if (unp != NULL && unp->unp_conn != NULL &&
6802 unp->unp_conn->unp_socket != NULL) {
6803 struct socket *so2 = unp->unp_conn->unp_socket;
6804 /*
6805 * At this point we know that `so' is locked
6806 * and that `unp_conn` isn't going to change.
6807 * However, we don't lock `so2` because doing so
6808 * may require unlocking `so'
6809 * (see unp_get_locks_in_order()).
6810 *
6811 * Two cases can happen:
6812 *
6813 * 1) we return 1 and tell the application that
6814 * it can write. Meanwhile, another thread
6815 * fills up the socket buffer. This will either
6816 * lead to a blocking send or EWOULDBLOCK
6817 * which the application should deal with.
6818 * 2) we return 0 and tell the application that
6819 * the socket is not writable. Meanwhile,
6820 * another thread depletes the receive socket
6821 * buffer. In this case the application will
6822 * be woken up by sb_notify().
6823 *
6824 * MIN() is required because otherwise sosendcheck()
6825 * may return EWOULDBLOCK since it only considers
6826 * so->so_snd.
6827 */
6828 data = MIN(data, sbspace(&so2->so_rcv));
6829 }
6830 }
6831
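	/*
	 * As on the read side, clamp any NOTE_LOWAT request below to the
	 * send buffer's high water mark so that an oversized low-water
	 * request cannot make the knote permanently unsatisfiable.
	 */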
6832 if (kn->kn_sfflags & NOTE_LOWAT) {
6833 if (kn->kn_sdata > hiwat) {
6834 lowwat = hiwat;
6835 } else if (kn->kn_sdata > lowwat) {
6836 lowwat = kn->kn_sdata;
6837 }
6838 }
6839
6840 if (data > 0 && data >= lowwat) {
6841 if ((so->so_flags & SOF_NOTSENT_LOWAT)
6842 #if (DEBUG || DEVELOPMENT)
6843 && so_notsent_lowat_check == 1
6844 #endif /* DEBUG || DEVELOPMENT */
6845 ) {
6846 if ((SOCK_DOM(so) == PF_INET ||
6847 SOCK_DOM(so) == PF_INET6) &&
6848 so->so_type == SOCK_STREAM) {
6849 ret = tcp_notsent_lowat_check(so);
6850 }
6851 #if MPTCP
6852 else if ((SOCK_DOM(so) == PF_MULTIPATH) &&
6853 (SOCK_PROTO(so) == IPPROTO_TCP)) {
6854 ret = mptcp_notsent_lowat_check(so);
6855 }
6856 #endif
6857 else {
6858 ret = 1;
6859 goto out;
6860 }
6861 } else {
6862 ret = 1;
6863 }
6864 }
6865 if (so_wait_for_if_feedback(so)) {
6866 ret = 0;
6867 }
6868
6869 out:
6870 if (ret && kev) {
6871 knote_fill_kevent(kn, kev, data);
6872 }
6873 return ret;
6874 }
6875
6876 static int
6877 filt_sowattach(struct knote *kn, __unused struct kevent_qos_s *kev)
6878 {
6879 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6880
6881 /* socket locked */
6882 if (KNOTE_ATTACH(&so->so_snd.sb_sel.si_note, kn)) {
6883 so->so_snd.sb_flags |= SB_KNOTE;
6884 }
6885
6886 /* determine if it's already fired */
6887 return filt_sowrite_common(kn, NULL, so);
6888 }
6889
6890 static void
6891 filt_sowdetach(struct knote *kn)
6892 {
6893 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6894 socket_lock(so, 1);
6895
6896 if (so->so_snd.sb_flags & SB_KNOTE) {
6897 if (KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn)) {
6898 so->so_snd.sb_flags &= ~SB_KNOTE;
6899 }
6900 }
6901 socket_unlock(so, 1);
6902 }
6903
6904 /*ARGSUSED*/
6905 static int
6906 filt_sowrite(struct knote *kn, long hint)
6907 {
6908 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6909 int ret;
6910
6911 if ((hint & SO_FILT_HINT_LOCKED) == 0) {
6912 socket_lock(so, 1);
6913 }
6914
6915 ret = filt_sowrite_common(kn, NULL, so);
6916
6917 if ((hint & SO_FILT_HINT_LOCKED) == 0) {
6918 socket_unlock(so, 1);
6919 }
6920
6921 return ret;
6922 }
6923
6924 static int
6925 filt_sowtouch(struct knote *kn, struct kevent_qos_s *kev)
6926 {
6927 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6928 int ret;
6929
6930 socket_lock(so, 1);
6931
6932 /* save off the new input fflags and data */
6933 kn->kn_sfflags = kev->fflags;
6934 kn->kn_sdata = kev->data;
6935
6936 /* determine if these changes result in a triggered event */
6937 ret = filt_sowrite_common(kn, NULL, so);
6938
6939 socket_unlock(so, 1);
6940
6941 return ret;
6942 }
6943
6944 static int
6945 filt_sowprocess(struct knote *kn, struct kevent_qos_s *kev)
6946 {
6947 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
6948 int ret;
6949
6950 socket_lock(so, 1);
6951 ret = filt_sowrite_common(kn, kev, so);
6952 socket_unlock(so, 1);
6953
6954 return ret;
6955 }
6956
6957 static int
6958 filt_sockev_common(struct knote *kn, struct kevent_qos_s *kev,
6959 struct socket *so, long ev_hint)
6960 {
6961 int ret = 0;
6962 int64_t data = 0;
6963 uint32_t level_trigger = 0;
6964
6965 if (ev_hint & SO_FILT_HINT_CONNRESET) {
6966 kn->kn_fflags |= NOTE_CONNRESET;
6967 }
6968 if (ev_hint & SO_FILT_HINT_TIMEOUT) {
6969 kn->kn_fflags |= NOTE_TIMEOUT;
6970 }
6971 if (ev_hint & SO_FILT_HINT_NOSRCADDR) {
6972 kn->kn_fflags |= NOTE_NOSRCADDR;
6973 }
6974 if (ev_hint & SO_FILT_HINT_IFDENIED) {
6975 kn->kn_fflags |= NOTE_IFDENIED;
6976 }
6977 if (ev_hint & SO_FILT_HINT_KEEPALIVE) {
6978 kn->kn_fflags |= NOTE_KEEPALIVE;
6979 }
6980 if (ev_hint & SO_FILT_HINT_ADAPTIVE_WTIMO) {
6981 kn->kn_fflags |= NOTE_ADAPTIVE_WTIMO;
6982 }
6983 if (ev_hint & SO_FILT_HINT_ADAPTIVE_RTIMO) {
6984 kn->kn_fflags |= NOTE_ADAPTIVE_RTIMO;
6985 }
6986 if ((ev_hint & SO_FILT_HINT_CONNECTED) ||
6987 (so->so_state & SS_ISCONNECTED)) {
6988 kn->kn_fflags |= NOTE_CONNECTED;
6989 level_trigger |= NOTE_CONNECTED;
6990 }
6991 if ((ev_hint & SO_FILT_HINT_DISCONNECTED) ||
6992 (so->so_state & SS_ISDISCONNECTED)) {
6993 kn->kn_fflags |= NOTE_DISCONNECTED;
6994 level_trigger |= NOTE_DISCONNECTED;
6995 }
6996 if (ev_hint & SO_FILT_HINT_CONNINFO_UPDATED) {
6997 if (so->so_proto != NULL &&
6998 (so->so_proto->pr_flags & PR_EVCONNINFO)) {
6999 kn->kn_fflags |= NOTE_CONNINFO_UPDATED;
7000 }
7001 }
7002 if ((ev_hint & SO_FILT_HINT_NOTIFY_ACK) ||
7003 tcp_notify_ack_active(so)) {
7004 kn->kn_fflags |= NOTE_NOTIFY_ACK;
7005 }
7006 if (ev_hint & SO_FILT_HINT_WAKE_PKT) {
7007 kn->kn_fflags |= NOTE_WAKE_PKT;
7008 }
7009
7010 if ((so->so_state & SS_CANTRCVMORE)
7011 #if CONTENT_FILTER
7012 && cfil_sock_data_pending(&so->so_rcv) == 0
7013 #endif /* CONTENT_FILTER */
7014 ) {
7015 kn->kn_fflags |= NOTE_READCLOSED;
7016 level_trigger |= NOTE_READCLOSED;
7017 }
7018
7019 if (so->so_state & SS_CANTSENDMORE) {
7020 kn->kn_fflags |= NOTE_WRITECLOSED;
7021 level_trigger |= NOTE_WRITECLOSED;
7022 }
7023
7024 if ((ev_hint & SO_FILT_HINT_SUSPEND) ||
7025 (so->so_flags & SOF_SUSPENDED)) {
7026 kn->kn_fflags &= ~(NOTE_SUSPEND | NOTE_RESUME);
7027
7028 /* If resume event was delivered before, reset it */
7029 kn->kn_hook32 &= ~NOTE_RESUME;
7030
7031 kn->kn_fflags |= NOTE_SUSPEND;
7032 level_trigger |= NOTE_SUSPEND;
7033 }
7034
7035 if ((ev_hint & SO_FILT_HINT_RESUME) ||
7036 (so->so_flags & SOF_SUSPENDED) == 0) {
7037 kn->kn_fflags &= ~(NOTE_SUSPEND | NOTE_RESUME);
7038
7039 /* If suspend event was delivered before, reset it */
7040 kn->kn_hook32 &= ~NOTE_SUSPEND;
7041
7042 kn->kn_fflags |= NOTE_RESUME;
7043 level_trigger |= NOTE_RESUME;
7044 }
7045
7046 if (so->so_error != 0) {
7047 ret = 1;
7048 data = so->so_error;
7049 kn->kn_flags |= EV_EOF;
7050 } else {
7051 u_int32_t data32 = 0;
7052 get_sockev_state(so, &data32);
7053 data = data32;
7054 }
7055
7056 /* Reset any events that are not requested on this knote */
7057 kn->kn_fflags &= (kn->kn_sfflags & EVFILT_SOCK_ALL_MASK);
7058 level_trigger &= (kn->kn_sfflags & EVFILT_SOCK_ALL_MASK);
7059
7060 /* Find the level triggered events that are already delivered */
7061 level_trigger &= kn->kn_hook32;
7062 level_trigger &= EVFILT_SOCK_LEVEL_TRIGGER_MASK;
7063
7064 /* Do not deliver level triggered events more than once */
7065 if ((kn->kn_fflags & ~level_trigger) != 0) {
7066 ret = 1;
7067 }
7068
7069 if (ret && kev) {
7070 /*
7071 * Store the state of the events being delivered. This
7072 * state can be used to deliver level triggered events
7073 * at least once and still avoid waking up the application
7074 * multiple times as long as the event is active.
7075 */
7076 if (kn->kn_fflags != 0) {
7077 kn->kn_hook32 |= (kn->kn_fflags &
7078 EVFILT_SOCK_LEVEL_TRIGGER_MASK);
7079 }
7080
7081 /*
7082 * NOTE_RESUME and NOTE_SUSPEND are an exception: deliver
7083 * only one of them and remember which one was
7084 * delivered last
7085 */
7086 if (kn->kn_fflags & NOTE_SUSPEND) {
7087 kn->kn_hook32 &= ~NOTE_RESUME;
7088 }
7089 if (kn->kn_fflags & NOTE_RESUME) {
7090 kn->kn_hook32 &= ~NOTE_SUSPEND;
7091 }
7092
7093 knote_fill_kevent(kn, kev, data);
7094 }
7095 return ret;
7096 }
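/*
 * Hedged userland sketch (assumption: the caller has access to the private
 * EVFILT_SOCK filter).  The level-triggered bits recorded in kn_hook32
 * above mean a persistent condition such as NOTE_CONNECTED is reported
 * once rather than on every scan.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, fd, EVFILT_SOCK, EV_ADD | EV_CLEAR,
 *	    NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_READCLOSED, 0, NULL);
 *	(void) kevent(kq, &kev, 1, NULL, 0, NULL);
 */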
7097
7098 static int
7099 filt_sockattach(struct knote *kn, __unused struct kevent_qos_s *kev)
7100 {
7101 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
7102
7103 /* socket locked */
7104 kn->kn_hook32 = 0;
7105 if (KNOTE_ATTACH(&so->so_klist, kn)) {
7106 so->so_flags |= SOF_KNOTE;
7107 }
7108
7109 /* determine if event already fired */
7110 return filt_sockev_common(kn, NULL, so, 0);
7111 }
7112
7113 static void
7114 filt_sockdetach(struct knote *kn)
7115 {
7116 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
7117 socket_lock(so, 1);
7118
7119 if ((so->so_flags & SOF_KNOTE) != 0) {
7120 if (KNOTE_DETACH(&so->so_klist, kn)) {
7121 so->so_flags &= ~SOF_KNOTE;
7122 }
7123 }
7124 socket_unlock(so, 1);
7125 }
7126
7127 static int
7128 filt_sockev(struct knote *kn, long hint)
7129 {
7130 int ret = 0, locked = 0;
7131 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
7132 long ev_hint = (hint & SO_FILT_HINT_EV);
7133
7134 if ((hint & SO_FILT_HINT_LOCKED) == 0) {
7135 socket_lock(so, 1);
7136 locked = 1;
7137 }
7138
7139 ret = filt_sockev_common(kn, NULL, so, ev_hint);
7140
7141 if (locked) {
7142 socket_unlock(so, 1);
7143 }
7144
7145 return ret;
7146 }
7147
7148
7149
7150 /*
7151 * filt_socktouch - update event state
7152 */
7153 static int
7154 filt_socktouch(
7155 struct knote *kn,
7156 struct kevent_qos_s *kev)
7157 {
7158 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
7159 uint32_t changed_flags;
7160 int ret;
7161
7162 socket_lock(so, 1);
7163
7164 /* save off the [result] data and fflags */
7165 changed_flags = (kn->kn_sfflags ^ kn->kn_hook32);
7166
7167 /* save off the new input fflags and data */
7168 kn->kn_sfflags = kev->fflags;
7169 kn->kn_sdata = kev->data;
7170
7171 /* restrict the current results to the (smaller?) set of new interest */
7172 /*
7173 * For compatibility with previous implementations, we leave kn_fflags
7174 * as they were before.
7175 */
7176 //kn->kn_fflags &= kev->fflags;
7177
7178 /*
7179 * Since we keep track of events that are already
7180 * delivered, if any of those events are not requested
7181 * anymore the state related to them can be reset
7182 */
7183 kn->kn_hook32 &= ~(changed_flags & EVFILT_SOCK_LEVEL_TRIGGER_MASK);
7184
7185 /* determine if we have events to deliver */
7186 ret = filt_sockev_common(kn, NULL, so, 0);
7187
7188 socket_unlock(so, 1);
7189
7190 return ret;
7191 }
7192
7193 /*
7194 * filt_sockprocess - query event fired state and return data
7195 */
7196 static int
7197 filt_sockprocess(struct knote *kn, struct kevent_qos_s *kev)
7198 {
7199 struct socket *so = (struct socket *)fp_get_data(kn->kn_fp);
7200 int ret = 0;
7201
7202 socket_lock(so, 1);
7203
7204 ret = filt_sockev_common(kn, kev, so, 0);
7205
7206 socket_unlock(so, 1);
7207
7208 return ret;
7209 }
7210
7211 void
7212 get_sockev_state(struct socket *so, u_int32_t *statep)
7213 {
7214 u_int32_t state = *(statep);
7215
7216 /*
7217 * If the state variable was already set by a previous event,
7218 * leave it as is.
7219 */
7220 if (state != 0) {
7221 return;
7222 }
7223
7224 if (so->so_state & SS_ISCONNECTED) {
7225 state |= SOCKEV_CONNECTED;
7226 } else {
7227 state &= ~(SOCKEV_CONNECTED);
7228 }
7229 state |= ((so->so_state & SS_ISDISCONNECTED) ? SOCKEV_DISCONNECTED : 0);
7230 *(statep) = state;
7231 }
7232
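/*
 * Sizing note: solockhistory_nr() prints SO_LCKDBG_MAX "%p:%p " pairs,
 * i.e. two pointers per entry, each rendered as "0x" plus up to
 * 2 * sizeof (void *) hex digits and followed by one separator character,
 * hence 2 * (2 + 2 * sizeof (void *) + 1) bytes per entry plus one byte
 * for the terminating NUL.
 */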
7233 #define SO_LOCK_HISTORY_STR_LEN \
7234 (2 * SO_LCKDBG_MAX * (2 + (2 * sizeof (void *)) + 1) + 1)
7235
7236 __private_extern__ const char *
7237 solockhistory_nr(struct socket *so)
7238 {
7239 size_t n = 0;
7240 int i;
7241 static char lock_history_str[SO_LOCK_HISTORY_STR_LEN];
7242
7243 bzero(lock_history_str, sizeof(lock_history_str));
7244 for (i = SO_LCKDBG_MAX - 1; i >= 0; i--) {
7245 n += scnprintf(lock_history_str + n,
7246 SO_LOCK_HISTORY_STR_LEN - n, "%p:%p ",
7247 so->lock_lr[(so->next_lock_lr + i) % SO_LCKDBG_MAX],
7248 so->unlock_lr[(so->next_unlock_lr + i) % SO_LCKDBG_MAX]);
7249 }
7250 return __unsafe_null_terminated_from_indexable(lock_history_str);
7251 }
7252
7253 lck_mtx_t *
7254 socket_getlock(struct socket *so, int flags)
7255 {
7256 if (so->so_proto->pr_getlock != NULL) {
7257 return (*so->so_proto->pr_getlock)(so, flags);
7258 } else {
7259 return so->so_proto->pr_domain->dom_mtx;
7260 }
7261 }
7262
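/*
 * socket_lock()/socket_unlock() record the caller's return address in the
 * lock_lr/unlock_lr ring buffers (SO_LCKDBG_MAX entries each); that history
 * is what solockhistory_nr() above formats when a refcount panic fires.
 */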
7263 void
7264 socket_lock(struct socket *so, int refcount)
7265 {
7266 void *__single lr_saved = __unsafe_forge_single(void *, __builtin_return_address(0));
7267
7268 if (so->so_proto->pr_lock) {
7269 (*so->so_proto->pr_lock)(so, refcount, lr_saved);
7270 } else {
7271 #ifdef MORE_LOCKING_DEBUG
7272 LCK_MTX_ASSERT(so->so_proto->pr_domain->dom_mtx,
7273 LCK_MTX_ASSERT_NOTOWNED);
7274 #endif
7275 lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
7276 if (refcount) {
7277 so->so_usecount++;
7278 }
7279 so->lock_lr[so->next_lock_lr] = lr_saved;
7280 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
7281 }
7282 }
7283
7284 void
7285 socket_lock_assert_owned(struct socket *so)
7286 {
7287 lck_mtx_t *mutex_held;
7288
7289 if (so->so_proto->pr_getlock != NULL) {
7290 mutex_held = (*so->so_proto->pr_getlock)(so, 0);
7291 } else {
7292 mutex_held = so->so_proto->pr_domain->dom_mtx;
7293 }
7294
7295 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
7296 }
7297
7298 int
7299 socket_try_lock(struct socket *so)
7300 {
7301 lck_mtx_t *mtx;
7302
7303 if (so->so_proto->pr_getlock != NULL) {
7304 mtx = (*so->so_proto->pr_getlock)(so, 0);
7305 } else {
7306 mtx = so->so_proto->pr_domain->dom_mtx;
7307 }
7308
7309 return lck_mtx_try_lock(mtx);
7310 }
7311
7312 void
7313 socket_unlock(struct socket *so, int refcount)
7314 {
7315 lck_mtx_t *mutex_held;
7316 void *__single lr_saved = __unsafe_forge_single(void *, __builtin_return_address(0));
7317
7318 if (so == NULL || so->so_proto == NULL) {
7319 panic("%s: null so_proto so=%p", __func__, so);
7320 /* NOTREACHED */
7321 }
7322
7323 if (so->so_proto->pr_unlock) {
7324 (*so->so_proto->pr_unlock)(so, refcount, lr_saved);
7325 } else {
7326 mutex_held = so->so_proto->pr_domain->dom_mtx;
7327 #ifdef MORE_LOCKING_DEBUG
7328 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
7329 #endif
7330 so->unlock_lr[so->next_unlock_lr] = lr_saved;
7331 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
7332
7333 if (refcount) {
7334 if (so->so_usecount <= 0) {
7335 panic("%s: bad refcount=%d so=%p (%d, %d, %d) "
7336 "lrh=%s", __func__, so->so_usecount, so,
7337 SOCK_DOM(so), so->so_type,
7338 SOCK_PROTO(so), solockhistory_nr(so));
7339 /* NOTREACHED */
7340 }
7341
7342 so->so_usecount--;
7343 if (so->so_usecount == 0) {
7344 sofreelastref(so, 1);
7345 }
7346 }
7347 lck_mtx_unlock(mutex_held);
7348 }
7349 }
7350
7351 /* Called with socket locked, will unlock socket */
7352 void
7353 sofree(struct socket *so)
7354 {
7355 lck_mtx_t *mutex_held;
7356
7357 if (so->so_proto->pr_getlock != NULL) {
7358 mutex_held = (*so->so_proto->pr_getlock)(so, 0);
7359 } else {
7360 mutex_held = so->so_proto->pr_domain->dom_mtx;
7361 }
7362 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
7363
7364 sofreelastref(so, 0);
7365 }
7366
7367 void
7368 soreference(struct socket *so)
7369 {
7370 socket_lock(so, 1); /* locks & take one reference on socket */
7371 socket_unlock(so, 0); /* unlock only */
7372 }
7373
7374 void
7375 sodereference(struct socket *so)
7376 {
7377 socket_lock(so, 0);
7378 socket_unlock(so, 1);
7379 }
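/*
 * soreference() and sodereference() piggyback on the lock primitives:
 * locking with a non-zero refcount argument takes a use count, while
 * unlocking with a non-zero refcount argument drops one (freeing the
 * socket on the last reference), so a lock/unlock pair with mismatched
 * refcount arguments nets a +1 or -1 on so_usecount without leaving the
 * lock held.
 */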
7380
7381 /*
7382 * Set or clear SOF_MULTIPAGES on the socket to enable or disable the
7383 * possibility of using jumbo clusters. Caller must ensure to hold
7384 * the socket lock.
7385 */
7386 void
7387 somultipages(struct socket *so, boolean_t set)
7388 {
7389 if (set) {
7390 so->so_flags |= SOF_MULTIPAGES;
7391 } else {
7392 so->so_flags &= ~SOF_MULTIPAGES;
7393 }
7394 }
7395
7396 void
7397 soif2kcl(struct socket *so, boolean_t set)
7398 {
7399 if (set) {
7400 so->so_flags1 |= SOF1_IF_2KCL;
7401 } else {
7402 so->so_flags1 &= ~SOF1_IF_2KCL;
7403 }
7404 }
7405
7406 int
7407 so_isdstlocal(struct socket *so)
7408 {
7409 struct inpcb *inp = (struct inpcb *)so->so_pcb;
7410
7411 if (SOCK_DOM(so) == PF_INET) {
7412 return inaddr_local(inp->inp_faddr);
7413 } else if (SOCK_DOM(so) == PF_INET6) {
7414 return in6addr_local(&inp->in6p_faddr);
7415 }
7416
7417 return 0;
7418 }
7419
7420 int
7421 sosetdefunct(struct proc *p, struct socket *so, int level, boolean_t noforce)
7422 {
7423 struct sockbuf *rcv, *snd;
7424 int err = 0, defunct;
7425
7426 rcv = &so->so_rcv;
7427 snd = &so->so_snd;
7428
7429 defunct = (so->so_flags & SOF_DEFUNCT);
7430 if (defunct) {
7431 if (!(snd->sb_flags & rcv->sb_flags & SB_DROP)) {
7432 panic("%s: SB_DROP not set", __func__);
7433 /* NOTREACHED */
7434 }
7435 goto done;
7436 }
7437
7438 if (so->so_flags & SOF_NODEFUNCT) {
7439 if (noforce) {
7440 err = EOPNOTSUPP;
7441 if (p != PROC_NULL) {
7442 SODEFUNCTLOG("%s[%d, %s]: (target pid %d "
7443 "name %s level %d) so 0x%llu [%d,%d] "
7444 "is not eligible for defunct "
7445 "(%d)\n", __func__, proc_selfpid(),
7446 proc_best_name(current_proc()), proc_pid(p),
7447 proc_best_name(p), level,
7448 so->so_gencnt,
7449 SOCK_DOM(so), SOCK_TYPE(so), err);
7450 }
7451 return err;
7452 }
7453 so->so_flags &= ~SOF_NODEFUNCT;
7454 if (p != PROC_NULL) {
7455 SODEFUNCTLOG("%s[%d, %s]: (target pid %d "
7456 "name %s level %d) so 0x%llu [%d,%d] "
7457 "defunct by force "
7458 "(%d)\n", __func__, proc_selfpid(),
7459 proc_best_name(current_proc()), proc_pid(p),
7460 proc_best_name(p), level,
7461 so->so_gencnt,
7462 SOCK_DOM(so), SOCK_TYPE(so), err);
7463 }
7464 } else if (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) {
7465 struct inpcb *inp = (struct inpcb *)so->so_pcb;
7466 struct ifnet *ifp = inp->inp_last_outifp;
7467
7468 if (ifp && IFNET_IS_CELLULAR(ifp)) {
7469 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_nocell);
7470 } else if (so->so_flags & SOF_DELEGATED) {
7471 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_nodlgtd);
7472 } else if (soextbkidlestat.so_xbkidle_time == 0) {
7473 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_notime);
7474 } else if (noforce && p != PROC_NULL) {
7475 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_active);
7476
7477 so->so_flags1 |= SOF1_EXTEND_BK_IDLE_INPROG;
7478 so->so_extended_bk_start = net_uptime();
7479 OSBitOrAtomic(P_LXBKIDLEINPROG, &p->p_ladvflag);
7480
7481 inpcb_timer_sched(inp->inp_pcbinfo, INPCB_TIMER_LAZY);
7482
7483 err = EOPNOTSUPP;
7484 SODEFUNCTLOG("%s[%d, %s]: (target pid %d "
7485 "name %s level %d) so 0x%llu [%d,%d] "
7486 "extend bk idle "
7487 "(%d)\n", __func__, proc_selfpid(),
7488 proc_best_name(current_proc()), proc_pid(p),
7489 proc_best_name(p), level,
7490 so->so_gencnt,
7491 SOCK_DOM(so), SOCK_TYPE(so), err);
7492 return err;
7493 } else {
7494 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_forced);
7495 }
7496 }
7497
7498 so->so_flags |= SOF_DEFUNCT;
7499
7500 /* Prevent further data from being appended to the socket buffers */
7501 snd->sb_flags |= SB_DROP;
7502 rcv->sb_flags |= SB_DROP;
7503
7504 /* Flush any existing data in the socket buffers */
7505 if (rcv->sb_cc != 0) {
7506 rcv->sb_flags &= ~SB_SEL;
7507 selthreadclear(&rcv->sb_sel);
7508 sbrelease(rcv);
7509 }
7510 if (snd->sb_cc != 0) {
7511 snd->sb_flags &= ~SB_SEL;
7512 selthreadclear(&snd->sb_sel);
7513 sbrelease(snd);
7514 }
7515
7516 done:
7517 if (p != PROC_NULL) {
7518 SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s level %d) "
7519 "so 0x%llu [%d,%d] %s defunct%s\n", __func__,
7520 proc_selfpid(), proc_best_name(current_proc()),
7521 proc_pid(p), proc_best_name(p), level,
7522 so->so_gencnt, SOCK_DOM(so),
7523 SOCK_TYPE(so), defunct ? "is already" : "marked as",
7524 (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) ?
7525 " extbkidle" : "");
7526 }
7527 return err;
7528 }
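/*
 * Callers normally pair sosetdefunct() with a follow-up sodefunct() while
 * still holding the socket lock, as so_stop_extended_bk_idle() below does:
 * sosetdefunct() marks the socket and drops buffered data, while
 * sodefunct() actually shuts the flow down.
 */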
7529
7530 int
7531 sodefunct(struct proc *p, struct socket *so, int level)
7532 {
7533 struct sockbuf *rcv, *snd;
7534
7535 if (!(so->so_flags & SOF_DEFUNCT)) {
7536 panic("%s improperly called", __func__);
7537 /* NOTREACHED */
7538 }
7539 if (so->so_state & SS_DEFUNCT) {
7540 goto done;
7541 }
7542
7543 rcv = &so->so_rcv;
7544 snd = &so->so_snd;
7545
7546 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
7547 char s[MAX_IPv6_STR_LEN];
7548 char d[MAX_IPv6_STR_LEN];
7549 struct inpcb *inp = sotoinpcb(so);
7550
7551 if (p != PROC_NULL) {
7552 SODEFUNCTLOG(
7553 "%s[%d, %s]: (target pid %d name %s level %d) "
7554 "so 0x%llu [%s %s:%d -> %s:%d] is now defunct "
7555 "[rcv_si 0x%x, snd_si 0x%x, rcv_fl 0x%x, "
7556 " snd_fl 0x%x]\n", __func__,
7557 proc_selfpid(), proc_best_name(current_proc()),
7558 proc_pid(p), proc_best_name(p), level,
7559 so->so_gencnt,
7560 (SOCK_TYPE(so) == SOCK_STREAM) ? "TCP" : "UDP",
7561 inet_ntop(SOCK_DOM(so), ((SOCK_DOM(so) == PF_INET) ?
7562 (void *)&inp->inp_laddr.s_addr :
7563 (void *)&inp->in6p_laddr),
7564 s, sizeof(s)), ntohs(inp->in6p_lport),
7565 inet_ntop(SOCK_DOM(so), (SOCK_DOM(so) == PF_INET) ?
7566 (void *)&inp->inp_faddr.s_addr :
7567 (void *)&inp->in6p_faddr,
7568 d, sizeof(d)), ntohs(inp->in6p_fport),
7569 (uint32_t)rcv->sb_sel.si_flags,
7570 (uint32_t)snd->sb_sel.si_flags,
7571 rcv->sb_flags, snd->sb_flags);
7572 }
7573 } else if (p != PROC_NULL) {
7574 SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s level %d) "
7575 "so 0x%llu [%d,%d] is now defunct [rcv_si 0x%x, "
7576 "snd_si 0x%x, rcv_fl 0x%x, snd_fl 0x%x]\n", __func__,
7577 proc_selfpid(), proc_best_name(current_proc()),
7578 proc_pid(p), proc_best_name(p), level,
7579 so->so_gencnt,
7580 SOCK_DOM(so), SOCK_TYPE(so),
7581 (uint32_t)rcv->sb_sel.si_flags,
7582 (uint32_t)snd->sb_sel.si_flags, rcv->sb_flags,
7583 snd->sb_flags);
7584 }
7585
7586 /*
7587 * First tell the protocol the flow is defunct
7588 */
7589 (void) (*so->so_proto->pr_usrreqs->pru_defunct)(so);
7590
7591 /*
7592 * Unwedge threads blocked on sbwait() and sb_lock().
7593 */
7594 sbwakeup(rcv);
7595 sbwakeup(snd);
7596
7597 so->so_flags1 |= SOF1_DEFUNCTINPROG;
7598 if (rcv->sb_flags & SB_LOCK) {
7599 sbunlock(rcv, TRUE); /* keep socket locked */
7600 }
7601 if (snd->sb_flags & SB_LOCK) {
7602 sbunlock(snd, TRUE); /* keep socket locked */
7603 }
7604 /*
7605 * Flush the buffers and disconnect. We explicitly call shutdown
7606 * on both data directions to ensure that SS_CANT{RCV,SEND}MORE
7607 * states are set for the socket. This would also flush out data
7608 * hanging off the receive list of this socket.
7609 */
7610 (void) soshutdownlock_final(so, SHUT_RD);
7611 (void) soshutdownlock_final(so, SHUT_WR);
7612 (void) sodisconnectlocked(so);
7613
7614 /*
7615 * Explicitly handle connectionless-protocol disconnection
7616 * and release any remaining data in the socket buffers.
7617 */
7618 if (!(so->so_state & SS_ISDISCONNECTED)) {
7619 (void) soisdisconnected(so);
7620 }
7621
7622 if (so->so_error == 0) {
7623 so->so_error = EBADF;
7624 }
7625
7626 if (rcv->sb_cc != 0) {
7627 rcv->sb_flags &= ~SB_SEL;
7628 selthreadclear(&rcv->sb_sel);
7629 sbrelease(rcv);
7630 }
7631 if (snd->sb_cc != 0) {
7632 snd->sb_flags &= ~SB_SEL;
7633 selthreadclear(&snd->sb_sel);
7634 sbrelease(snd);
7635 }
7636 so->so_state |= SS_DEFUNCT;
7637 OSIncrementAtomicLong((volatile long *)&sodefunct_calls);
7638
7639 done:
7640 return 0;
7641 }
7642
7643 int
7644 soresume(struct proc *p, struct socket *so, int locked)
7645 {
7646 if (locked == 0) {
7647 socket_lock(so, 1);
7648 }
7649
7650 if (so->so_flags1 & SOF1_EXTEND_BK_IDLE_INPROG) {
7651 SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s) so 0x%llu "
7652 "[%d,%d] resumed from bk idle\n",
7653 __func__, proc_selfpid(), proc_best_name(current_proc()),
7654 proc_pid(p), proc_best_name(p),
7655 so->so_gencnt,
7656 SOCK_DOM(so), SOCK_TYPE(so));
7657
7658 so->so_flags1 &= ~SOF1_EXTEND_BK_IDLE_INPROG;
7659 so->so_extended_bk_start = 0;
7660 OSBitAndAtomic(~P_LXBKIDLEINPROG, &p->p_ladvflag);
7661
7662 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_resumed);
7663 OSDecrementAtomic(&soextbkidlestat.so_xbkidle_active);
7664 VERIFY(soextbkidlestat.so_xbkidle_active >= 0);
7665 }
7666 if (locked == 0) {
7667 socket_unlock(so, 1);
7668 }
7669
7670 return 0;
7671 }
7672
7673 /*
7674 * Does not attempt to account for sockets that are delegated from
7675 * the current process
7676 */
7677 int
7678 so_set_extended_bk_idle(struct socket *so, int optval)
7679 {
7680 int error = 0;
7681
7682 if ((SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) ||
7683 SOCK_PROTO(so) != IPPROTO_TCP) {
7684 OSDecrementAtomic(&soextbkidlestat.so_xbkidle_notsupp);
7685 error = EOPNOTSUPP;
7686 } else if (optval == 0) {
7687 so->so_flags1 &= ~SOF1_EXTEND_BK_IDLE_WANTED;
7688
7689 soresume(current_proc(), so, 1);
7690 } else {
7691 struct proc *p = current_proc();
7692 struct fileproc *fp;
7693 int count = 0;
7694
7695 /*
7696 * Unlock socket to avoid lock ordering issue with
7697 * the proc fd table lock
7698 */
7699 socket_unlock(so, 0);
7700
7701 proc_fdlock(p);
7702 fdt_foreach(fp, p) {
7703 struct socket *so2;
7704
7705 if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_SOCKET) {
7706 continue;
7707 }
7708
7709 so2 = (struct socket *)fp_get_data(fp);
7710 if (so != so2 &&
7711 so2->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) {
7712 count++;
7713 }
7714 if (count >= soextbkidlestat.so_xbkidle_maxperproc) {
7715 break;
7716 }
7717 }
7718 proc_fdunlock(p);
7719
7720 socket_lock(so, 0);
7721
7722 if (count >= soextbkidlestat.so_xbkidle_maxperproc) {
7723 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_toomany);
7724 error = EBUSY;
7725 } else if (so->so_flags & SOF_DELEGATED) {
7726 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_nodlgtd);
7727 error = EBUSY;
7728 } else {
7729 so->so_flags1 |= SOF1_EXTEND_BK_IDLE_WANTED;
7730 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_wantok);
7731 }
7732 SODEFUNCTLOG("%s[%d, %s]: so 0x%llu [%d,%d] "
7733 "%s marked for extended bk idle\n",
7734 __func__, proc_selfpid(), proc_best_name(current_proc()),
7735 so->so_gencnt,
7736 SOCK_DOM(so), SOCK_TYPE(so),
7737 (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) ?
7738 "is" : "not");
7739 }
7740
7741 return error;
7742 }
7743
7744 static void
7745 so_stop_extended_bk_idle(struct socket *so)
7746 {
7747 so->so_flags1 &= ~SOF1_EXTEND_BK_IDLE_INPROG;
7748 so->so_extended_bk_start = 0;
7749
7750 OSDecrementAtomic(&soextbkidlestat.so_xbkidle_active);
7751 VERIFY(soextbkidlestat.so_xbkidle_active >= 0);
7752 /*
7753 * Force defunct
7754 */
7755 sosetdefunct(current_proc(), so,
7756 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL, FALSE);
7757 if (so->so_flags & SOF_DEFUNCT) {
7758 sodefunct(current_proc(), so,
7759 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL);
7760 }
7761 }
7762
7763 void
7764 so_drain_extended_bk_idle(struct socket *so)
7765 {
7766 if (so && (so->so_flags1 & SOF1_EXTEND_BK_IDLE_INPROG)) {
7767 /*
7768 * Only penalize sockets that have outstanding data
7769 */
7770 if (so->so_rcv.sb_cc || so->so_snd.sb_cc) {
7771 so_stop_extended_bk_idle(so);
7772
7773 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_drained);
7774 }
7775 }
7776 }
7777
7778 /*
7779 * Return value tells whether the socket is still in extended background idle
7780 */
7781 int
7782 so_check_extended_bk_idle_time(struct socket *so)
7783 {
7784 int ret = 1;
7785
7786 if ((so->so_flags1 & SOF1_EXTEND_BK_IDLE_INPROG)) {
7787 SODEFUNCTLOG("%s[%d, %s]: so 0x%llu [%d,%d]\n",
7788 __func__, proc_selfpid(), proc_best_name(current_proc()),
7789 so->so_gencnt,
7790 SOCK_DOM(so), SOCK_TYPE(so));
7791 if (net_uptime() - so->so_extended_bk_start >
7792 soextbkidlestat.so_xbkidle_time) {
7793 so_stop_extended_bk_idle(so);
7794
7795 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_expired);
7796
7797 ret = 0;
7798 } else {
7799 struct inpcb *inp = (struct inpcb *)so->so_pcb;
7800
7801 inpcb_timer_sched(inp->inp_pcbinfo, INPCB_TIMER_LAZY);
7802 OSIncrementAtomic(&soextbkidlestat.so_xbkidle_resched);
7803 }
7804 }
7805
7806 return ret;
7807 }
7808
7809 void
7810 resume_proc_sockets(proc_t p)
7811 {
7812 if (p->p_ladvflag & P_LXBKIDLEINPROG) {
7813 struct fileproc *fp;
7814 struct socket *so;
7815
7816 proc_fdlock(p);
7817 fdt_foreach(fp, p) {
7818 if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_SOCKET) {
7819 continue;
7820 }
7821
7822 so = (struct socket *)fp_get_data(fp);
7823 (void) soresume(p, so, 0);
7824 }
7825 proc_fdunlock(p);
7826
7827 OSBitAndAtomic(~P_LXBKIDLEINPROG, &p->p_ladvflag);
7828 }
7829 }
7830
7831 __private_extern__ int
7832 so_set_recv_anyif(struct socket *so, int optval)
7833 {
7834 int ret = 0;
7835
7836 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
7837 if (optval) {
7838 sotoinpcb(so)->inp_flags |= INP_RECV_ANYIF;
7839 } else {
7840 sotoinpcb(so)->inp_flags &= ~INP_RECV_ANYIF;
7841 }
7842 #if SKYWALK
7843 inp_update_netns_flags(so);
7844 #endif /* SKYWALK */
7845 }
7846
7847
7848 return ret;
7849 }
7850
7851 __private_extern__ int
7852 so_get_recv_anyif(struct socket *so)
7853 {
7854 int ret = 0;
7855
7856 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
7857 ret = (sotoinpcb(so)->inp_flags & INP_RECV_ANYIF) ? 1 : 0;
7858 }
7859
7860 return ret;
7861 }
7862
7863 int
7864 so_set_restrictions(struct socket *so, uint32_t vals)
7865 {
7866 int nocell_old, nocell_new;
7867 int noexpensive_old, noexpensive_new;
7868 int noconstrained_old, noconstrained_new;
7869
7870 /*
7871 * Deny-type restrictions are trapdoors; once set they cannot be
7872 * unset for the lifetime of the socket. This allows them to be
7873 * issued by a framework on behalf of the application without
7874 * having to worry that they can be undone.
7875 *
7876 * Note here that socket-level restrictions overrides any protocol
7877 * level restrictions. For instance, SO_RESTRICT_DENY_CELLULAR
7878 * socket restriction issued on the socket has a higher precedence
7879 * than INP_NO_IFT_CELLULAR. The latter is affected by the UUID
7880 * policy PROC_UUID_NO_CELLULAR for unrestricted sockets only,
7881 * i.e. when SO_RESTRICT_DENY_CELLULAR has not been issued.
7882 */
7883 nocell_old = (so->so_restrictions & SO_RESTRICT_DENY_CELLULAR);
7884 noexpensive_old = (so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE);
7885 noconstrained_old = (so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED);
7886 so->so_restrictions |= (vals & (SO_RESTRICT_DENY_IN |
7887 SO_RESTRICT_DENY_OUT | SO_RESTRICT_DENY_CELLULAR |
7888 SO_RESTRICT_DENY_EXPENSIVE | SO_RESTRICT_DENY_CONSTRAINED));
7889 nocell_new = (so->so_restrictions & SO_RESTRICT_DENY_CELLULAR);
7890 noexpensive_new = (so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE);
7891 noconstrained_new = (so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED);
7892
7893 /* we can only set, not clear restrictions */
7894 if ((nocell_new - nocell_old) == 0 &&
7895 (noexpensive_new - noexpensive_old) == 0 &&
7896 (noconstrained_new - noconstrained_old) == 0) {
7897 return 0;
7898 }
7899 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
7900 if (nocell_new - nocell_old != 0) {
7901 /*
7902 * if deny cellular is now set, do what's needed
7903 * for INPCB
7904 */
7905 inp_set_nocellular(sotoinpcb(so));
7906 }
7907 if (noexpensive_new - noexpensive_old != 0) {
7908 inp_set_noexpensive(sotoinpcb(so));
7909 }
7910 if (noconstrained_new - noconstrained_old != 0) {
7911 inp_set_noconstrained(sotoinpcb(so));
7912 }
7913 }
7914
7915 if (SOCK_DOM(so) == PF_MULTIPATH) {
7916 mptcp_set_restrictions(so);
7917 }
7918
7919 return 0;
7920 }
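
/*
 * Usage sketch (hypothetical, not part of this file): because the deny
 * bits above are trapdoors, a later setsockopt() cannot re-enable what an
 * earlier one denied. The SO_RESTRICTIONS option name below is the
 * private socket option assumed to reach so_set_restrictions(); the rest
 * is a caller-side sketch only.
 *
 *	uint32_t deny = SO_RESTRICT_DENY_CELLULAR;
 *	(void) setsockopt(s, SOL_SOCKET, SO_RESTRICTIONS, &deny, sizeof(deny));
 *
 *	deny = 0;
 *	(void) setsockopt(s, SOL_SOCKET, SO_RESTRICTIONS, &deny, sizeof(deny));
 *
 * After the second call the cellular restriction remains in effect;
 * restrictions can only ever be added to the set.
 */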

uint32_t
so_get_restrictions(struct socket *so)
{
        return so->so_restrictions & (SO_RESTRICT_DENY_IN |
            SO_RESTRICT_DENY_OUT |
            SO_RESTRICT_DENY_CELLULAR | SO_RESTRICT_DENY_EXPENSIVE);
}

int
so_set_effective_pid(struct socket *so, int epid, struct proc *p, boolean_t check_cred)
{
        struct proc *ep = PROC_NULL;
        int error = 0;

        /* pid 0 is reserved for kernel */
        if (epid == 0) {
                error = EINVAL;
                goto done;
        }

        /*
         * If this is an in-kernel socket, prevent its delegate
         * association from changing unless the socket option is
         * coming from within the kernel itself.
         */
        if (so->last_pid == 0 && p != kernproc) {
                error = EACCES;
                goto done;
        }

        /*
         * If this is issued by a process that's recorded as the
         * real owner of the socket, or if the pid is the same as
         * the process's own pid, then proceed. Otherwise ensure
         * that the issuing process has the necessary privileges.
         */
        if (check_cred && (epid != so->last_pid || epid != proc_pid(p))) {
                if ((error = priv_check_cred(kauth_cred_get(),
                    PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0))) {
                        error = EACCES;
                        goto done;
                }
        }

        /* Find the process that corresponds to the effective pid */
        if ((ep = proc_find(epid)) == PROC_NULL) {
                error = ESRCH;
                goto done;
        }

        /*
         * If a process tries to delegate the socket to itself, then
         * there's really nothing to do; treat it as a way for the
         * delegate association to be cleared. Note that we check
         * the passed-in proc rather than calling proc_selfpid(),
         * as we need to check the process issuing the socket option
         * which could be kernproc. Given that we don't allow 0 for
         * effective pid, it means that a delegated in-kernel socket
         * stays delegated during its lifetime (which is probably OK.)
         */
        if (epid == proc_pid(p)) {
                so->so_flags &= ~SOF_DELEGATED;
                so->e_upid = 0;
                so->e_pid = 0;
                uuid_clear(so->e_uuid);
        } else {
                so->so_flags |= SOF_DELEGATED;
                so->e_upid = proc_uniqueid(ep);
                so->e_pid = proc_pid(ep);
                proc_getexecutableuuid(ep, so->e_uuid, sizeof(so->e_uuid));

#if defined(XNU_TARGET_OS_OSX)
                if (ep->p_responsible_pid != so->e_pid) {
                        proc_t rp = proc_find(ep->p_responsible_pid);
                        if (rp != PROC_NULL) {
                                proc_getexecutableuuid(rp, so->so_ruuid, sizeof(so->so_ruuid));
                                so->so_rpid = ep->p_responsible_pid;
                                proc_rele(rp);
                        } else {
                                uuid_clear(so->so_ruuid);
                                so->so_rpid = -1;
                        }
                }
#endif
        }
        if (so->so_proto != NULL && so->so_proto->pr_update_last_owner != NULL) {
                (*so->so_proto->pr_update_last_owner)(so, NULL, ep);
        }
done:
        if (error == 0 && net_io_policy_log) {
                uuid_string_t buf;

                uuid_unparse(so->e_uuid, buf);
                log(LOG_DEBUG, "%s[%s,%d]: so 0x%llx [%d,%d] epid %d (%s) "
                    "euuid %s%s\n", __func__, proc_name_address(p),
                    proc_pid(p), (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
                    SOCK_DOM(so), SOCK_TYPE(so),
                    so->e_pid, proc_name_address(ep), buf,
                    ((so->so_flags & SOF_DELEGATED) ? " [delegated]" : ""));
        } else if (error != 0 && net_io_policy_log) {
                log(LOG_ERR, "%s[%s,%d]: so 0x%llx [%d,%d] epid %d (%s) "
                    "ERROR (%d)\n", __func__, proc_name_address(p),
                    proc_pid(p), (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
                    SOCK_DOM(so), SOCK_TYPE(so),
                    epid, (ep == PROC_NULL) ? "PROC_NULL" :
                    proc_name_address(ep), error);
        }

        /* Update this socket's policy upon success */
        if (error == 0) {
                so->so_policy_gencnt *= -1;
                so_update_policy(so);
#if NECP
                so_update_necp_policy(so, NULL, NULL);
#endif /* NECP */
        }

        if (ep != PROC_NULL) {
                proc_rele(ep);
        }

        return error;
}
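
/*
 * Caller-side sketch (hypothetical, not part of this file): a daemon that
 * opens sockets on behalf of an application is assumed to reach this
 * function through the private SO_DELEGATED socket option, provided it
 * holds the PRIV_NET_PRIVILEGED_SOCKET_DELEGATE privilege. A minimal
 * userland helper might look like:
 *
 *	static int
 *	delegate_socket(int s, pid_t app_pid)
 *	{
 *		return setsockopt(s, SOL_SOCKET, SO_DELEGATED,
 *		    &app_pid, sizeof(app_pid));
 *	}
 *
 * Passing the caller's own pid clears the delegate association again, as
 * described in the comment above.
 */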

int
so_set_effective_uuid(struct socket *so, uuid_t euuid, struct proc *p, boolean_t check_cred)
{
        uuid_string_t buf;
        uuid_t uuid;
        int error = 0;

        /* UUID must not be all-zeroes (reserved for kernel) */
        if (uuid_is_null(euuid)) {
                error = EINVAL;
                goto done;
        }

        /*
         * If this is an in-kernel socket, prevent its delegate
         * association from changing unless the socket option is
         * coming from within the kernel itself.
         */
        if (so->last_pid == 0 && p != kernproc) {
                error = EACCES;
                goto done;
        }

        /* Get the UUID of the issuing process */
        proc_getexecutableuuid(p, uuid, sizeof(uuid));

        /*
         * If this is issued by a process that's recorded as the
         * real owner of the socket, or if the uuid is the same as
         * the process's own uuid, then proceed. Otherwise ensure
         * that the issuing process has the necessary privileges.
         */
        if (check_cred &&
            (uuid_compare(euuid, so->last_uuid) != 0 ||
            uuid_compare(euuid, uuid) != 0)) {
                if ((error = priv_check_cred(kauth_cred_get(),
                    PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0))) {
                        error = EACCES;
                        goto done;
                }
        }

        /*
         * If a process tries to delegate the socket to itself, then
         * there's really nothing to do; treat it as a way for the
         * delegate association to be cleared. Note that we check
         * the uuid of the passed-in proc rather than that of the
         * current process, as we need to check the process issuing
         * the socket option which could be kernproc itself. Given
         * that we don't allow 0 for effective uuid, it means that
         * a delegated in-kernel socket stays delegated during its
         * lifetime (which is okay.)
         */
        if (uuid_compare(euuid, uuid) == 0) {
                so->so_flags &= ~SOF_DELEGATED;
                so->e_upid = 0;
                so->e_pid = 0;
                uuid_clear(so->e_uuid);
        } else {
                so->so_flags |= SOF_DELEGATED;
                /*
                 * Unlike so_set_effective_pid(), we only have the UUID
                 * here and the process ID is not known. Inherit the
                 * real {pid,upid} of the socket.
                 */
                so->e_upid = so->last_upid;
                so->e_pid = so->last_pid;
                uuid_copy(so->e_uuid, euuid);
        }
        /*
         * The following will clear the effective process name as it's the
         * same as the real process.
         */
        if (so->so_proto != NULL && so->so_proto->pr_update_last_owner != NULL) {
                (*so->so_proto->pr_update_last_owner)(so, NULL, NULL);
        }
done:
        if (error == 0 && net_io_policy_log) {
                uuid_unparse(so->e_uuid, buf);
                log(LOG_DEBUG, "%s[%s,%d]: so 0x%llx [%d,%d] epid %d "
                    "euuid %s%s\n", __func__, proc_name_address(p), proc_pid(p),
                    (uint64_t)DEBUG_KERNEL_ADDRPERM(so), SOCK_DOM(so),
                    SOCK_TYPE(so), so->e_pid, buf,
                    ((so->so_flags & SOF_DELEGATED) ? " [delegated]" : ""));
        } else if (error != 0 && net_io_policy_log) {
                uuid_unparse(euuid, buf);
                log(LOG_DEBUG, "%s[%s,%d]: so 0x%llx [%d,%d] euuid %s "
                    "ERROR (%d)\n", __func__, proc_name_address(p), proc_pid(p),
                    (uint64_t)DEBUG_KERNEL_ADDRPERM(so), SOCK_DOM(so),
                    SOCK_TYPE(so), buf, error);
        }

        /* Update this socket's policy upon success */
        if (error == 0) {
                so->so_policy_gencnt *= -1;
                so_update_policy(so);
#if NECP
                so_update_necp_policy(so, NULL, NULL);
#endif /* NECP */
        }

        return error;
}
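
/*
 * Caller-side sketch (hypothetical, not part of this file): the UUID-based
 * variant is assumed to be reachable through the private SO_DELEGATED_UUID
 * socket option when only the delegated application's executable UUID is
 * known; the effective {pid,upid} then stay those of the real owner, as
 * noted in the comment above.
 *
 *	static int
 *	delegate_socket_by_uuid(int s, const uuid_t app_uuid)
 *	{
 *		return setsockopt(s, SOL_SOCKET, SO_DELEGATED_UUID,
 *		    app_uuid, sizeof(uuid_t));
 *	}
 */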

void
netpolicy_post_msg(uint32_t ev_code, struct netpolicy_event_data *ev_data,
    uint32_t ev_datalen)
{
        struct kev_msg ev_msg;

        /*
         * A netpolicy event always starts with a netpolicy_event_data
         * structure, but the caller can provide for a longer event
         * structure to post, depending on the event code.
         */
        VERIFY(ev_data != NULL && ev_datalen >= sizeof(*ev_data));

        bzero(&ev_msg, sizeof(ev_msg));
        ev_msg.vendor_code = KEV_VENDOR_APPLE;
        ev_msg.kev_class = KEV_NETWORK_CLASS;
        ev_msg.kev_subclass = KEV_NETPOLICY_SUBCLASS;
        ev_msg.event_code = ev_code;

        ev_msg.dv[0].data_ptr = ev_data;
        ev_msg.dv[0].data_length = ev_datalen;

        kev_post_msg(&ev_msg);
}
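
/*
 * Caller-side sketch (hypothetical, not part of this file): an event whose
 * payload extends the common header is declared with the header as its
 * first member and posted by passing a pointer to that header along with
 * the full length. The structure and event code below are made up for
 * illustration only.
 *
 *	struct kev_netpolicy_example {
 *		struct netpolicy_event_data ev_data;	common header, first
 *		uint32_t ev_reason;			event-specific payload
 *	};
 *
 *	struct kev_netpolicy_example ev = { 0 };
 *	ev.ev_reason = 1;
 *	netpolicy_post_msg(KEV_NETPOLICY_EXAMPLE, &ev.ev_data, sizeof(ev));
 */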

void
socket_post_kev_msg(uint32_t ev_code,
    struct kev_socket_event_data *ev_data,
    uint32_t ev_datalen)
{
        struct kev_msg ev_msg;

        bzero(&ev_msg, sizeof(ev_msg));
        ev_msg.vendor_code = KEV_VENDOR_APPLE;
        ev_msg.kev_class = KEV_NETWORK_CLASS;
        ev_msg.kev_subclass = KEV_SOCKET_SUBCLASS;
        ev_msg.event_code = ev_code;

        ev_msg.dv[0].data_ptr = ev_data;
        ev_msg.dv[0].data_length = ev_datalen;

        kev_post_msg(&ev_msg);
}

void
socket_post_kev_msg_closed(struct socket *so)
{
        struct kev_socket_closed ev = {};
        struct sockaddr *__single socksa = NULL, *__single peersa = NULL;
        int err;

        if ((so->so_flags1 & SOF1_WANT_KEV_SOCK_CLOSED) == 0) {
                return;
        }
        err = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &socksa);
        if (err == 0) {
                err = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so,
                    &peersa);
                if (err == 0) {
                        SOCKADDR_COPY(socksa, &ev.ev_data.kev_sockname,
                            min(socksa->sa_len,
                            sizeof(ev.ev_data.kev_sockname)));
                        SOCKADDR_COPY(peersa, &ev.ev_data.kev_peername,
                            min(peersa->sa_len,
                            sizeof(ev.ev_data.kev_peername)));
                        socket_post_kev_msg(KEV_SOCKET_CLOSED,
                            &ev.ev_data, sizeof(ev));
                }
        }
        free_sockaddr(socksa);
        free_sockaddr(peersa);
}
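
/*
 * Listener-side sketch (hypothetical, not part of this file): userland can
 * observe KEV_SOCKET_CLOSED for sockets that opted in (SOF1_WANT_KEV_SOCK_CLOSED,
 * set via its corresponding socket option) by filtering the kernel event
 * interface on the socket subclass. The flow below assumes the KEV_* constants
 * and the kev_request/kern_event_msg definitions from <sys/kern_event.h> and
 * the private <sys/socket.h> are visible to the caller.
 *
 *	int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *	struct kev_request req = {
 *		.vendor_code  = KEV_VENDOR_APPLE,
 *		.kev_class    = KEV_NETWORK_CLASS,
 *		.kev_subclass = KEV_SOCKET_SUBCLASS,
 *	};
 *	(void) ioctl(fd, SIOCSKEVFILT, &req);
 *
 *	char buf[1024];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	struct kern_event_msg *msg = (struct kern_event_msg *)buf;
 *	if (n > 0 && msg->event_code == KEV_SOCKET_CLOSED) {
 *		handle_closed((struct kev_socket_event_data *)msg->event_data);
 *	}
 *
 * where handle_closed() is whatever hypothetical handler the listener
 * provides; the payload is the kev_socket_event_data filled in above,
 * holding the local and peer addresses captured at close time.
 */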

__attribute__((noinline, cold, not_tail_called, noreturn))
__private_extern__ int
assfail(const char *a, const char *f, int l)
{
        panic("assertion failed: %s, file: %s, line: %d", a, f, l);
        /* NOTREACHED */
        __builtin_unreachable();
}