1 /*
2 * Copyright (c) 2004-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include "kpi_interface.h"
30
31 #include <sys/queue.h>
32 #include <sys/param.h> /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
45 #include <net/dlil.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/net_api_stats.h>
52 #include <net/route.h>
53 #include <net/if_ports_used.h>
54 #include <libkern/libkern.h>
55 #include <libkern/OSAtomic.h>
56 #include <kern/locks.h>
57 #include <kern/clock.h>
58 #include <sys/sockio.h>
59 #include <sys/proc.h>
60 #include <sys/sysctl.h>
61 #include <sys/mbuf.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/udp.h>
64 #include <netinet/udp_var.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/in_pcb.h>
68 #ifdef INET
69 #include <netinet/igmp_var.h>
70 #endif
71 #include <netinet6/mld6_var.h>
72 #include <netkey/key.h>
73 #include <stdbool.h>
74
75 #include "net/net_str_id.h"
76 #include <net/sockaddr_utils.h>
77
78 #if CONFIG_MACF
79 #include <sys/kauth.h>
80 #include <security/mac_framework.h>
81 #endif
82
83 #if SKYWALK
84 #include <skywalk/os_skywalk_private.h>
85 #include <skywalk/nexus/netif/nx_netif.h>
86 #endif /* SKYWALK */
87
88 extern uint64_t if_creation_generation_count;
89
90 #undef ifnet_allocate
91 errno_t ifnet_allocate(const struct ifnet_init_params *init,
92 ifnet_t *ifp);
93
94 static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
95 ifnet_t *ifp, bool is_internal);
96
97
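/*
 * Stamp a timeval with the current net uptime; sub-second resolution
 * is not tracked, so tv_usec is always zero.
 */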
98 #define TOUCHLASTCHANGE(__if_lastchange) { \
99 (__if_lastchange)->tv_sec = (time_t)net_uptime(); \
100 (__if_lastchange)->tv_usec = 0; \
101 }
102
103 static errno_t ifnet_defrouter_llreachinfo(ifnet_t, sa_family_t,
104 struct ifnet_llreach_info *);
105 static void ifnet_kpi_free(ifnet_t);
106 static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t *__counted_by(*count) *list,
107 u_int32_t *count);
108 static errno_t ifnet_set_lladdr_internal(ifnet_t,
109 const void *__sized_by(lladdr_len) lladdr, size_t lladdr_len,
110 u_char, int);
111 static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
112
113
114 /*
115 * Temporary workaround until we have real reference counting
116 *
117 * We keep the bits about calling dlil_if_release (which should be
118 * called recycle) transparent by calling it from our if_free function
119 * pointer. We have to keep the client's original detach function
120 * somewhere so we can call it.
121 */
122 static void
123 ifnet_kpi_free(ifnet_t ifp)
124 {
125 if ((ifp->if_refflags & IFRF_EMBRYONIC) == 0) {
126 ifnet_detached_func detach_func;
127
128 detach_func = ifp->if_detach;
129 if (detach_func != NULL) {
130 (*detach_func)(ifp);
131 }
132 }
133
134 ifnet_dispose(ifp);
135 }
136
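/*
 * Common allocation path for the legacy ifnet_allocate() KPI: convert
 * the caller's ifnet_init_params into the extended ifnet_init_eparams
 * form and hand off to ifnet_allocate_extended().
 */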
137 errno_t
138 ifnet_allocate_common(const struct ifnet_init_params *init,
139 ifnet_t *ifp, bool is_internal)
140 {
141 struct ifnet_init_eparams einit;
142
143 bzero(&einit, sizeof(einit));
144
145 einit.ver = IFNET_INIT_CURRENT_VERSION;
146 einit.len = sizeof(einit);
147 einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
148 if (!is_internal) {
149 einit.flags |= IFNET_INIT_ALLOC_KPI;
150 }
151 einit.uniqueid = init->uniqueid;
152 einit.uniqueid_len = init->uniqueid_len;
153 einit.name = init->name;
154 einit.unit = init->unit;
155 einit.family = init->family;
156 einit.type = init->type;
157 einit.output = init->output;
158 einit.demux = init->demux;
159 einit.add_proto = init->add_proto;
160 einit.del_proto = init->del_proto;
161 einit.check_multi = init->check_multi;
162 einit.framer = init->framer;
163 einit.softc = init->softc;
164 einit.ioctl = init->ioctl;
165 einit.set_bpf_tap = init->set_bpf_tap;
166 einit.detach = init->detach;
167 einit.event = init->event;
168 einit.broadcast_addr = init->broadcast_addr;
169 einit.broadcast_len = init->broadcast_len;
170
171 return ifnet_allocate_extended(&einit, ifp);
172 }
173
174 errno_t
175 ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
176 {
177 return ifnet_allocate_common(init, ifp, true);
178 }
179
180 errno_t
181 ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
182 {
183 return ifnet_allocate_common(init, ifp, false);
184 }
185
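/*
 * Replace any previously stored broadcast address with a private copy
 * of the caller-supplied one (if any).
 */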
186 static void
187 ifnet_set_broadcast_addr(ifnet_t ifp,
188 const void *__sized_by(broadcast_len) broadcast_addr,
189 u_int32_t broadcast_len)
190 {
191 if (ifp->if_broadcast.length != 0) {
192 kfree_data_counted_by(ifp->if_broadcast.ptr,
193 ifp->if_broadcast.length);
194 }
195 if (broadcast_len != 0 && broadcast_addr != NULL) {
196 ifp->if_broadcast.ptr = kalloc_data(broadcast_len,
197 Z_WAITOK | Z_NOFAIL);
198 ifp->if_broadcast.length = broadcast_len;
199 bcopy(broadcast_addr, ifp->if_broadcast.ptr,
200 broadcast_len);
201 }
202 }
203
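/*
 * Main allocation routine: validate the extended init parameters,
 * acquire an ifnet from DLIL, and populate it with the caller's
 * callbacks, bandwidth/latency hints and flags.
 */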
204 errno_t
205 ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
206 ifnet_t *interface)
207 {
208 #if SKYWALK
209 ifnet_start_func ostart = NULL;
210 #endif /* SKYWALK */
211 struct ifnet_init_eparams einit;
212 ifnet_ref_t ifp = NULL;
213 char if_xname[IFXNAMSIZ] = {0};
214 int error;
215
216 einit = *einit0;
217
218 if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
219 einit.len < sizeof(einit)) {
220 return EINVAL;
221 }
222
223 if (einit.family == 0 || einit.name == NULL ||
224 strlen(einit.name) >= IFNAMSIZ ||
225 (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) {
226 return EINVAL;
227 }
228
229 #if SKYWALK
230 /* headroom must be a multiple of 8 bytes */
231 if ((einit.tx_headroom & 0x7) != 0) {
232 return EINVAL;
233 }
234 if ((einit.flags & IFNET_INIT_SKYWALK_NATIVE) == 0) {
235 /*
236 * Currently, interface advisory reporting is supported only
237 * for Skywalk interfaces.
238 */
239 if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
240 return EINVAL;
241 }
242 }
243 #endif /* SKYWALK */
244
245 if (einit.flags & IFNET_INIT_LEGACY) {
246 #if SKYWALK
247 if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
248 return EINVAL;
249 }
250 #endif /* SKYWALK */
251 if (einit.output == NULL ||
252 (einit.flags & IFNET_INIT_INPUT_POLL)) {
253 return EINVAL;
254 }
255 einit.pre_enqueue = NULL;
256 einit.start = NULL;
257 einit.output_ctl = NULL;
258 einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
259 einit.input_poll = NULL;
260 einit.input_ctl = NULL;
261 } else {
262 #if SKYWALK
263 /*
264 * For native Skywalk drivers, steer all start requests
265 * to ifp_if_start() until the netif device adapter is
266 * fully activated, at which point we will point it to
267 * nx_netif_doorbell().
268 */
269 if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
270 if (einit.start != NULL) {
271 return EINVAL;
272 }
273 /* override output start callback */
274 ostart = einit.start = ifp_if_start;
275 } else {
276 ostart = einit.start;
277 }
278 #endif /* SKYWALK */
279 if (einit.start == NULL) {
280 return EINVAL;
281 }
282
283 einit.output = NULL;
284 if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX) {
285 return EINVAL;
286 }
287
288 if (einit.flags & IFNET_INIT_INPUT_POLL) {
289 if (einit.input_poll == NULL || einit.input_ctl == NULL) {
290 return EINVAL;
291 }
292 } else {
293 einit.input_poll = NULL;
294 einit.input_ctl = NULL;
295 }
296 }
297
298 if (einit.type > UCHAR_MAX) {
299 return EINVAL;
300 }
301
302 if (einit.unit > SHRT_MAX) {
303 return EINVAL;
304 }
305
306 /* Initialize external name (name + unit) */
307 snprintf(if_xname, sizeof(if_xname), "%s%d",
308 einit.name, einit.unit);
309
310 if (einit.uniqueid == NULL) {
311 einit.uniqueid_len = (uint32_t)strbuflen(if_xname);
312 einit.uniqueid = if_xname;
313 }
314
315 error = dlil_if_acquire(einit.family, einit.uniqueid,
316 einit.uniqueid_len,
317 __unsafe_null_terminated_from_indexable(if_xname), &ifp);
318
319 if (error == 0) {
320 uint64_t br;
321
322 /*
323 * Cast ifp->if_name as non-const. dlil_if_acquire sets it up
324 * to point to storage of at least IFNAMSIZ bytes. It is safe
325 * to write to this.
326 */
327 char *ifname = __unsafe_forge_bidi_indexable(char *, __DECONST(char *, ifp->if_name), IFNAMSIZ);
328 const char *einit_name = __unsafe_forge_bidi_indexable(const char *, einit.name, IFNAMSIZ);
329 strbufcpy(ifname, IFNAMSIZ, einit_name, IFNAMSIZ);
330 ifp->if_type = (u_char)einit.type;
331 ifp->if_family = einit.family;
332 ifp->if_subfamily = einit.subfamily;
333 ifp->if_unit = (short)einit.unit;
334 ifp->if_output = einit.output;
335 ifp->if_pre_enqueue = einit.pre_enqueue;
336 ifp->if_start = einit.start;
337 ifp->if_output_ctl = einit.output_ctl;
338 ifp->if_output_sched_model = einit.output_sched_model;
339 ifp->if_output_bw.eff_bw = einit.output_bw;
340 ifp->if_output_bw.max_bw = einit.output_bw_max;
341 ifp->if_output_lt.eff_lt = einit.output_lt;
342 ifp->if_output_lt.max_lt = einit.output_lt_max;
343 ifp->if_input_poll = einit.input_poll;
344 ifp->if_input_ctl = einit.input_ctl;
345 ifp->if_input_bw.eff_bw = einit.input_bw;
346 ifp->if_input_bw.max_bw = einit.input_bw_max;
347 ifp->if_input_lt.eff_lt = einit.input_lt;
348 ifp->if_input_lt.max_lt = einit.input_lt_max;
349 ifp->if_demux = einit.demux;
350 ifp->if_add_proto = einit.add_proto;
351 ifp->if_del_proto = einit.del_proto;
352 ifp->if_check_multi = einit.check_multi;
353 ifp->if_framer_legacy = einit.framer;
354 ifp->if_framer = einit.framer_extended;
355 ifp->if_softc = einit.softc;
356 ifp->if_ioctl = einit.ioctl;
357 ifp->if_set_bpf_tap = einit.set_bpf_tap;
358 ifp->if_free = (einit.free != NULL) ? einit.free : ifnet_kpi_free;
359 ifp->if_event = einit.event;
360 ifp->if_detach = einit.detach;
361
362 /* Initialize Network ID */
363 ifp->network_id_len = 0;
364 bzero(&ifp->network_id, sizeof(ifp->network_id));
365
366 /* Initialize external name (name + unit) */
367 char *ifxname = __unsafe_forge_bidi_indexable(char *, __DECONST(char *, ifp->if_xname), IFXNAMSIZ);
368 snprintf(ifxname, IFXNAMSIZ, "%s", if_xname);
369
370 /*
371 * On embedded, framer() is already in the extended form;
372 * we simply use it as is, unless the caller specifies
373 * framer_extended() which will then override it.
374 *
375 * On non-embedded, framer() has long been exposed as part
376 * of the public KPI, and therefore its signature must
377 * remain the same (without the pre- and postpend length
378 * parameters.) We special case ether_frameout, such that
379 * it gets mapped to its extended variant. All other cases
380 * utilize the stub routine which will simply return zeroes
381 * for those new parameters.
382 *
383 * Internally, DLIL will only use the extended callback
384 * variant which is represented by if_framer.
385 */
386 #if !XNU_TARGET_OS_OSX
387 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
388 ifp->if_framer = ifp->if_framer_legacy;
389 }
390 #else /* XNU_TARGET_OS_OSX */
391 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
392 if (ifp->if_framer_legacy == ether_frameout) {
393 ifp->if_framer = ether_frameout_extended;
394 } else {
395 ifp->if_framer = ifnet_framer_stub;
396 }
397 }
398 #endif /* XNU_TARGET_OS_OSX */
399
400 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
401 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
402 } else if (ifp->if_output_bw.eff_bw == 0) {
403 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
404 }
405
406 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
407 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
408 } else if (ifp->if_input_bw.eff_bw == 0) {
409 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
410 }
411
412 if (ifp->if_output_bw.max_bw == 0) {
413 ifp->if_output_bw = ifp->if_input_bw;
414 } else if (ifp->if_input_bw.max_bw == 0) {
415 ifp->if_input_bw = ifp->if_output_bw;
416 }
417
418 /* Pin if_baudrate to 32 bits */
419 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
420 if (br != 0) {
421 ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
422 }
423
424 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
425 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
426 } else if (ifp->if_output_lt.eff_lt == 0) {
427 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
428 }
429
430 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
431 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
432 } else if (ifp->if_input_lt.eff_lt == 0) {
433 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
434 }
435
436 if (ifp->if_output_lt.max_lt == 0) {
437 ifp->if_output_lt = ifp->if_input_lt;
438 } else if (ifp->if_input_lt.max_lt == 0) {
439 ifp->if_input_lt = ifp->if_output_lt;
440 }
441
442 if (ifp->if_ioctl == NULL) {
443 ifp->if_ioctl = ifp_if_ioctl;
444 }
445
446 if_clear_eflags(ifp, -1);
447 if (ifp->if_start != NULL) {
448 if_set_eflags(ifp, IFEF_TXSTART);
449 if (ifp->if_pre_enqueue == NULL) {
450 ifp->if_pre_enqueue = ifnet_enqueue;
451 }
452 ifp->if_output = ifp->if_pre_enqueue;
453 }
454
455 if (ifp->if_input_poll != NULL) {
456 if_set_eflags(ifp, IFEF_RXPOLL);
457 }
458
459 ifp->if_output_dlil = dlil_output_handler;
460 ifp->if_input_dlil = dlil_input_handler;
461
462 VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
463 (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
464 ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
465 ifp->if_input_ctl == NULL));
466 VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
467 (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));
468
469 ifnet_set_broadcast_addr(ifp, einit.broadcast_addr,
470 einit.broadcast_len);
471
472 if_clear_xflags(ifp, -1);
473 #if SKYWALK
474 ifp->if_tx_headroom = 0;
475 ifp->if_tx_trailer = 0;
476 ifp->if_rx_mit_ival = 0;
477 ifp->if_save_start = ostart;
478 if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
479 VERIFY(ifp->if_eflags & IFEF_TXSTART);
480 VERIFY(!(einit.flags & IFNET_INIT_LEGACY));
481 if_set_eflags(ifp, IFEF_SKYWALK_NATIVE);
482 ifp->if_tx_headroom = einit.tx_headroom;
483 ifp->if_tx_trailer = einit.tx_trailer;
484 ifp->if_rx_mit_ival = einit.rx_mit_ival;
485 /*
486 * For native Skywalk drivers, make sure packets
487 * emitted by the BSD stack get dropped until the
488 * interface is in service. When the netif host
489 * adapter is fully activated, we'll point it to
490 * nx_netif_output().
491 */
492 ifp->if_output = ifp_if_output;
493 /*
494 * Override driver-supplied parameters
495 * and force IFEF_ENQUEUE_MULTI?
496 */
497 if (sk_netif_native_txmodel ==
498 NETIF_NATIVE_TXMODEL_ENQUEUE_MULTI) {
499 einit.start_delay_qlen = sk_tx_delay_qlen;
500 einit.start_delay_timeout = sk_tx_delay_timeout;
501 }
502 /* netif comes with native interfaces */
503 VERIFY((ifp->if_xflags & IFXF_LEGACY) == 0);
504 } else if (!ifnet_needs_compat(ifp)) {
505 /*
506 * If we're told not to plumb in netif compat
507 * for this interface, set IFXF_NX_NOAUTO to
508 * prevent DLIL from auto-attaching the nexus.
509 */
510 einit.flags |= IFNET_INIT_NX_NOAUTO;
511 /* legacy (non-netif) interface */
512 if_set_xflags(ifp, IFXF_LEGACY);
513 }
514
515 ifp->if_save_output = ifp->if_output;
516 if ((einit.flags & IFNET_INIT_NX_NOAUTO) != 0) {
517 if_set_xflags(ifp, IFXF_NX_NOAUTO);
518 }
519 if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
520 if_set_eflags(ifp, IFEF_ADV_REPORT);
521 }
522 #else /* !SKYWALK */
523 /* legacy interface */
524 if_set_xflags(ifp, IFXF_LEGACY);
525 #endif /* !SKYWALK */
526
527 if ((ifp->if_snd = ifclassq_alloc()) == NULL) {
528 panic_plain("%s: ifp=%p couldn't allocate class queues",
529 __func__, ifp);
530 /* NOTREACHED */
531 }
532
533 /*
534 * The output target queue delay is specified in milliseconds;
535 * convert it to nanoseconds.
536 */
537 IFCQ_TARGET_QDELAY(ifp->if_snd) =
538 einit.output_target_qdelay * 1000 * 1000;
539 IFCQ_MAXLEN(ifp->if_snd) = einit.sndq_maxlen;
540
541 ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
542 einit.start_delay_timeout);
543
544 IFCQ_PKT_DROP_LIMIT(ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;
545
546 /*
547 * Set embryonic flag; this will be cleared
548 * later when it is fully attached.
549 */
550 ifp->if_refflags = IFRF_EMBRYONIC;
551
552 /*
553 * Count the newly allocated ifnet
554 */
555 OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count);
556 INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
557 if ((einit.flags & IFNET_INIT_ALLOC_KPI) != 0) {
558 if_set_xflags(ifp, IFXF_ALLOC_KPI);
559 } else {
560 OSIncrementAtomic64(
561 &net_api_stats.nas_ifnet_alloc_os_count);
562 INC_ATOMIC_INT64_LIM(
563 net_api_stats.nas_ifnet_alloc_os_total);
564 }
565
566 if (ifp->if_subfamily == IFNET_SUBFAMILY_MANAGEMENT) {
567 if_set_xflags(ifp, IFXF_MANAGEMENT);
568 if_management_interface_check_needed = true;
569 }
570
571 /*
572 * Increment the generation count on interface creation
573 */
574 ifp->if_creation_generation_id = os_atomic_inc(&if_creation_generation_count, relaxed);
575
576 *interface = ifp;
577 }
578 return error;
579 }
580
581 errno_t
582 ifnet_reference(ifnet_t ifp)
583 {
584 return dlil_if_ref(ifp);
585 }
586
587 void
588 ifnet_dispose(ifnet_t ifp)
589 {
590 dlil_if_release(ifp);
591 }
592
593 errno_t
594 ifnet_release(ifnet_t ifp)
595 {
596 return dlil_if_free(ifp);
597 }
598
599 errno_t
600 ifnet_interface_family_find(const char *module_string,
601 ifnet_family_t *family_id)
602 {
603 if (module_string == NULL || family_id == NULL) {
604 return EINVAL;
605 }
606
607 return net_str_id_find_internal(module_string, family_id,
608 NSI_IF_FAM_ID, 1);
609 }
610
611 void *
612 ifnet_softc(ifnet_t interface)
613 {
614 return (interface == NULL) ? NULL : interface->if_softc;
615 }
616
617 const char *
618 ifnet_name(ifnet_t interface)
619 {
620 return (interface == NULL) ? NULL : interface->if_name;
621 }
622
623 ifnet_family_t
624 ifnet_family(ifnet_t interface)
625 {
626 return (interface == NULL) ? 0 : interface->if_family;
627 }
628
629 ifnet_subfamily_t
630 ifnet_subfamily(ifnet_t interface)
631 {
632 return (interface == NULL) ? 0 : interface->if_subfamily;
633 }
634
635 u_int32_t
636 ifnet_unit(ifnet_t interface)
637 {
638 return (interface == NULL) ? (u_int32_t)0xffffffff :
639 (u_int32_t)interface->if_unit;
640 }
641
642 u_int32_t
643 ifnet_index(ifnet_t interface)
644 {
645 return (interface == NULL) ? (u_int32_t)0xffffffff :
646 interface->if_index;
647 }
648
649 errno_t
650 ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
651 {
652 bool set_IFF_UP;
653 bool change_IFF_UP;
654 uint16_t old_flags;
655
656 if (interface == NULL) {
657 return EINVAL;
658 }
659 set_IFF_UP = (new_flags & IFF_UP) != 0;
660 change_IFF_UP = (mask & IFF_UP) != 0;
661 #if SKYWALK
662 if (set_IFF_UP && change_IFF_UP) {
663 /*
664 * When a native skywalk interface is marked IFF_UP, ensure
665 * the flowswitch is attached.
666 */
667 ifnet_attach_native_flowswitch(interface);
668 }
669 #endif /* SKYWALK */
670
671 ifnet_lock_exclusive(interface);
672
673 /* If we are modifying the up/down state, call if_updown */
674 if (change_IFF_UP) {
675 if_updown(interface, set_IFF_UP);
676 }
677
678 old_flags = interface->if_flags;
679 interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
680 /* If we are modifying the multicast flag, set/unset the silent flag */
681 if ((old_flags & IFF_MULTICAST) !=
682 (interface->if_flags & IFF_MULTICAST)) {
683 #if INET
684 if (IGMP_IFINFO(interface) != NULL) {
685 igmp_initsilent(interface, IGMP_IFINFO(interface));
686 }
687 #endif /* INET */
688 if (MLD_IFINFO(interface) != NULL) {
689 mld6_initsilent(interface, MLD_IFINFO(interface));
690 }
691 }
692
693 ifnet_lock_done(interface);
694
695 return 0;
696 }
697
698 u_int16_t
699 ifnet_flags(ifnet_t interface)
700 {
701 return (interface == NULL) ? 0 : interface->if_flags;
702 }
703
704 /*
705 * This routine ensures the following:
706 *
707 * If IFEF_AWDL is set by the caller, also set the rest of the flags as
708 * defined in IFEF_AWDL_MASK.
709 *
710 * If IFEF_AWDL has been set on the interface and the caller attempts
711 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
712 * return failure.
713 *
714 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
715 * on the interface.
716 *
717 * All other flags not associated with AWDL are not affected.
718 *
719 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
720 */
721 static errno_t
722 ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
723 {
724 u_int32_t eflags;
725
726 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
727
728 eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));
729
730 if (ifp->if_eflags & IFEF_AWDL) {
731 if (eflags & IFEF_AWDL) {
732 if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) {
733 return EINVAL;
734 }
735 } else {
736 *new_eflags &= ~IFEF_AWDL_MASK;
737 *mask |= IFEF_AWDL_MASK;
738 }
739 } else if (eflags & IFEF_AWDL) {
740 *new_eflags |= IFEF_AWDL_MASK;
741 *mask |= IFEF_AWDL_MASK;
742 } else if (eflags & IFEF_AWDL_RESTRICTED &&
743 !(ifp->if_eflags & IFEF_AWDL)) {
744 return EINVAL;
745 }
746
747 return 0;
748 }
749
750 errno_t
751 ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
752 {
753 uint32_t oeflags;
754 struct kev_msg ev_msg;
755 struct net_event_data ev_data;
756
757 if (interface == NULL) {
758 return EINVAL;
759 }
760
761 bzero(&ev_msg, sizeof(ev_msg));
762 ifnet_lock_exclusive(interface);
763 /*
764 * Sanity checks for IFEF_AWDL and its related flags.
765 */
766 if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
767 ifnet_lock_done(interface);
768 return EINVAL;
769 }
770 /*
771 * Currently, interface advisory reporting is supported only for
772 * Skywalk interfaces.
773 */
774 if ((((new_flags & mask) & IFEF_ADV_REPORT) != 0) &&
775 ((interface->if_eflags & IFEF_SKYWALK_NATIVE) == 0)) {
776 ifnet_lock_done(interface);
777 return EINVAL;
778 }
779 oeflags = interface->if_eflags;
780 if_clear_eflags(interface, mask);
781 if (new_flags != 0) {
782 if_set_eflags(interface, (new_flags & mask));
783 }
784 ifnet_lock_done(interface);
785 if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
786 !(oeflags & IFEF_AWDL_RESTRICTED)) {
787 ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
788 /*
789 * The interface is now restricted to applications that have
790 * the entitlement.
791 * The check for the entitlement will be done in the data
792 * path, so we don't have to do anything here.
793 */
794 } else if (oeflags & IFEF_AWDL_RESTRICTED &&
795 !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) {
796 ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
797 }
798 /*
799 * Notify configd so that it has a chance to perform better
800 * reachability detection.
801 */
802 if (ev_msg.event_code) {
803 bzero(&ev_data, sizeof(ev_data));
804 ev_msg.vendor_code = KEV_VENDOR_APPLE;
805 ev_msg.kev_class = KEV_NETWORK_CLASS;
806 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
807 strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
808 ev_data.if_family = interface->if_family;
809 ev_data.if_unit = interface->if_unit;
810 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
811 ev_msg.dv[0].data_ptr = &ev_data;
812 ev_msg.dv[1].data_length = 0;
813 dlil_post_complete_msg(interface, &ev_msg);
814 }
815
816 return 0;
817 }
818
819 u_int32_t
820 ifnet_eflags(ifnet_t interface)
821 {
822 return (interface == NULL) ? 0 : interface->if_eflags;
823 }
824
825 errno_t
826 ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
827 {
828 if (ifp == NULL) {
829 return EINVAL;
830 }
831 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
832
833 /*
834 * If this is called prior to ifnet attach, the actual work will
835 * be done at attach time. Otherwise, if it is called after
836 * ifnet detach, then it is a no-op.
837 */
838 if (!ifnet_is_attached(ifp, 0)) {
839 ifp->if_idle_new_flags = new_flags;
840 ifp->if_idle_new_flags_mask = mask;
841 return 0;
842 } else {
843 ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
844 }
845
846 ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
847 return 0;
848 }
849
850 errno_t
851 ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
852 {
853 errno_t err;
854
855 ifnet_lock_exclusive(ifp);
856 err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
857 ifnet_lock_done(ifp);
858
859 return err;
860 }
861
862 u_int32_t
863 ifnet_idle_flags(ifnet_t ifp)
864 {
865 return (ifp == NULL) ? 0 : ifp->if_idle_flags;
866 }
867
868 errno_t
869 ifnet_set_link_quality(ifnet_t ifp, int quality)
870 {
871 errno_t err = 0;
872
873 if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
874 err = EINVAL;
875 goto done;
876 }
877
878 if (!ifnet_is_attached(ifp, 0)) {
879 err = ENXIO;
880 goto done;
881 }
882
883 if_lqm_update(ifp, quality, 0);
884
885 done:
886 return err;
887 }
888
889 int
890 ifnet_link_quality(ifnet_t ifp)
891 {
892 int lqm;
893
894 if (ifp == NULL) {
895 return IFNET_LQM_THRESH_OFF;
896 }
897
898 ifnet_lock_shared(ifp);
899 lqm = ifp->if_interface_state.lqm_state;
900 ifnet_lock_done(ifp);
901
902 return lqm;
903 }
904
905 errno_t
906 ifnet_set_interface_state(ifnet_t ifp,
907 struct if_interface_state *if_interface_state)
908 {
909 errno_t err = 0;
910
911 if (ifp == NULL || if_interface_state == NULL) {
912 err = EINVAL;
913 goto done;
914 }
915
916 if (!ifnet_is_attached(ifp, 0)) {
917 err = ENXIO;
918 goto done;
919 }
920
921 if_state_update(ifp, if_interface_state);
922
923 done:
924 return err;
925 }
926
927 errno_t
928 ifnet_get_interface_state(ifnet_t ifp,
929 struct if_interface_state *if_interface_state)
930 {
931 errno_t err = 0;
932
933 if (ifp == NULL || if_interface_state == NULL) {
934 err = EINVAL;
935 goto done;
936 }
937
938 if (!ifnet_is_attached(ifp, 0)) {
939 err = ENXIO;
940 goto done;
941 }
942
943 if_get_state(ifp, if_interface_state);
944
945 done:
946 return err;
947 }
948
949
950 static errno_t
951 ifnet_defrouter_llreachinfo(ifnet_t ifp, sa_family_t af,
952 struct ifnet_llreach_info *iflri)
953 {
954 if (ifp == NULL || iflri == NULL) {
955 return EINVAL;
956 }
957
958 VERIFY(af == AF_INET || af == AF_INET6);
959
960 return ifnet_llreach_get_defrouter(ifp, af, iflri);
961 }
962
963 errno_t
964 ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
965 {
966 return ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri);
967 }
968
969 errno_t
970 ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
971 {
972 return ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri);
973 }
974
975 errno_t
976 ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
977 u_int32_t mask)
978 {
979 errno_t error = 0;
980 int tmp;
981
982 if (ifp == NULL) {
983 return EINVAL;
984 }
985
986 ifnet_lock_exclusive(ifp);
987 tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
988 if ((tmp & ~IFCAP_VALID)) {
989 error = EINVAL;
990 } else {
991 ifp->if_capabilities = tmp;
992 }
993 ifnet_lock_done(ifp);
994
995 return error;
996 }
997
998 u_int32_t
999 ifnet_capabilities_supported(ifnet_t ifp)
1000 {
1001 return (ifp == NULL) ? 0 : ifp->if_capabilities;
1002 }
1003
1004
1005 errno_t
1006 ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
1007 u_int32_t mask)
1008 {
1009 errno_t error = 0;
1010 int tmp;
1011 struct kev_msg ev_msg;
1012 struct net_event_data ev_data;
1013
1014 if (ifp == NULL) {
1015 return EINVAL;
1016 }
1017
1018 ifnet_lock_exclusive(ifp);
1019 tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
1020 if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) {
1021 error = EINVAL;
1022 } else {
1023 ifp->if_capenable = tmp;
1024 }
1025 ifnet_lock_done(ifp);
1026
1027 /* Notify application of the change */
1028 bzero(&ev_data, sizeof(struct net_event_data));
1029 bzero(&ev_msg, sizeof(struct kev_msg));
1030 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1031 ev_msg.kev_class = KEV_NETWORK_CLASS;
1032 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1033
1034 ev_msg.event_code = KEV_DL_IFCAP_CHANGED;
1035 strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
1036 ev_data.if_family = ifp->if_family;
1037 ev_data.if_unit = (u_int32_t)ifp->if_unit;
1038 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1039 ev_msg.dv[0].data_ptr = &ev_data;
1040 ev_msg.dv[1].data_length = 0;
1041 dlil_post_complete_msg(ifp, &ev_msg);
1042
1043 return error;
1044 }
1045
1046 u_int32_t
1047 ifnet_capabilities_enabled(ifnet_t ifp)
1048 {
1049 return (ifp == NULL) ? 0 : ifp->if_capenable;
1050 }
1051
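/* Offload flags that may be stored in if_hwassist via this KPI. */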
1052 static const ifnet_offload_t offload_mask =
1053 (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
1054 IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
1055 IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
1056 IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
1057 IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
1058 IFNET_SW_TIMESTAMP);
1059
1060 static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;
1061
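/*
 * Store the driver's offload flags in if_hwassist and mirror them into
 * the corresponding interface capability bits; when set_both is TRUE,
 * the supported capabilities are updated as well as the enabled ones.
 */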
1062 static errno_t
1063 ifnet_set_offload_common(ifnet_t interface, ifnet_offload_t offload, boolean_t set_both)
1064 {
1065 u_int32_t ifcaps = 0;
1066
1067 if (interface == NULL) {
1068 return EINVAL;
1069 }
1070
1071 ifnet_lock_exclusive(interface);
1072 interface->if_hwassist = (offload & offload_mask);
1073
1074 #if SKYWALK
1075 /* preserve skywalk capability */
1076 if ((interface->if_capabilities & IFCAP_SKYWALK) != 0) {
1077 ifcaps |= IFCAP_SKYWALK;
1078 }
1079 #endif /* SKYWALK */
1080 if (dlil_verbose) {
1081 log(LOG_DEBUG, "%s: set offload flags=0x%x\n",
1082 if_name(interface),
1083 interface->if_hwassist);
1084 }
1085 ifnet_lock_done(interface);
1086
1087 if ((offload & any_offload_csum)) {
1088 ifcaps |= IFCAP_HWCSUM;
1089 }
1090 if ((offload & IFNET_TSO_IPV4)) {
1091 ifcaps |= IFCAP_TSO4;
1092 }
1093 if ((offload & IFNET_TSO_IPV6)) {
1094 ifcaps |= IFCAP_TSO6;
1095 }
1096 if ((offload & IFNET_LRO)) {
1097 ifcaps |= IFCAP_LRO;
1098 }
1099 if ((offload & IFNET_VLAN_MTU)) {
1100 ifcaps |= IFCAP_VLAN_MTU;
1101 }
1102 if ((offload & IFNET_VLAN_TAGGING)) {
1103 ifcaps |= IFCAP_VLAN_HWTAGGING;
1104 }
1105 if ((offload & IFNET_TX_STATUS)) {
1106 ifcaps |= IFCAP_TXSTATUS;
1107 }
1108 if ((offload & IFNET_HW_TIMESTAMP)) {
1109 ifcaps |= IFCAP_HW_TIMESTAMP;
1110 }
1111 if ((offload & IFNET_SW_TIMESTAMP)) {
1112 ifcaps |= IFCAP_SW_TIMESTAMP;
1113 }
1114 if ((offload & IFNET_CSUM_PARTIAL)) {
1115 ifcaps |= IFCAP_CSUM_PARTIAL;
1116 }
1117 if ((offload & IFNET_CSUM_ZERO_INVERT)) {
1118 ifcaps |= IFCAP_CSUM_ZERO_INVERT;
1119 }
1120 if (ifcaps != 0) {
1121 if (set_both) {
1122 (void) ifnet_set_capabilities_supported(interface,
1123 ifcaps, IFCAP_VALID);
1124 }
1125 (void) ifnet_set_capabilities_enabled(interface, ifcaps,
1126 IFCAP_VALID);
1127 }
1128
1129 return 0;
1130 }
1131
1132 errno_t
1133 ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
1134 {
1135 return ifnet_set_offload_common(interface, offload, TRUE);
1136 }
1137
1138 errno_t
1139 ifnet_set_offload_enabled(ifnet_t interface, ifnet_offload_t offload)
1140 {
1141 return ifnet_set_offload_common(interface, offload, FALSE);
1142 }
1143
1144 ifnet_offload_t
1145 ifnet_offload(ifnet_t interface)
1146 {
1147 return (interface == NULL) ?
1148 0 : (interface->if_hwassist & offload_mask);
1149 }
1150
1151 errno_t
1152 ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
1153 {
1154 errno_t error = 0;
1155
1156 if (interface == NULL || mtuLen < interface->if_mtu) {
1157 return EINVAL;
1158 }
1159 if (mtuLen > IP_MAXPACKET) {
1160 return EINVAL;
1161 }
1162
1163 switch (family) {
1164 case AF_INET:
1165 if (interface->if_hwassist & IFNET_TSO_IPV4) {
1166 interface->if_tso_v4_mtu = mtuLen;
1167 } else {
1168 error = EINVAL;
1169 }
1170 break;
1171
1172 case AF_INET6:
1173 if (interface->if_hwassist & IFNET_TSO_IPV6) {
1174 interface->if_tso_v6_mtu = mtuLen;
1175 } else {
1176 error = EINVAL;
1177 }
1178 break;
1179
1180 default:
1181 error = EPROTONOSUPPORT;
1182 break;
1183 }
1184
1185 if (error == 0) {
1186 struct ifclassq *ifq = interface->if_snd;
1187 ASSERT(ifq != NULL);
1188 /* Inform all transmit queues about the new TSO MTU */
1189 IFCQ_LOCK(ifq);
1190 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_MTU);
1191 IFCQ_UNLOCK(ifq);
1192 }
1193
1194 return error;
1195 }
1196
1197 errno_t
1198 ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
1199 {
1200 errno_t error = 0;
1201
1202 if (interface == NULL || mtuLen == NULL) {
1203 return EINVAL;
1204 }
1205
1206 switch (family) {
1207 case AF_INET:
1208 if (interface->if_hwassist & IFNET_TSO_IPV4) {
1209 *mtuLen = interface->if_tso_v4_mtu;
1210 } else {
1211 error = EINVAL;
1212 }
1213 break;
1214
1215 case AF_INET6:
1216 if (interface->if_hwassist & IFNET_TSO_IPV6) {
1217 *mtuLen = interface->if_tso_v6_mtu;
1218 } else {
1219 error = EINVAL;
1220 }
1221 break;
1222
1223 default:
1224 error = EPROTONOSUPPORT;
1225 break;
1226 }
1227
1228 return error;
1229 }
1230
1231 errno_t
1232 ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
1233 {
1234 struct kev_msg ev_msg;
1235 struct net_event_data ev_data;
1236
1237 bzero(&ev_data, sizeof(struct net_event_data));
1238 bzero(&ev_msg, sizeof(struct kev_msg));
1239
1240 if (interface == NULL) {
1241 return EINVAL;
1242 }
1243
1244 /* Do not accept wacky values */
1245 if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) {
1246 return EINVAL;
1247 }
1248
1249 if ((mask & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1250 if ((properties & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1251 if_set_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1252 } else {
1253 if_clear_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1254 }
1255 }
1256
1257 (void) ifnet_touch_lastchange(interface);
1258
1259 /* Notify application of the change */
1260 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1261 ev_msg.kev_class = KEV_NETWORK_CLASS;
1262 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1263
1264 ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED;
1265 strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
1266 ev_data.if_family = interface->if_family;
1267 ev_data.if_unit = (u_int32_t)interface->if_unit;
1268 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1269 ev_msg.dv[0].data_ptr = &ev_data;
1270 ev_msg.dv[1].data_length = 0;
1271 dlil_post_complete_msg(interface, &ev_msg);
1272
1273 return 0;
1274 }
1275
1276 u_int32_t
1277 ifnet_get_wake_flags(ifnet_t interface)
1278 {
1279 u_int32_t flags = 0;
1280
1281 if (interface == NULL) {
1282 return 0;
1283 }
1284
1285 if ((interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) != 0) {
1286 flags |= IF_WAKE_ON_MAGIC_PACKET;
1287 }
1288
1289 return flags;
1290 }
1291
1292 /*
1293 * Should MIB data store a copy?
1294 */
1295 errno_t
1296 ifnet_set_link_mib_data(ifnet_t interface, void *__sized_by(mibLen) mibData, uint32_t mibLen)
1297 {
1298 if (interface == NULL) {
1299 return EINVAL;
1300 }
1301
1302 ifnet_lock_exclusive(interface);
1303 interface->if_linkmib = (void*)mibData;
1304 interface->if_linkmiblen = mibLen;
1305 ifnet_lock_done(interface);
1306 return 0;
1307 }
1308
1309 errno_t
1310 ifnet_get_link_mib_data(ifnet_t interface, void *__sized_by(*mibLen) mibData, uint32_t *mibLen)
1311 {
1312 errno_t result = 0;
1313
1314 if (interface == NULL) {
1315 return EINVAL;
1316 }
1317
1318 ifnet_lock_shared(interface);
1319 if (*mibLen < interface->if_linkmiblen) {
1320 result = EMSGSIZE;
1321 }
1322 if (result == 0 && interface->if_linkmib == NULL) {
1323 result = ENOTSUP;
1324 }
1325
1326 if (result == 0) {
1327 *mibLen = interface->if_linkmiblen;
1328 bcopy(interface->if_linkmib, mibData, *mibLen);
1329 }
1330 ifnet_lock_done(interface);
1331
1332 return result;
1333 }
1334
1335 uint32_t
1336 ifnet_get_link_mib_data_length(ifnet_t interface)
1337 {
1338 return (interface == NULL) ? 0 : interface->if_linkmiblen;
1339 }
1340
1341 errno_t
1342 ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1343 mbuf_t m, void *route, const struct sockaddr *dest)
1344 {
1345 if (interface == NULL || protocol_family == 0 || m == NULL) {
1346 if (m != NULL) {
1347 mbuf_freem_list(m);
1348 }
1349 return EINVAL;
1350 }
1351 return dlil_output(interface, protocol_family, m, route, dest,
1352 DLIL_OUTPUT_FLAGS_NONE, NULL);
1353 }
1354
1355 errno_t
1356 ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1357 {
1358 if (interface == NULL || m == NULL) {
1359 if (m != NULL) {
1360 mbuf_freem_list(m);
1361 }
1362 return EINVAL;
1363 }
1364 return dlil_output(interface, protocol_family, m, NULL, NULL,
1365 DLIL_OUTPUT_FLAGS_RAW, NULL);
1366 }
1367
1368 errno_t
1369 ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1370 {
1371 if (interface == NULL) {
1372 return EINVAL;
1373 }
1374
1375 interface->if_mtu = mtu;
1376 return 0;
1377 }
1378
1379 u_int32_t
1380 ifnet_mtu(ifnet_t interface)
1381 {
1382 return (interface == NULL) ? 0 : interface->if_mtu;
1383 }
1384
1385 u_char
1386 ifnet_type(ifnet_t interface)
1387 {
1388 return (interface == NULL) ? 0 : interface->if_data.ifi_type;
1389 }
1390
1391 errno_t
1392 ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1393 {
1394 if (interface == NULL) {
1395 return EINVAL;
1396 }
1397
1398 interface->if_data.ifi_addrlen = addrlen;
1399 return 0;
1400 }
1401
1402 u_char
1403 ifnet_addrlen(ifnet_t interface)
1404 {
1405 return (interface == NULL) ? 0 : interface->if_data.ifi_addrlen;
1406 }
1407
1408 errno_t
1409 ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1410 {
1411 if (interface == NULL) {
1412 return EINVAL;
1413 }
1414
1415 interface->if_data.ifi_hdrlen = hdrlen;
1416 return 0;
1417 }
1418
1419 u_char
1420 ifnet_hdrlen(ifnet_t interface)
1421 {
1422 return (interface == NULL) ? 0 : interface->if_data.ifi_hdrlen;
1423 }
1424
1425 errno_t
1426 ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1427 {
1428 if (interface == NULL) {
1429 return EINVAL;
1430 }
1431
1432 interface->if_data.ifi_metric = metric;
1433 return 0;
1434 }
1435
1436 u_int32_t
1437 ifnet_metric(ifnet_t interface)
1438 {
1439 return (interface == NULL) ? 0 : interface->if_data.ifi_metric;
1440 }
1441
1442 errno_t
1443 ifnet_set_baudrate(struct ifnet *ifp, uint64_t baudrate)
1444 {
1445 if (ifp == NULL) {
1446 return EINVAL;
1447 }
1448
1449 ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1450 ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1451
1452 /* Pin if_baudrate to 32 bits until we can change the storage size */
1453 ifp->if_baudrate = (baudrate > UINT32_MAX) ? UINT32_MAX : (uint32_t)baudrate;
1454
1455 return 0;
1456 }
1457
1458 u_int64_t
1459 ifnet_baudrate(struct ifnet *ifp)
1460 {
1461 return (ifp == NULL) ? 0 : ifp->if_baudrate;
1462 }
1463
1464 errno_t
1465 ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1466 struct if_bandwidths *input_bw)
1467 {
1468 if (ifp == NULL) {
1469 return EINVAL;
1470 }
1471
1472 /* set input values first (if any), as output values depend on them */
1473 if (input_bw != NULL) {
1474 (void) ifnet_set_input_bandwidths(ifp, input_bw);
1475 }
1476
1477 if (output_bw != NULL) {
1478 (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1479 }
1480
1481 return 0;
1482 }
1483
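/*
 * Mirror the current uplink bandwidth values into the Wi-Fi link
 * status report; callers hold if_link_status_lock exclusively.
 */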
1484 static void
1485 ifnet_set_link_status_outbw(struct ifnet *ifp)
1486 {
1487 struct if_wifi_status_v1 *sr;
1488 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1489 if (ifp->if_output_bw.eff_bw != 0) {
1490 sr->valid_bitmask |=
1491 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
1492 sr->ul_effective_bandwidth =
1493 ifp->if_output_bw.eff_bw > UINT32_MAX ?
1494 UINT32_MAX :
1495 (uint32_t)ifp->if_output_bw.eff_bw;
1496 }
1497 if (ifp->if_output_bw.max_bw != 0) {
1498 sr->valid_bitmask |=
1499 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
1500 sr->ul_max_bandwidth =
1501 ifp->if_output_bw.max_bw > UINT32_MAX ?
1502 UINT32_MAX :
1503 (uint32_t)ifp->if_output_bw.max_bw;
1504 }
1505 }
1506
1507 errno_t
1508 ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
1509 boolean_t locked)
1510 {
1511 struct if_bandwidths old_bw;
1512 struct ifclassq *ifq;
1513 u_int64_t br;
1514
1515 VERIFY(ifp != NULL && bw != NULL);
1516
1517 ifq = ifp->if_snd;
1518 if (!locked) {
1519 IFCQ_LOCK(ifq);
1520 }
1521 IFCQ_LOCK_ASSERT_HELD(ifq);
1522
1523 old_bw = ifp->if_output_bw;
1524 if (bw->eff_bw != 0) {
1525 ifp->if_output_bw.eff_bw = bw->eff_bw;
1526 }
1527 if (bw->max_bw != 0) {
1528 ifp->if_output_bw.max_bw = bw->max_bw;
1529 }
1530 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
1531 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
1532 } else if (ifp->if_output_bw.eff_bw == 0) {
1533 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
1534 }
1535
1536 /* Pin if_baudrate to 32 bits */
1537 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
1538 if (br != 0) {
1539 ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
1540 }
1541
1542 /* Adjust queue parameters if needed */
1543 if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
1544 old_bw.max_bw != ifp->if_output_bw.max_bw) {
1545 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
1546 }
1547
1548 if (!locked) {
1549 IFCQ_UNLOCK(ifq);
1550 }
1551
1552 /*
1553 * If this is a Wi-Fi interface, also update the values in
1554 * the if_link_status structure.
1555 */
1556 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1557 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1558 ifnet_set_link_status_outbw(ifp);
1559 lck_rw_done(&ifp->if_link_status_lock);
1560 }
1561
1562 return 0;
1563 }
1564
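/*
 * Mirror the current downlink bandwidth values into the Wi-Fi link
 * status report; callers hold if_link_status_lock exclusively.
 */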
1565 static void
1566 ifnet_set_link_status_inbw(struct ifnet *ifp)
1567 {
1568 struct if_wifi_status_v1 *sr;
1569
1570 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1571 if (ifp->if_input_bw.eff_bw != 0) {
1572 sr->valid_bitmask |=
1573 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
1574 sr->dl_effective_bandwidth =
1575 ifp->if_input_bw.eff_bw > UINT32_MAX ?
1576 UINT32_MAX :
1577 (uint32_t)ifp->if_input_bw.eff_bw;
1578 }
1579 if (ifp->if_input_bw.max_bw != 0) {
1580 sr->valid_bitmask |=
1581 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
1582 sr->dl_max_bandwidth = ifp->if_input_bw.max_bw > UINT32_MAX ?
1583 UINT32_MAX :
1584 (uint32_t)ifp->if_input_bw.max_bw;
1585 }
1586 }
1587
1588 errno_t
1589 ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
1590 {
1591 struct if_bandwidths old_bw;
1592
1593 VERIFY(ifp != NULL && bw != NULL);
1594
1595 old_bw = ifp->if_input_bw;
1596 if (bw->eff_bw != 0) {
1597 ifp->if_input_bw.eff_bw = bw->eff_bw;
1598 }
1599 if (bw->max_bw != 0) {
1600 ifp->if_input_bw.max_bw = bw->max_bw;
1601 }
1602 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
1603 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
1604 } else if (ifp->if_input_bw.eff_bw == 0) {
1605 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
1606 }
1607
1608 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1609 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1610 ifnet_set_link_status_inbw(ifp);
1611 lck_rw_done(&ifp->if_link_status_lock);
1612 }
1613
1614 if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
1615 old_bw.max_bw != ifp->if_input_bw.max_bw) {
1616 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
1617 }
1618
1619 return 0;
1620 }
1621
1622 u_int64_t
1623 ifnet_output_linkrate(struct ifnet *ifp)
1624 {
1625 struct ifclassq *ifq = ifp->if_snd;
1626 u_int64_t rate;
1627
1628 IFCQ_LOCK_ASSERT_HELD(ifq);
1629
1630 rate = ifp->if_output_bw.eff_bw;
1631 if (IFCQ_TBR_IS_ENABLED(ifq)) {
1632 u_int64_t tbr_rate = ifq->ifcq_tbr.tbr_rate_raw;
1633 VERIFY(tbr_rate > 0);
1634 rate = MIN(rate, ifq->ifcq_tbr.tbr_rate_raw);
1635 }
1636
1637 return rate;
1638 }
1639
1640 u_int64_t
1641 ifnet_input_linkrate(struct ifnet *ifp)
1642 {
1643 return ifp->if_input_bw.eff_bw;
1644 }
1645
1646 errno_t
1647 ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1648 struct if_bandwidths *input_bw)
1649 {
1650 if (ifp == NULL) {
1651 return EINVAL;
1652 }
1653
1654 if (output_bw != NULL) {
1655 *output_bw = ifp->if_output_bw;
1656 }
1657 if (input_bw != NULL) {
1658 *input_bw = ifp->if_input_bw;
1659 }
1660
1661 return 0;
1662 }
1663
1664 errno_t
1665 ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1666 struct if_latencies *input_lt)
1667 {
1668 if (ifp == NULL) {
1669 return EINVAL;
1670 }
1671
1672 if (output_lt != NULL) {
1673 (void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1674 }
1675
1676 if (input_lt != NULL) {
1677 (void) ifnet_set_input_latencies(ifp, input_lt);
1678 }
1679
1680 return 0;
1681 }
1682
1683 errno_t
1684 ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
1685 boolean_t locked)
1686 {
1687 struct if_latencies old_lt;
1688 struct ifclassq *ifq;
1689
1690 VERIFY(ifp != NULL && lt != NULL);
1691
1692 ifq = ifp->if_snd;
1693 if (!locked) {
1694 IFCQ_LOCK(ifq);
1695 }
1696 IFCQ_LOCK_ASSERT_HELD(ifq);
1697
1698 old_lt = ifp->if_output_lt;
1699 if (lt->eff_lt != 0) {
1700 ifp->if_output_lt.eff_lt = lt->eff_lt;
1701 }
1702 if (lt->max_lt != 0) {
1703 ifp->if_output_lt.max_lt = lt->max_lt;
1704 }
1705 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
1706 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
1707 } else if (ifp->if_output_lt.eff_lt == 0) {
1708 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
1709 }
1710
1711 /* Adjust queue parameters if needed */
1712 if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
1713 old_lt.max_lt != ifp->if_output_lt.max_lt) {
1714 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
1715 }
1716
1717 if (!locked) {
1718 IFCQ_UNLOCK(ifq);
1719 }
1720
1721 return 0;
1722 }
1723
1724 errno_t
1725 ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
1726 {
1727 struct if_latencies old_lt;
1728
1729 VERIFY(ifp != NULL && lt != NULL);
1730
1731 old_lt = ifp->if_input_lt;
1732 if (lt->eff_lt != 0) {
1733 ifp->if_input_lt.eff_lt = lt->eff_lt;
1734 }
1735 if (lt->max_lt != 0) {
1736 ifp->if_input_lt.max_lt = lt->max_lt;
1737 }
1738 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
1739 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
1740 } else if (ifp->if_input_lt.eff_lt == 0) {
1741 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
1742 }
1743
1744 if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
1745 old_lt.max_lt != ifp->if_input_lt.max_lt) {
1746 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
1747 }
1748
1749 return 0;
1750 }
1751
1752 errno_t
1753 ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1754 struct if_latencies *input_lt)
1755 {
1756 if (ifp == NULL) {
1757 return EINVAL;
1758 }
1759
1760 if (output_lt != NULL) {
1761 *output_lt = ifp->if_output_lt;
1762 }
1763 if (input_lt != NULL) {
1764 *input_lt = ifp->if_input_lt;
1765 }
1766
1767 return 0;
1768 }
1769
1770 errno_t
1771 ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1772 {
1773 errno_t err;
1774
1775 if (ifp == NULL) {
1776 return EINVAL;
1777 } else if (!ifnet_is_attached(ifp, 1)) {
1778 return ENXIO;
1779 }
1780
1781 #if SKYWALK
1782 if (SKYWALK_CAPABLE(ifp)) {
1783 err = netif_rxpoll_set_params(ifp, p, FALSE);
1784 ifnet_decr_iorefcnt(ifp);
1785 return err;
1786 }
1787 #endif /* SKYWALK */
1788 err = dlil_rxpoll_set_params(ifp, p, FALSE);
1789
1790 /* Release the io ref count */
1791 ifnet_decr_iorefcnt(ifp);
1792
1793 return err;
1794 }
1795
1796 errno_t
1797 ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1798 {
1799 errno_t err;
1800
1801 if (ifp == NULL || p == NULL) {
1802 return EINVAL;
1803 } else if (!ifnet_is_attached(ifp, 1)) {
1804 return ENXIO;
1805 }
1806
1807 err = dlil_rxpoll_get_params(ifp, p);
1808
1809 /* Release the io ref count */
1810 ifnet_decr_iorefcnt(ifp);
1811
1812 return err;
1813 }
1814
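/*
 * Atomically add the caller-supplied deltas to the interface
 * statistics and touch the last-change timestamp.
 */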
1815 errno_t
1816 ifnet_stat_increment(struct ifnet *ifp,
1817 const struct ifnet_stat_increment_param *s)
1818 {
1819 if (ifp == NULL) {
1820 return EINVAL;
1821 }
1822
1823 if (s->packets_in != 0) {
1824 os_atomic_add(&ifp->if_data.ifi_ipackets, s->packets_in, relaxed);
1825 }
1826 if (s->bytes_in != 0) {
1827 os_atomic_add(&ifp->if_data.ifi_ibytes, s->bytes_in, relaxed);
1828 }
1829 if (s->errors_in != 0) {
1830 os_atomic_add(&ifp->if_data.ifi_ierrors, s->errors_in, relaxed);
1831 }
1832
1833 if (s->packets_out != 0) {
1834 os_atomic_add(&ifp->if_data.ifi_opackets, s->packets_out, relaxed);
1835 }
1836 if (s->bytes_out != 0) {
1837 os_atomic_add(&ifp->if_data.ifi_obytes, s->bytes_out, relaxed);
1838 }
1839 if (s->errors_out != 0) {
1840 os_atomic_add(&ifp->if_data.ifi_oerrors, s->errors_out, relaxed);
1841 }
1842
1843 if (s->collisions != 0) {
1844 os_atomic_add(&ifp->if_data.ifi_collisions, s->collisions, relaxed);
1845 }
1846 if (s->dropped != 0) {
1847 os_atomic_add(&ifp->if_data.ifi_iqdrops, s->dropped, relaxed);
1848 }
1849
1850 /* Touch the last change time. */
1851 TOUCHLASTCHANGE(&ifp->if_lastchange);
1852
1853 if (ifp->if_data_threshold != 0) {
1854 ifnet_notify_data_threshold(ifp);
1855 }
1856
1857 return 0;
1858 }
1859
1860 errno_t
1861 ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
1862 u_int32_t bytes_in, u_int32_t errors_in)
1863 {
1864 if (ifp == NULL) {
1865 return EINVAL;
1866 }
1867
1868 if (packets_in != 0) {
1869 os_atomic_add(&ifp->if_data.ifi_ipackets, packets_in, relaxed);
1870 }
1871 if (bytes_in != 0) {
1872 os_atomic_add(&ifp->if_data.ifi_ibytes, bytes_in, relaxed);
1873 }
1874 if (errors_in != 0) {
1875 os_atomic_add(&ifp->if_data.ifi_ierrors, errors_in, relaxed);
1876 }
1877
1878 TOUCHLASTCHANGE(&ifp->if_lastchange);
1879
1880 if (ifp->if_data_threshold != 0) {
1881 ifnet_notify_data_threshold(ifp);
1882 }
1883
1884 return 0;
1885 }
1886
1887 errno_t
1888 ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
1889 u_int32_t bytes_out, u_int32_t errors_out)
1890 {
1891 if (ifp == NULL) {
1892 return EINVAL;
1893 }
1894
1895 if (packets_out != 0) {
1896 os_atomic_add(&ifp->if_data.ifi_opackets, packets_out, relaxed);
1897 }
1898 if (bytes_out != 0) {
1899 os_atomic_add(&ifp->if_data.ifi_obytes, bytes_out, relaxed);
1900 }
1901 if (errors_out != 0) {
1902 os_atomic_add(&ifp->if_data.ifi_oerrors, errors_out, relaxed);
1903 }
1904
1905 TOUCHLASTCHANGE(&ifp->if_lastchange);
1906
1907 if (ifp->if_data_threshold != 0) {
1908 ifnet_notify_data_threshold(ifp);
1909 }
1910
1911 return 0;
1912 }
1913
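/*
 * Overwrite the interface statistics counters with the supplied
 * absolute values (as opposed to the incremental updates above).
 */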
1914 errno_t
1915 ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1916 {
1917 if (ifp == NULL) {
1918 return EINVAL;
1919 }
1920
1921 os_atomic_store(&ifp->if_data.ifi_ipackets, s->packets_in, release);
1922 os_atomic_store(&ifp->if_data.ifi_ibytes, s->bytes_in, release);
1923 os_atomic_store(&ifp->if_data.ifi_imcasts, s->multicasts_in, release);
1924 os_atomic_store(&ifp->if_data.ifi_ierrors, s->errors_in, release);
1925
1926 os_atomic_store(&ifp->if_data.ifi_opackets, s->packets_out, release);
1927 os_atomic_store(&ifp->if_data.ifi_obytes, s->bytes_out, release);
1928 os_atomic_store(&ifp->if_data.ifi_omcasts, s->multicasts_out, release);
1929 os_atomic_store(&ifp->if_data.ifi_oerrors, s->errors_out, release);
1930
1931 os_atomic_store(&ifp->if_data.ifi_collisions, s->collisions, release);
1932 os_atomic_store(&ifp->if_data.ifi_iqdrops, s->dropped, release);
1933 os_atomic_store(&ifp->if_data.ifi_noproto, s->no_protocol, release);
1934
1935 /* Touch the last change time. */
1936 TOUCHLASTCHANGE(&ifp->if_lastchange);
1937
1938 if (ifp->if_data_threshold != 0) {
1939 ifnet_notify_data_threshold(ifp);
1940 }
1941
1942 return 0;
1943 }
1944
1945 errno_t
1946 ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1947 {
1948 if (ifp == NULL) {
1949 return EINVAL;
1950 }
1951
1952 s->packets_in = os_atomic_load(&ifp->if_data.ifi_ipackets, relaxed);
1953 s->bytes_in = os_atomic_load(&ifp->if_data.ifi_ibytes, relaxed);
1954 s->multicasts_in = os_atomic_load(&ifp->if_data.ifi_imcasts, relaxed);
1955 s->errors_in = os_atomic_load(&ifp->if_data.ifi_ierrors, relaxed);
1956
1957 s->packets_out = os_atomic_load(&ifp->if_data.ifi_opackets, relaxed);
1958 s->bytes_out = os_atomic_load(&ifp->if_data.ifi_obytes, relaxed);
1959 s->multicasts_out = os_atomic_load(&ifp->if_data.ifi_omcasts, relaxed);
1960 s->errors_out = os_atomic_load(&ifp->if_data.ifi_oerrors, relaxed);
1961
1962 s->collisions = os_atomic_load(&ifp->if_data.ifi_collisions, relaxed);
1963 s->dropped = os_atomic_load(&ifp->if_data.ifi_iqdrops, relaxed);
1964 s->no_protocol = os_atomic_load(&ifp->if_data.ifi_noproto, relaxed);
1965
1966 if (ifp->if_data_threshold != 0) {
1967 ifnet_notify_data_threshold(ifp);
1968 }
1969
1970 return 0;
1971 }
1972
1973 errno_t
1974 ifnet_touch_lastchange(ifnet_t interface)
1975 {
1976 if (interface == NULL) {
1977 return EINVAL;
1978 }
1979
1980 TOUCHLASTCHANGE(&interface->if_lastchange);
1981
1982 return 0;
1983 }
1984
1985 errno_t
1986 ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1987 {
1988 if (interface == NULL) {
1989 return EINVAL;
1990 }
1991
1992 *last_change = interface->if_data.ifi_lastchange;
1993 /* Crude conversion from uptime to calendar time */
1994 last_change->tv_sec += boottime_sec();
1995
1996 return 0;
1997 }
1998
1999 errno_t
2000 ifnet_touch_lastupdown(ifnet_t interface)
2001 {
2002 if (interface == NULL) {
2003 return EINVAL;
2004 }
2005
2006 TOUCHLASTCHANGE(&interface->if_lastupdown);
2007
2008 return 0;
2009 }
2010
2011 errno_t
2012 ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta)
2013 {
2014 if (interface == NULL) {
2015 return EINVAL;
2016 }
2017
2018 /* Calculate the delta */
2019 updown_delta->tv_sec = (time_t)net_uptime();
2020 if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) {
2021 updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec;
2022 } else {
2023 updown_delta->tv_sec = 0;
2024 }
2025 updown_delta->tv_usec = 0;
2026
2027 return 0;
2028 }
2029
2030 errno_t
2031 ifnet_get_address_list(ifnet_t interface, ifaddr_t *__null_terminated *addresses)
2032 {
2033 return addresses == NULL ? EINVAL :
2034 ifnet_get_address_list_family(interface, addresses, 0);
2035 }
2036
2037 errno_t
2038 ifnet_get_address_list_with_count(ifnet_t interface,
2039 ifaddr_t *__counted_by(*addresses_count) * addresses,
2040 uint16_t *addresses_count)
2041 {
2042 return ifnet_get_address_list_family_internal(interface, addresses,
2043 addresses_count, 0, 0, Z_WAITOK, 0);
2044 }
2045
2046 struct ifnet_addr_list {
2047 SLIST_ENTRY(ifnet_addr_list) ifal_le;
2048 struct ifaddr *ifal_ifa;
2049 };
2050
2051 errno_t
2052 ifnet_get_address_list_family(ifnet_t interface, ifaddr_t *__null_terminated *ret_addresses,
2053 sa_family_t family)
2054 {
2055 uint16_t addresses_count = 0;
2056 ifaddr_t *__counted_by(addresses_count) addresses = NULL;
2057 errno_t error;
2058
2059 error = ifnet_get_address_list_family_internal(interface, &addresses,
2060 &addresses_count, family, 0, Z_WAITOK, 0);
2061 if (addresses_count > 0) {
2062 *ret_addresses = __unsafe_null_terminated_from_indexable(addresses,
2063 &addresses[addresses_count - 1]);
2064 } else {
2065 *ret_addresses = NULL;
2066 }
2067
2068 return error;
2069 }
2070
2071 errno_t
2072 ifnet_get_address_list_family_with_count(ifnet_t interface,
2073 ifaddr_t *__counted_by(*addresses_count) *addresses,
2074 uint16_t *addresses_count, sa_family_t family)
2075 {
2076 return ifnet_get_address_list_family_internal(interface, addresses,
2077 addresses_count, family, 0, Z_WAITOK, 0);
2078 }
2079
2080 errno_t
2081 ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t *__null_terminated *ret_addresses)
2082 {
2083 uint16_t addresses_count = 0;
2084 ifaddr_t *__counted_by(addresses_count) addresses = NULL;
2085 errno_t error;
2086
2087 error = ifnet_get_address_list_family_internal(interface, &addresses,
2088 &addresses_count, 0, 0, Z_WAITOK, 1);
2089 if (addresses_count > 0) {
2090 *ret_addresses = __unsafe_null_terminated_from_indexable(addresses,
2091 &addresses[addresses_count - 1]);
2092 } else {
2093 *ret_addresses = NULL;
2094 }
2095
2096 return error;
2097 }
2098
2099 extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
2100 extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
2101
2102 __private_extern__ errno_t
2103 ifnet_get_address_list_family_internal(ifnet_t interface,
2104 ifaddr_t *__counted_by(*addresses_count) *addresses,
2105 uint16_t *addresses_count, sa_family_t family, int detached, int how,
2106 int return_inuse_addrs)
2107 {
2108 SLIST_HEAD(, ifnet_addr_list) ifal_head;
2109 struct ifnet_addr_list *ifal, *ifal_tmp;
2110 struct ifnet *ifp;
2111 uint16_t count = 0;
2112 errno_t err = 0;
2113 int usecount = 0;
2114 int index = 0;
2115
2116 SLIST_INIT(&ifal_head);
2117
2118 if (addresses == NULL || addresses_count == NULL) {
2119 err = EINVAL;
2120 goto done;
2121 }
2122 *addresses = NULL;
2123 *addresses_count = 0;
2124
2125 if (detached) {
2126 /*
2127 * Interface has been detached, so skip the lookup
2128 * at ifnet_head and go directly to inner loop.
2129 */
2130 ifp = interface;
2131 if (ifp == NULL) {
2132 err = EINVAL;
2133 goto done;
2134 }
2135 goto one;
2136 }
2137
2138 ifnet_head_lock_shared();
2139 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2140 if (interface != NULL && ifp != interface) {
2141 continue;
2142 }
2143 one:
2144 ifnet_lock_shared(ifp);
2145 if (interface == NULL || interface == ifp) {
2146 struct ifaddr *ifa;
2147 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
2148 IFA_LOCK(ifa);
2149 if (family != 0 &&
2150 ifa->ifa_addr->sa_family != family) {
2151 IFA_UNLOCK(ifa);
2152 continue;
2153 }
2154 ifal = kalloc_type(struct ifnet_addr_list, how);
2155 if (ifal == NULL) {
2156 IFA_UNLOCK(ifa);
2157 ifnet_lock_done(ifp);
2158 if (!detached) {
2159 ifnet_head_done();
2160 }
2161 err = ENOMEM;
2162 goto done;
2163 }
2164 ifal->ifal_ifa = ifa;
2165 ifa_addref(ifa);
2166 SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
2167 IFA_UNLOCK(ifa);
2168 if (__improbable(os_inc_overflow(&count))) {
2169 ifnet_lock_done(ifp);
2170 if (!detached) {
2171 ifnet_head_done();
2172 }
2173 err = EINVAL;
2174 goto done;
2175 }
2176 }
2177 }
2178 ifnet_lock_done(ifp);
2179 if (detached) {
2180 break;
2181 }
2182 }
2183 if (!detached) {
2184 ifnet_head_done();
2185 }
2186
2187 if (count == 0) {
2188 err = ENXIO;
2189 goto done;
2190 }
2191
2192 uint16_t allocation_size = 0;
2193 if (__improbable(os_add_overflow(count, 1, &allocation_size))) {
2194 err = EINVAL;
2195 goto done;
2196 }
2197 ifaddr_t *allocation = kalloc_type(ifaddr_t, allocation_size, how | Z_ZERO);
2198 if (allocation == NULL) {
2199 err = ENOMEM;
2200 goto done;
2201 }
2202 *addresses = allocation;
2203 *addresses_count = allocation_size;
2204
2205 done:
2206 SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
2207 SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
2208 if (err == 0) {
2209 if (return_inuse_addrs) {
2210 usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
2211 usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
2212 if (usecount) {
2213 (*addresses)[index] = ifal->ifal_ifa;
2214 index++;
2215 } else {
2216 ifa_remref(ifal->ifal_ifa);
2217 }
2218 } else {
2219 (*addresses)[--count] = ifal->ifal_ifa;
2220 }
2221 } else {
2222 ifa_remref(ifal->ifal_ifa);
2223 }
2224 kfree_type(struct ifnet_addr_list, ifal);
2225 }
2226
2227 VERIFY(err == 0 || *addresses == NULL);
2228 if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
2229 VERIFY(return_inuse_addrs == 1);
2230 kfree_type_counted_by(ifaddr_t, *addresses_count, *addresses);
2231 err = ENXIO;
2232 }
2233 return err;
2234 }
2235
2236 void
2237 ifnet_free_address_list(ifaddr_t *__null_terminated addresses)
2238 {
2239 int i = 0;
2240
2241 if (addresses == NULL) {
2242 return;
2243 }
2244
2245 for (ifaddr_t *__null_terminated ptr = addresses; *ptr != NULL; ++ptr, i++) {
2246 ifa_remref(*ptr);
2247 }
2248
2249 ifaddr_t *free_addresses = __unsafe_null_terminated_to_indexable(addresses);
2250 kfree_type(ifaddr_t, i + 1, free_addresses);
2251 }
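
/*
 * Illustrative sketch (not part of the original source): walking the
 * NULL-terminated list returned by ifnet_get_address_list_family() and
 * releasing it with ifnet_free_address_list().  "ifp" is assumed to be
 * an ifnet_t the caller already holds a reference on.
 *
 *	ifaddr_t *addrs = NULL;
 *
 *	if (ifnet_get_address_list_family(ifp, &addrs, AF_INET) == 0) {
 *		for (ifaddr_t *p = addrs; *p != NULL; p++) {
 *			if (ifaddr_address_family(*p) == AF_INET) {
 *				... inspect *p ...
 *			}
 *		}
 *		ifnet_free_address_list(addrs);
 *	}
 */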
2252
2253 void
2254 ifnet_address_list_free_counted_by_internal(ifaddr_t *__counted_by(addresses_count) addresses,
2255 uint16_t addresses_count)
2256 {
2257 if (addresses == NULL) {
2258 return;
2259 }
2260 for (int i = 0; i < addresses_count; i++) {
2261 if (addresses[i] != NULL) {
2262 ifa_remref(addresses[i]);
2263 }
2264 }
2265 kfree_type_counted_by(ifaddr_t, addresses_count, addresses);
2266 }
2267
2268 void *
2269 ifnet_lladdr(ifnet_t interface)
2270 {
2271 struct ifaddr *ifa;
2272 void *lladdr;
2273
2274 if (interface == NULL) {
2275 return NULL;
2276 }
2277
2278 /*
2279 * if_lladdr points to the permanent link address of
2280 * the interface and it never gets deallocated; internal
2281 * code should simply use IF_LLADDR() for performance.
2282 */
2283 ifa = interface->if_lladdr;
2284 IFA_LOCK_SPIN(ifa);
2285 struct sockaddr_dl *sdl = SDL(ifa->ifa_addr);
2286 lladdr = LLADDR(sdl);
2287 IFA_UNLOCK(ifa);
2288
2289 return lladdr;
2290 }
2291
2292 errno_t
2293 ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *__sized_by(buffer_len) addr,
2294 size_t buffer_len, size_t *out_len)
2295 {
2296 if (interface == NULL || addr == NULL || out_len == NULL) {
2297 return EINVAL;
2298 }
2299
2300 *out_len = interface->if_broadcast.length;
2301
2302 if (buffer_len < interface->if_broadcast.length) {
2303 return EMSGSIZE;
2304 }
2305
2306 if (interface->if_broadcast.length == 0) {
2307 return ENXIO;
2308 }
2309
2310 bcopy(interface->if_broadcast.ptr, addr,
2311 interface->if_broadcast.length);
2312
2313 return 0;
2314 }
2315
2316 static errno_t
2317 ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *__sized_by(lladdr_len) lladdr,
2318 size_t lladdr_len, kauth_cred_t *credp)
2319 {
2320 size_t bytes_len;
2321 const u_int8_t *bytes;
2322 struct ifaddr *ifa;
2323 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
2324 errno_t error = 0;
2325
2326 /*
2327 	 * Make sure to accommodate the largest possible
2328 * size of SA(if_lladdr)->sa_len.
2329 */
2330 _CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));
2331
2332 if (interface == NULL || lladdr == NULL) {
2333 return EINVAL;
2334 }
2335
2336 ifa = interface->if_lladdr;
2337 IFA_LOCK_SPIN(ifa);
2338 const struct sockaddr_dl *sdl = SDL(sdlbuf);
2339 SOCKADDR_COPY(ifa->ifa_addr, sdl, SA(ifa->ifa_addr)->sa_len);
2340 IFA_UNLOCK(ifa);
2341
2342 bytes = dlil_ifaddr_bytes_indexable(SDL(sdlbuf), &bytes_len, credp);
2343 if (bytes_len != lladdr_len) {
2344 bzero(lladdr, lladdr_len);
2345 error = EMSGSIZE;
2346 } else {
2347 bcopy(bytes, lladdr, bytes_len);
2348 }
2349
2350 return error;
2351 }
2352
2353 errno_t
2354 ifnet_lladdr_copy_bytes(ifnet_t interface, void *__sized_by(length) lladdr, size_t length)
2355 {
2356 return ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
2357 NULL);
2358 }
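
/*
 * Illustrative sketch (not part of the original source): copying the
 * link-layer address of an Ethernet-style interface into a fixed
 * six-byte buffer.  The buffer length must match the interface's
 * address length, otherwise the call fails with EMSGSIZE.
 *
 *	u_int8_t mac[ETHER_ADDR_LEN];
 *
 *	if (ifnet_lladdr_copy_bytes(ifp, mac, sizeof(mac)) == 0) {
 *		... mac[] now holds the link-layer address ...
 *	}
 */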
2359
2360 errno_t
2361 ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *__sized_by(length) lladdr, size_t length)
2362 {
2363 #if CONFIG_MACF
2364 kauth_cred_t __single cred;
2365 net_thread_marks_t __single marks;
2366 #endif
2367 kauth_cred_t *__single credp;
2368 errno_t error;
2369
2370 #if CONFIG_MACF
2371 marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
2372 cred = current_cached_proc_cred(PROC_NULL);
2373 credp = &cred;
2374 #else
2375 credp = NULL;
2376 #endif
2377
2378 error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
2379 credp);
2380
2381 #if CONFIG_MACF
2382 net_thread_marks_pop(marks);
2383 #endif
2384
2385 return error;
2386 }
2387
2388 static errno_t
2389 ifnet_set_lladdr_internal(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr,
2390 size_t lladdr_len, u_char new_type, int apply_type)
2391 {
2392 struct ifaddr *ifa;
2393 errno_t error = 0;
2394
2395 if (interface == NULL) {
2396 return EINVAL;
2397 }
2398
2399 ifnet_head_lock_shared();
2400 ifnet_lock_exclusive(interface);
2401 if (lladdr_len != 0 &&
2402 (lladdr_len != interface->if_addrlen || lladdr == 0)) {
2403 ifnet_lock_done(interface);
2404 ifnet_head_done();
2405 return EINVAL;
2406 }
2407 /* The interface needs to be attached to add an address */
2408 if (interface->if_refflags & IFRF_EMBRYONIC) {
2409 ifnet_lock_done(interface);
2410 ifnet_head_done();
2411 return ENXIO;
2412 }
2413
2414 ifa = ifnet_addrs[interface->if_index - 1];
2415 if (ifa != NULL) {
2416 struct sockaddr_dl *sdl;
2417
2418 IFA_LOCK_SPIN(ifa);
2419 sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2420 if (lladdr_len != 0) {
2421 bcopy(lladdr, LLADDR(sdl), lladdr_len);
2422 } else {
2423 bzero(LLADDR(sdl), interface->if_addrlen);
2424 }
2425 /* lladdr_len-check with if_addrlen makes sure it fits in u_char */
2426 sdl->sdl_alen = (u_char)lladdr_len;
2427
2428 if (apply_type) {
2429 sdl->sdl_type = new_type;
2430 }
2431 IFA_UNLOCK(ifa);
2432 } else {
2433 error = ENXIO;
2434 }
2435 ifnet_lock_done(interface);
2436 ifnet_head_done();
2437
2438 /* Generate a kernel event */
2439 if (error == 0) {
2440 intf_event_enqueue_nwk_wq_entry(interface, NULL,
2441 INTF_EVENT_CODE_LLADDR_UPDATE);
2442 dlil_post_msg(interface, KEV_DL_SUBCLASS,
2443 KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0, FALSE);
2444 }
2445
2446 return error;
2447 }
2448
2449 errno_t
2450 ifnet_set_lladdr(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr, size_t lladdr_len)
2451 {
2452 return ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0);
2453 }
2454
2455 errno_t
2456 ifnet_set_lladdr_and_type(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr,
2457 size_t lladdr_len, u_char type)
2458 {
2459 return ifnet_set_lladdr_internal(interface, lladdr,
2460 lladdr_len, type, 1);
2461 }
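
/*
 * Illustrative sketch (not part of the original source): programming a
 * locally administered MAC address on an attached Ethernet-style
 * interface.  The hypothetical new_mac[] is assumed to match the
 * interface's configured address length (if_addrlen); otherwise
 * ifnet_set_lladdr() returns EINVAL.
 *
 *	u_int8_t new_mac[ETHER_ADDR_LEN] = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x44 };
 *
 *	errno_t err = ifnet_set_lladdr(ifp, new_mac, sizeof(new_mac));
 */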
2462
2463 errno_t
2464 ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
2465 ifmultiaddr_t *ifmap)
2466 {
2467 if (interface == NULL || maddr == NULL) {
2468 return EINVAL;
2469 }
2470
2471 /* Don't let users screw up protocols' entries. */
2472 switch (maddr->sa_family) {
2473 case AF_LINK: {
2474 const struct sockaddr_dl *sdl = SDL(maddr);
2475 if (sdl->sdl_len < sizeof(struct sockaddr_dl) ||
2476 (sdl->sdl_nlen + sdl->sdl_alen + sdl->sdl_slen +
2477 offsetof(struct sockaddr_dl, sdl_data) > sdl->sdl_len)) {
2478 return EINVAL;
2479 }
2480 break;
2481 }
2482 case AF_UNSPEC:
2483 if (maddr->sa_len < ETHER_ADDR_LEN +
2484 offsetof(struct sockaddr, sa_data)) {
2485 return EINVAL;
2486 }
2487 break;
2488 default:
2489 return EINVAL;
2490 }
2491
2492 return if_addmulti_anon(interface, maddr, ifmap);
2493 }
2494
2495 errno_t
2496 ifnet_remove_multicast(ifmultiaddr_t ifma)
2497 {
2498 struct sockaddr *maddr;
2499
2500 if (ifma == NULL) {
2501 return EINVAL;
2502 }
2503
2504 maddr = ifma->ifma_addr;
2505 /* Don't let users screw up protocols' entries. */
2506 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) {
2507 return EINVAL;
2508 }
2509
2510 return if_delmulti_anon(ifma->ifma_ifp, maddr);
2511 }
2512
2513 errno_t
2514 ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t *__null_terminated *ret_addresses)
2515 {
2516 int count = 0;
2517 int cmax = 0;
2518 struct ifmultiaddr *addr;
2519
2520 if (ifp == NULL || ret_addresses == NULL) {
2521 return EINVAL;
2522 }
2523 *ret_addresses = NULL;
2524
2525 ifnet_lock_shared(ifp);
2526 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2527 cmax++;
2528 }
2529
2530 ifmultiaddr_t *addresses = kalloc_type(ifmultiaddr_t, cmax + 1, Z_WAITOK);
2531 if (addresses == NULL) {
2532 ifnet_lock_done(ifp);
2533 return ENOMEM;
2534 }
2535
2536 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2537 if (count + 1 > cmax) {
2538 break;
2539 }
2540 addresses[count] = (ifmultiaddr_t)addr;
2541 ifmaddr_reference(addresses[count]);
2542 count++;
2543 }
2544 addresses[cmax] = NULL;
2545 ifnet_lock_done(ifp);
2546
2547 *ret_addresses = __unsafe_null_terminated_from_indexable(addresses, &addresses[cmax]);
2548
2549 return 0;
2550 }
2551
2552 void
2553 ifnet_free_multicast_list(ifmultiaddr_t *__null_terminated addresses)
2554 {
2555 int i = 0;
2556
2557 if (addresses == NULL) {
2558 return;
2559 }
2560
2561 for (ifmultiaddr_t *__null_terminated ptr = addresses; *ptr != NULL; ptr++, i++) {
2562 ifmaddr_release(*ptr);
2563 }
2564
2565 ifmultiaddr_t *free_addresses = __unsafe_null_terminated_to_indexable(addresses);
2566 kfree_type(ifmultiaddr_t, i + 1, free_addresses);
2567 }
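
/*
 * Illustrative sketch (not part of the original source): enumerating an
 * interface's multicast memberships with ifnet_get_multicast_list() and
 * releasing the snapshot with ifnet_free_multicast_list().
 *
 *	ifmultiaddr_t *maddrs = NULL;
 *
 *	if (ifnet_get_multicast_list(ifp, &maddrs) == 0) {
 *		for (ifmultiaddr_t *p = maddrs; *p != NULL; p++) {
 *			struct sockaddr_storage ss = {};
 *			(void) ifmaddr_address(*p, (struct sockaddr *)&ss,
 *			    sizeof(ss));
 *			... inspect ss ...
 *		}
 *		ifnet_free_multicast_list(maddrs);
 *	}
 */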
2568
2569 errno_t
2570 ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
2571 {
2572 struct ifnet *ifp;
2573 size_t namelen;
2574
2575 if (ifname == NULL) {
2576 return EINVAL;
2577 }
2578
2579 namelen = strlen(ifname);
2580
2581 *ifpp = NULL;
2582
2583 ifnet_head_lock_shared();
2584 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2585 struct ifaddr *ifa;
2586 struct sockaddr_dl *ll_addr;
2587
2588 ifa = ifnet_addrs[ifp->if_index - 1];
2589 if (ifa == NULL) {
2590 continue;
2591 }
2592
2593 IFA_LOCK(ifa);
2594 ll_addr = SDL(ifa->ifa_addr);
2595
2596 if (namelen == ll_addr->sdl_nlen &&
2597 strlcmp(ll_addr->sdl_data, ifname, namelen) == 0) {
2598 IFA_UNLOCK(ifa);
2599 *ifpp = ifp;
2600 ifnet_reference(*ifpp);
2601 break;
2602 }
2603 IFA_UNLOCK(ifa);
2604 }
2605 ifnet_head_done();
2606
2607 return (ifp == NULL) ? ENXIO : 0;
2608 }
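
/*
 * Illustrative sketch (not part of the original source): looking up an
 * interface by its BSD name.  ifnet_find_by_name() takes a reference on
 * the returned ifnet_t, so the caller must drop it with ifnet_release()
 * when done.
 *
 *	ifnet_t ifp = NULL;
 *
 *	if (ifnet_find_by_name("en0", &ifp) == 0) {
 *		... use ifp ...
 *		ifnet_release(ifp);
 *	}
 */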
2609
2610 errno_t
2611 ifnet_list_get(ifnet_family_t family, ifnet_t *__counted_by(*count) *list,
2612 u_int32_t *count)
2613 {
2614 return ifnet_list_get_common(family, FALSE, list, count);
2615 }
2616
2617 __private_extern__ errno_t
2618 ifnet_list_get_all(ifnet_family_t family, ifnet_t *__counted_by(*count) *list,
2619 u_int32_t *count)
2620 {
2621 return ifnet_list_get_common(family, TRUE, list, count);
2622 }
2623
2624 struct ifnet_list {
2625 SLIST_ENTRY(ifnet_list) ifl_le;
2626 struct ifnet *ifl_ifp;
2627 };
2628
2629 static errno_t
2630 ifnet_list_get_common(ifnet_family_t family, boolean_t get_all,
2631 ifnet_t *__counted_by(*count) *list, u_int32_t *count)
2632 {
2633 #pragma unused(get_all)
2634 SLIST_HEAD(, ifnet_list) ifl_head;
2635 struct ifnet_list *ifl, *ifl_tmp;
2636 struct ifnet *ifp;
2637 ifnet_t *tmp_list = NULL;
2638 int cnt = 0;
2639 errno_t err = 0;
2640
2641 SLIST_INIT(&ifl_head);
2642
2643 if (list == NULL || count == NULL) {
2644 err = EINVAL;
2645 goto done;
2646 }
2647 *list = NULL;
2648 *count = 0;
2649
2650 ifnet_head_lock_shared();
2651 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2652 if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
2653 ifl = kalloc_type(struct ifnet_list, Z_WAITOK | Z_ZERO);
2654 if (ifl == NULL) {
2655 ifnet_head_done();
2656 err = ENOMEM;
2657 goto done;
2658 }
2659 ifl->ifl_ifp = ifp;
2660 ifnet_reference(ifp);
2661 SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
2662 ++cnt;
2663 }
2664 }
2665 ifnet_head_done();
2666
2667 if (cnt == 0) {
2668 err = ENXIO;
2669 goto done;
2670 }
2671
2672 tmp_list = kalloc_type(ifnet_t, cnt + 1, Z_WAITOK | Z_ZERO);
2673 if (tmp_list == NULL) {
2674 err = ENOMEM;
2675 goto done;
2676 }
2677 *list = tmp_list;
2678 *count = cnt;
2679
2680 done:
2681 SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
2682 SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
2683 if (err == 0) {
2684 (*list)[--cnt] = ifl->ifl_ifp;
2685 } else {
2686 ifnet_release(ifl->ifl_ifp);
2687 }
2688 kfree_type(struct ifnet_list, ifl);
2689 }
2690
2691 return err;
2692 }
2693
2694 void
2695 ifnet_list_free(ifnet_t *__null_terminated interfaces)
2696 {
2697 int i = 0;
2698
2699 if (interfaces == NULL) {
2700 return;
2701 }
2702
2703 for (ifnet_t *__null_terminated ptr = interfaces; *ptr != NULL; ptr++, i++) {
2704 ifnet_release(*ptr);
2705 }
2706
2707 ifnet_t *free_interfaces = __unsafe_null_terminated_to_indexable(interfaces);
2708 kfree_type(ifnet_t, i + 1, free_interfaces);
2709 }
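
/*
 * Illustrative sketch (not part of the original source): enumerating all
 * attached interfaces of any family.  ifnet_list_get() references every
 * entry it returns; ifnet_list_free() drops those references and frees
 * the array.
 *
 *	ifnet_t *list = NULL;
 *	u_int32_t count = 0;
 *
 *	if (ifnet_list_get(IFNET_FAMILY_ANY, &list, &count) == 0) {
 *		for (u_int32_t i = 0; i < count; i++) {
 *			... inspect list[i] ...
 *		}
 *		ifnet_list_free(list);
 *	}
 */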
2710
2711 void
2712 ifnet_list_free_counted_by_internal(ifnet_t *__counted_by(count) interfaces, uint32_t count)
2713 {
2714 if (interfaces == NULL) {
2715 return;
2716 }
2717 for (int i = 0; i < count; i++) {
2718 ifnet_release(interfaces[i]);
2719 }
2720
2721 /*
2722 * When we allocated the ifnet_list, we returned only the number
2723 * of ifnet_t pointers without the null terminator in the `count'
2724 * variable, so we cheat here by freeing everything.
2725 */
2726 ifnet_t *free_interfaces = interfaces;
2727 kfree_type(ifnet_t, count + 1, free_interfaces);
2728 interfaces = NULL;
2729 count = 0;
2730 }
2731
2732 /*************************************************************************/
2733 /* ifaddr_t accessors */
2734 /*************************************************************************/
2735
2736 errno_t
2737 ifaddr_reference(ifaddr_t ifa)
2738 {
2739 if (ifa == NULL) {
2740 return EINVAL;
2741 }
2742
2743 ifa_addref(ifa);
2744 return 0;
2745 }
2746
2747 errno_t
2748 ifaddr_release(ifaddr_t ifa)
2749 {
2750 if (ifa == NULL) {
2751 return EINVAL;
2752 }
2753
2754 ifa_remref(ifa);
2755 return 0;
2756 }
2757
2758 sa_family_t
2759 ifaddr_address_family(ifaddr_t ifa)
2760 {
2761 sa_family_t family = 0;
2762
2763 if (ifa != NULL) {
2764 IFA_LOCK_SPIN(ifa);
2765 if (ifa->ifa_addr != NULL) {
2766 family = ifa->ifa_addr->sa_family;
2767 }
2768 IFA_UNLOCK(ifa);
2769 }
2770 return family;
2771 }
2772
2773 errno_t
2774 ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2775 {
2776 u_int32_t copylen;
2777
2778 if (ifa == NULL || out_addr == NULL) {
2779 return EINVAL;
2780 }
2781
2782 IFA_LOCK_SPIN(ifa);
2783 if (ifa->ifa_addr == NULL) {
2784 IFA_UNLOCK(ifa);
2785 return ENOTSUP;
2786 }
2787
2788 copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2789 ifa->ifa_addr->sa_len : addr_size;
2790 SOCKADDR_COPY(ifa->ifa_addr, out_addr, copylen);
2791
2792 if (ifa->ifa_addr->sa_len > addr_size) {
2793 IFA_UNLOCK(ifa);
2794 return EMSGSIZE;
2795 }
2796
2797 IFA_UNLOCK(ifa);
2798 return 0;
2799 }
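
/*
 * Illustrative sketch (not part of the original source): copying an
 * ifaddr_t's address into caller-owned storage.  A sockaddr_storage is
 * large enough for any address family; a shorter buffer would receive a
 * truncated copy and the call would return EMSGSIZE.
 *
 *	struct sockaddr_storage ss = {};
 *
 *	if (ifaddr_address(ifa, (struct sockaddr *)&ss, sizeof(ss)) == 0 &&
 *	    ss.ss_family == AF_INET) {
 *		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
 *		... use sin->sin_addr ...
 *	}
 */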
2800
2801 errno_t
2802 ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2803 {
2804 u_int32_t copylen;
2805
2806 if (ifa == NULL || out_addr == NULL) {
2807 return EINVAL;
2808 }
2809
2810 IFA_LOCK_SPIN(ifa);
2811 if (ifa->ifa_dstaddr == NULL) {
2812 IFA_UNLOCK(ifa);
2813 return ENOTSUP;
2814 }
2815
2816 copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2817 ifa->ifa_dstaddr->sa_len : addr_size;
2818 SOCKADDR_COPY(ifa->ifa_dstaddr, out_addr, copylen);
2819
2820 if (ifa->ifa_dstaddr->sa_len > addr_size) {
2821 IFA_UNLOCK(ifa);
2822 return EMSGSIZE;
2823 }
2824
2825 IFA_UNLOCK(ifa);
2826 return 0;
2827 }
2828
2829 errno_t
2830 ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2831 {
2832 u_int32_t copylen;
2833
2834 if (ifa == NULL || out_addr == NULL) {
2835 return EINVAL;
2836 }
2837
2838 IFA_LOCK_SPIN(ifa);
2839 if (ifa->ifa_netmask == NULL) {
2840 IFA_UNLOCK(ifa);
2841 return ENOTSUP;
2842 }
2843
2844 copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2845 ifa->ifa_netmask->sa_len : addr_size;
2846 SOCKADDR_COPY(ifa->ifa_netmask, out_addr, copylen);
2847
2848 if (ifa->ifa_netmask->sa_len > addr_size) {
2849 IFA_UNLOCK(ifa);
2850 return EMSGSIZE;
2851 }
2852
2853 IFA_UNLOCK(ifa);
2854 return 0;
2855 }
2856
2857 ifnet_t
2858 ifaddr_ifnet(ifaddr_t ifa)
2859 {
2860 struct ifnet *ifp;
2861
2862 if (ifa == NULL) {
2863 return NULL;
2864 }
2865
2866 /* ifa_ifp is set once at creation time; it is never changed */
2867 ifp = ifa->ifa_ifp;
2868
2869 return ifp;
2870 }
2871
2872 ifaddr_t
2873 ifaddr_withaddr(const struct sockaddr *address)
2874 {
2875 if (address == NULL) {
2876 return NULL;
2877 }
2878
2879 return ifa_ifwithaddr(address);
2880 }
2881
2882 ifaddr_t
2883 ifaddr_withdstaddr(const struct sockaddr *address)
2884 {
2885 if (address == NULL) {
2886 return NULL;
2887 }
2888
2889 return ifa_ifwithdstaddr(address);
2890 }
2891
2892 ifaddr_t
2893 ifaddr_withnet(const struct sockaddr *net)
2894 {
2895 if (net == NULL) {
2896 return NULL;
2897 }
2898
2899 return ifa_ifwithnet(net);
2900 }
2901
2902 ifaddr_t
2903 ifaddr_withroute(int flags, const struct sockaddr *destination,
2904 const struct sockaddr *gateway)
2905 {
2906 if (destination == NULL || gateway == NULL) {
2907 return NULL;
2908 }
2909
2910 return ifa_ifwithroute(flags, destination, gateway);
2911 }
2912
2913 ifaddr_t
2914 ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2915 {
2916 if (addr == NULL || interface == NULL) {
2917 return NULL;
2918 }
2919
2920 return ifaof_ifpforaddr_select(addr, interface);
2921 }
2922
2923 errno_t
2924 ifaddr_get_ia6_flags(ifaddr_t ifa, u_int32_t *out_flags)
2925 {
2926 sa_family_t family = 0;
2927
2928 if (ifa == NULL || out_flags == NULL) {
2929 return EINVAL;
2930 }
2931
2932 IFA_LOCK_SPIN(ifa);
2933 if (ifa->ifa_addr != NULL) {
2934 family = ifa->ifa_addr->sa_family;
2935 }
2936 IFA_UNLOCK(ifa);
2937
2938 if (family != AF_INET6) {
2939 return EINVAL;
2940 }
2941
2942 *out_flags = ifatoia6(ifa)->ia6_flags;
2943 return 0;
2944 }
2945
2946 errno_t
2947 ifmaddr_reference(ifmultiaddr_t ifmaddr)
2948 {
2949 if (ifmaddr == NULL) {
2950 return EINVAL;
2951 }
2952
2953 IFMA_ADDREF(ifmaddr);
2954 return 0;
2955 }
2956
2957 errno_t
2958 ifmaddr_release(ifmultiaddr_t ifmaddr)
2959 {
2960 if (ifmaddr == NULL) {
2961 return EINVAL;
2962 }
2963
2964 IFMA_REMREF(ifmaddr);
2965 return 0;
2966 }
2967
2968 errno_t
2969 ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2970 u_int32_t addr_size)
2971 {
2972 u_int32_t copylen;
2973
2974 if (ifma == NULL || out_addr == NULL) {
2975 return EINVAL;
2976 }
2977
2978 IFMA_LOCK(ifma);
2979 if (ifma->ifma_addr == NULL) {
2980 IFMA_UNLOCK(ifma);
2981 return ENOTSUP;
2982 }
2983
2984 copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2985 ifma->ifma_addr->sa_len : addr_size);
2986 SOCKADDR_COPY(ifma->ifma_addr, out_addr, copylen);
2987
2988 if (ifma->ifma_addr->sa_len > addr_size) {
2989 IFMA_UNLOCK(ifma);
2990 return EMSGSIZE;
2991 }
2992 IFMA_UNLOCK(ifma);
2993 return 0;
2994 }
2995
2996 errno_t
2997 ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2998 u_int32_t addr_size)
2999 {
3000 struct ifmultiaddr *ifma_ll;
3001
3002 if (ifma == NULL || out_addr == NULL) {
3003 return EINVAL;
3004 }
3005 if ((ifma_ll = ifma->ifma_ll) == NULL) {
3006 return ENOTSUP;
3007 }
3008
3009 return ifmaddr_address(ifma_ll, out_addr, addr_size);
3010 }
3011
3012 ifnet_t
3013 ifmaddr_ifnet(ifmultiaddr_t ifma)
3014 {
3015 return (ifma == NULL) ? NULL : ifma->ifma_ifp;
3016 }
3017
3018 /**************************************************************************/
3019 /* interface cloner */
3020 /**************************************************************************/
3021
3022 errno_t
3023 ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
3024 if_clone_t *ifcloner)
3025 {
3026 errno_t error = 0;
3027 struct if_clone *ifc = NULL;
3028 size_t namelen;
3029
3030 if (cloner_params == NULL || ifcloner == NULL ||
3031 cloner_params->ifc_name == NULL ||
3032 cloner_params->ifc_create == NULL ||
3033 cloner_params->ifc_destroy == NULL ||
3034 (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
3035 error = EINVAL;
3036 goto fail;
3037 }
3038
3039 if (if_clone_lookup(__terminated_by_to_indexable(cloner_params->ifc_name),
3040 namelen, NULL) != NULL) {
3041 printf("%s: already a cloner for %s\n", __func__,
3042 cloner_params->ifc_name);
3043 error = EEXIST;
3044 goto fail;
3045 }
3046
3047 ifc = kalloc_type(struct if_clone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3048 strlcpy(ifc->ifc_name, cloner_params->ifc_name, IFNAMSIZ + 1);
3049 ifc->ifc_namelen = (uint8_t)namelen;
3050 ifc->ifc_maxunit = IF_MAXUNIT;
3051 ifc->ifc_create = cloner_params->ifc_create;
3052 ifc->ifc_destroy = cloner_params->ifc_destroy;
3053
3054 error = if_clone_attach(ifc);
3055 if (error != 0) {
3056 printf("%s: if_clone_attach failed %d\n", __func__, error);
3057 goto fail;
3058 }
3059 *ifcloner = ifc;
3060
3061 return 0;
3062 fail:
3063 if (ifc != NULL) {
3064 kfree_type(struct if_clone, ifc);
3065 }
3066 return error;
3067 }
3068
3069 errno_t
3070 ifnet_clone_detach(if_clone_t ifcloner)
3071 {
3072 errno_t error = 0;
3073 struct if_clone *ifc = ifcloner;
3074
3075 if (ifc == NULL) {
3076 return EINVAL;
3077 }
3078
3079 if ((if_clone_lookup(ifc->ifc_name, ifc->ifc_namelen, NULL)) == NULL) {
3080 printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
3081 error = EINVAL;
3082 goto fail;
3083 }
3084
3085 if_clone_detach(ifc);
3086
3087 kfree_type(struct if_clone, ifc);
3088
3089 fail:
3090 return error;
3091 }
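
/*
 * Illustrative sketch (not part of the original source): registering an
 * interface cloner so that create/destroy requests for a hypothetical
 * "myif" name are routed to my_clone_create() / my_clone_destroy().
 * Those callbacks and their signatures are assumptions here; they are
 * expected to match the ifnet_clone_create_func / ifnet_clone_destroy_func
 * typedefs declared in kpi_interface.h.
 *
 *	struct ifnet_clone_params params = {
 *		.ifc_name    = "myif",
 *		.ifc_create  = my_clone_create,
 *		.ifc_destroy = my_clone_destroy,
 *	};
 *	if_clone_t cloner = NULL;
 *
 *	errno_t err = ifnet_clone_attach(&params, &cloner);
 *	...
 *	if (err == 0) {
 *		(void) ifnet_clone_detach(cloner);
 *	}
 */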
3092
3093 /**************************************************************************/
3094 /* misc */
3095 /**************************************************************************/
3096
3097 static errno_t
3098 ifnet_get_local_ports_extended_inner(ifnet_t ifp, protocol_family_t protocol,
3099 u_int32_t flags, u_int8_t bitfield[bitstr_size(IP_PORTRANGE_SIZE)])
3100 {
3101 u_int32_t ifindex;
3102
3103 /* no point in continuing if no address is assigned */
3104 if (ifp != NULL && TAILQ_EMPTY(&ifp->if_addrhead)) {
3105 return 0;
3106 }
3107
3108 if_ports_used_update_wakeuuid(ifp);
3109
3110 #if SKYWALK
3111 if (netns_is_enabled()) {
3112 netns_get_local_ports(ifp, protocol, flags, bitfield);
3113 }
3114 #endif /* SKYWALK */
3115
3116 ifindex = (ifp != NULL) ? ifp->if_index : 0;
3117
3118 if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) {
3119 udp_get_ports_used(ifp, protocol, flags,
3120 bitfield);
3121 }
3122
3123 if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) {
3124 tcp_get_ports_used(ifp, protocol, flags,
3125 bitfield);
3126 }
3127
3128 return 0;
3129 }
3130
3131 errno_t
3132 ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
3133 u_int32_t flags, u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])
3134 {
3135 ifnet_ref_t parent_ifp = NULL;
3136
3137 if (bitfield == NULL) {
3138 return EINVAL;
3139 }
3140
3141 switch (protocol) {
3142 case PF_UNSPEC:
3143 case PF_INET:
3144 case PF_INET6:
3145 break;
3146 default:
3147 return EINVAL;
3148 }
3149
3150 /* bit string is long enough to hold 16-bit port values */
3151 bzero(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
3152
3153 ifnet_get_local_ports_extended_inner(ifp, protocol, flags, bitfield);
3154
3155 /* get local ports for parent interface */
3156 if (ifp != NULL && ifnet_get_delegate_parent(ifp, &parent_ifp) == 0) {
3157 ifnet_get_local_ports_extended_inner(parent_ifp, protocol,
3158 flags, bitfield);
3159 ifnet_release_delegate_parent(ifp);
3160 }
3161
3162 return 0;
3163 }
3164
3165 errno_t
3166 ifnet_get_local_ports(ifnet_t ifp, u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])
3167 {
3168 u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
3169
3170 return ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
3171 bitfield);
3172 }
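
/*
 * Illustrative sketch (not part of the original source): asking the stack
 * which local TCP/UDP ports are in use on an interface and testing one
 * port in the returned bit string.  The byte/bit layout assumed below
 * matches the bit_test() convention from <sys/bitstring.h>.
 *
 *	u_int8_t ports[IP_PORTRANGE_BITFIELD_LEN];
 *	u_int16_t port = 5353;
 *
 *	if (ifnet_get_local_ports(ifp, ports) == 0 &&
 *	    (ports[port >> 3] & (1 << (port & 0x7))) != 0) {
 *		... a local socket is bound to this port on the interface ...
 *	}
 */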
3173
3174 errno_t
3175 ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
3176 int lqm, int npm, u_int8_t srvinfo[48])
3177 {
3178 if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3179 return EINVAL;
3180 }
3181 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3182 return EINVAL;
3183 }
3184 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3185 return EINVAL;
3186 }
3187
3188 return dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
3189 }
3190
3191 errno_t
3192 ifnet_notice_node_presence_v2(ifnet_t ifp, struct sockaddr *sa, struct sockaddr_dl *sdl,
3193 int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48])
3194 {
3195 /* Support older version if sdl is NULL */
3196 if (sdl == NULL) {
3197 return ifnet_notice_node_presence(ifp, sa, rssi, lqm, npm, srvinfo);
3198 }
3199
3200 if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3201 return EINVAL;
3202 }
3203 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3204 return EINVAL;
3205 }
3206
3207 if (sa->sa_family != AF_INET6) {
3208 return EINVAL;
3209 }
3210
3211 if (sdl->sdl_family != AF_LINK) {
3212 return EINVAL;
3213 }
3214
3215 return dlil_node_present_v2(ifp, sa, sdl, rssi, lqm, npm, srvinfo);
3216 }
3217
3218 errno_t
3219 ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
3220 {
3221 if (ifp == NULL || sa == NULL) {
3222 return EINVAL;
3223 }
3224 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3225 return EINVAL;
3226 }
3227 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3228 return EINVAL;
3229 }
3230
3231 dlil_node_absent(ifp, sa);
3232 return 0;
3233 }
3234
3235 errno_t
3236 ifnet_notice_primary_elected(ifnet_t ifp)
3237 {
3238 if (ifp == NULL) {
3239 return EINVAL;
3240 }
3241
3242 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PRIMARY_ELECTED, NULL, 0, FALSE);
3243 return 0;
3244 }
3245
3246 errno_t
3247 ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
3248 {
3249 #pragma unused(val)
3250
3251 m_do_tx_compl_callback(m, ifp);
3252
3253 return 0;
3254 }
3255
3256 errno_t
3257 ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
3258 {
3259 m_do_tx_compl_callback(m, ifp);
3260
3261 return 0;
3262 }
3263
3264 errno_t
3265 ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
3266 u_int8_t info[IFNET_MODARGLEN])
3267 {
3268 if (ifp == NULL || modid == NULL) {
3269 return EINVAL;
3270 }
3271
3272 dlil_report_issues(ifp, modid, info);
3273 return 0;
3274 }
3275
3276 errno_t
3277 ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
3278 {
3279 ifnet_t odifp = NULL;
3280
3281 if (ifp == NULL) {
3282 return EINVAL;
3283 } else if (!ifnet_is_attached(ifp, 1)) {
3284 return ENXIO;
3285 }
3286
3287 ifnet_lock_exclusive(ifp);
3288 odifp = ifp->if_delegated.ifp;
3289 if (odifp != NULL && odifp == delegated_ifp) {
3290 /* delegate info is unchanged; nothing more to do */
3291 ifnet_lock_done(ifp);
3292 goto done;
3293 }
3294 // Test if this delegate interface would cause a loop
3295 ifnet_t delegate_check_ifp = delegated_ifp;
3296 while (delegate_check_ifp != NULL) {
3297 if (delegate_check_ifp == ifp) {
3298 printf("%s: delegating to %s would cause a loop\n",
3299 ifp->if_xname, delegated_ifp->if_xname);
3300 ifnet_lock_done(ifp);
3301 goto done;
3302 }
3303 delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
3304 }
3305 bzero(&ifp->if_delegated, sizeof(ifp->if_delegated));
3306 if (delegated_ifp != NULL && ifp != delegated_ifp) {
3307 uint32_t set_eflags;
3308
3309 ifp->if_delegated.ifp = delegated_ifp;
3310 ifnet_reference(delegated_ifp);
3311 ifp->if_delegated.type = delegated_ifp->if_type;
3312 ifp->if_delegated.family = delegated_ifp->if_family;
3313 ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
3314 ifp->if_delegated.expensive =
3315 delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
3316 ifp->if_delegated.constrained =
3317 delegated_ifp->if_xflags & IFXF_CONSTRAINED ? 1 : 0;
3318 ifp->if_delegated.ultra_constrained =
3319 delegated_ifp->if_xflags & IFXF_ULTRA_CONSTRAINED ? 1 : 0;
3320
3321 /*
3322 	 * Propagate flags related to ECN from the delegated interface
3323 */
3324 if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE);
3325 set_eflags = (delegated_ifp->if_eflags &
3326 (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE));
3327 if_set_eflags(ifp, set_eflags);
3328 printf("%s: is now delegating %s (type 0x%x, family %u, "
3329 "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
3330 delegated_ifp->if_type, delegated_ifp->if_family,
3331 delegated_ifp->if_subfamily);
3332 }
3333
3334 ifnet_lock_done(ifp);
3335
3336 if (odifp != NULL) {
3337 if (odifp != delegated_ifp) {
3338 printf("%s: is no longer delegating %s\n",
3339 ifp->if_xname, odifp->if_xname);
3340 }
3341 ifnet_release(odifp);
3342 }
3343
3344 /* Generate a kernel event */
3345 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0, FALSE);
3346
3347 done:
3348 /* Release the io ref count */
3349 ifnet_decr_iorefcnt(ifp);
3350
3351 return 0;
3352 }
3353
3354 errno_t
3355 ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
3356 {
3357 if (ifp == NULL || pdelegated_ifp == NULL) {
3358 return EINVAL;
3359 } else if (!ifnet_is_attached(ifp, 1)) {
3360 return ENXIO;
3361 }
3362
3363 ifnet_lock_shared(ifp);
3364 if (ifp->if_delegated.ifp != NULL) {
3365 ifnet_reference(ifp->if_delegated.ifp);
3366 }
3367 *pdelegated_ifp = ifp->if_delegated.ifp;
3368 ifnet_lock_done(ifp);
3369
3370 /* Release the io ref count */
3371 ifnet_decr_iorefcnt(ifp);
3372
3373 return 0;
3374 }
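
/*
 * Illustrative sketch (not part of the original source): querying an
 * interface's delegate.  On success with a non-NULL result the delegate
 * has been referenced for the caller, so it must be dropped with
 * ifnet_release() when no longer needed.
 *
 *	ifnet_t delegate = NULL;
 *
 *	if (ifnet_get_delegate(ifp, &delegate) == 0 && delegate != NULL) {
 *		... inspect delegate ...
 *		ifnet_release(delegate);
 *	}
 */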
3375
3376 errno_t
3377 ifnet_get_keepalive_offload_frames(ifnet_t ifp,
3378 struct ifnet_keepalive_offload_frame *__counted_by(frames_array_count) frames_array,
3379 u_int32_t frames_array_count, size_t frame_data_offset,
3380 u_int32_t *used_frames_count)
3381 {
3382 u_int32_t i;
3383
3384 if (frames_array == NULL || used_frames_count == NULL ||
3385 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
3386 return EINVAL;
3387 }
3388
3389 /* frame_data_offset should be 32-bit aligned */
3390 if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
3391 frame_data_offset) {
3392 return EINVAL;
3393 }
3394
3395 *used_frames_count = 0;
3396 if (frames_array_count == 0) {
3397 return 0;
3398 }
3399
3400
3401 for (i = 0; i < frames_array_count; i++) {
3402 struct ifnet_keepalive_offload_frame *frame = frames_array + i;
3403 bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
3404 }
3405
3406 /* First collect IPsec related keep-alive frames */
3407 *used_frames_count = key_fill_offload_frames_for_savs(ifp,
3408 frames_array, frames_array_count, frame_data_offset);
3409
3410 /* Keep-alive offload not required for TCP/UDP on CLAT interface */
3411 if (IS_INTF_CLAT46(ifp)) {
3412 return 0;
3413 }
3414
3415 /* If there is more room, collect other UDP keep-alive frames */
3416 if (*used_frames_count < frames_array_count) {
3417 udp_fill_keepalive_offload_frames(ifp, frames_array,
3418 frames_array_count, frame_data_offset,
3419 used_frames_count);
3420 }
3421
3422 /* If there is more room, collect other TCP keep-alive frames */
3423 if (*used_frames_count < frames_array_count) {
3424 tcp_fill_keepalive_offload_frames(ifp, frames_array,
3425 frames_array_count, frame_data_offset,
3426 used_frames_count);
3427 }
3428
3429 VERIFY(*used_frames_count <= frames_array_count);
3430
3431 return 0;
3432 }
3433
3434 errno_t
3435 ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,
3436 struct ifnet_keepalive_offload_frame *frame)
3437 {
3438 errno_t error = 0;
3439
3440 if (ifp == NULL || frame == NULL) {
3441 return EINVAL;
3442 }
3443
3444 if (frame->type != IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP) {
3445 return EINVAL;
3446 }
3447 if (frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 &&
3448 frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6) {
3449 return EINVAL;
3450 }
3451 if (frame->local_port == 0 || frame->remote_port == 0) {
3452 return EINVAL;
3453 }
3454
3455 error = tcp_notify_kao_timeout(ifp, frame);
3456
3457 return error;
3458 }
3459
3460 errno_t
3461 ifnet_link_status_report(ifnet_t ifp, const void *__sized_by(buffer_len) buffer,
3462 size_t buffer_len)
3463 {
3464 struct if_link_status ifsr = {};
3465 errno_t err = 0;
3466
3467 if (ifp == NULL || buffer == NULL || buffer_len == 0) {
3468 return EINVAL;
3469 }
3470
3471 ifnet_lock_shared(ifp);
3472
3473 /*
3474 * Make sure that the interface is attached but there is no need
3475 * to take a reference because this call is coming from the driver.
3476 */
3477 if (!ifnet_is_attached(ifp, 0)) {
3478 ifnet_lock_done(ifp);
3479 return ENXIO;
3480 }
3481
3482 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
3483
3484 /*
3485 * If this is the first status report then allocate memory
3486 * to store it.
3487 */
3488 if (ifp->if_link_status == NULL) {
3489 ifp->if_link_status = kalloc_type(struct if_link_status, Z_ZERO);
3490 if (ifp->if_link_status == NULL) {
3491 err = ENOMEM;
3492 goto done;
3493 }
3494 }
3495
3496 memcpy(&ifsr, buffer, MIN(sizeof(ifsr), buffer_len));
3497 if (ifp->if_type == IFT_CELLULAR) {
3498 struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
3499 /*
3500 * Currently we have a single version -- if it does
3501 * not match, just return.
3502 */
3503 if (ifsr.ifsr_version !=
3504 IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
3505 err = ENOTSUP;
3506 goto done;
3507 }
3508
3509 if (ifsr.ifsr_len != sizeof(*if_cell_sr)) {
3510 err = EINVAL;
3511 goto done;
3512 }
3513
3514 if_cell_sr =
3515 &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
3516 new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
3517 /* Check if we need to act on any new notifications */
3518 if ((new_cell_sr->valid_bitmask &
3519 IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
3520 new_cell_sr->mss_recommended !=
3521 if_cell_sr->mss_recommended) {
3522 os_atomic_or(&tcbinfo.ipi_flags, INPCBINFO_UPDATE_MSS, relaxed);
3523 inpcb_timer_sched(&tcbinfo, INPCB_TIMER_FAST);
3524 #if NECP
3525 necp_update_all_clients();
3526 #endif
3527 }
3528
3529 /* Finally copy the new information */
3530 ifp->if_link_status->ifsr_version = ifsr.ifsr_version;
3531 ifp->if_link_status->ifsr_len = ifsr.ifsr_len;
3532 if_cell_sr->valid_bitmask = 0;
3533 bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
3534 } else if (IFNET_IS_WIFI(ifp)) {
3535 struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;
3536
3537 /* Check version */
3538 if (ifsr.ifsr_version !=
3539 IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
3540 err = ENOTSUP;
3541 goto done;
3542 }
3543
3544 if (ifsr.ifsr_len != sizeof(*if_wifi_sr)) {
3545 err = EINVAL;
3546 goto done;
3547 }
3548
3549 if_wifi_sr =
3550 &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
3551 new_wifi_sr =
3552 &ifsr.ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
3553 ifp->if_link_status->ifsr_version = ifsr.ifsr_version;
3554 ifp->if_link_status->ifsr_len = ifsr.ifsr_len;
3555 if_wifi_sr->valid_bitmask = 0;
3556 bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));
3557
3558 /*
3559 * Update the bandwidth values if we got recent values
3560 * reported through the other KPI.
3561 */
3562 if (!(new_wifi_sr->valid_bitmask &
3563 IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
3564 ifp->if_output_bw.max_bw > 0) {
3565 if_wifi_sr->valid_bitmask |=
3566 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
3567 if_wifi_sr->ul_max_bandwidth =
3568 ifp->if_output_bw.max_bw > UINT32_MAX ?
3569 UINT32_MAX :
3570 (uint32_t)ifp->if_output_bw.max_bw;
3571 }
3572 if (!(new_wifi_sr->valid_bitmask &
3573 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
3574 ifp->if_output_bw.eff_bw > 0) {
3575 if_wifi_sr->valid_bitmask |=
3576 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
3577 if_wifi_sr->ul_effective_bandwidth =
3578 ifp->if_output_bw.eff_bw > UINT32_MAX ?
3579 UINT32_MAX :
3580 (uint32_t)ifp->if_output_bw.eff_bw;
3581 }
3582 if (!(new_wifi_sr->valid_bitmask &
3583 IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
3584 ifp->if_input_bw.max_bw > 0) {
3585 if_wifi_sr->valid_bitmask |=
3586 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
3587 if_wifi_sr->dl_max_bandwidth =
3588 ifp->if_input_bw.max_bw > UINT32_MAX ?
3589 UINT32_MAX :
3590 (uint32_t)ifp->if_input_bw.max_bw;
3591 }
3592 if (!(new_wifi_sr->valid_bitmask &
3593 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
3594 ifp->if_input_bw.eff_bw > 0) {
3595 if_wifi_sr->valid_bitmask |=
3596 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
3597 if_wifi_sr->dl_effective_bandwidth =
3598 ifp->if_input_bw.eff_bw > UINT32_MAX ?
3599 UINT32_MAX :
3600 (uint32_t)ifp->if_input_bw.eff_bw;
3601 }
3602 }
3603
3604 done:
3605 lck_rw_done(&ifp->if_link_status_lock);
3606 ifnet_lock_done(ifp);
3607 return err;
3608 }
3609
3610 /*************************************************************************/
3611 /* Fastlane QoS Capable */
3612 /*************************************************************************/
3613
3614 errno_t
3615 ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
3616 {
3617 if (interface == NULL) {
3618 return EINVAL;
3619 }
3620
3621 if_set_qosmarking_mode(interface,
3622 capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);
3623
3624 return 0;
3625 }
3626
3627 errno_t
3628 ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
3629 {
3630 if (interface == NULL || capable == NULL) {
3631 return EINVAL;
3632 }
3633 if (interface->if_qosmarking_mode == IFRTYPE_QOSMARKING_FASTLANE) {
3634 *capable = true;
3635 } else {
3636 *capable = false;
3637 }
3638 return 0;
3639 }
3640
3641 errno_t
3642 ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
3643 {
3644 int64_t bytes;
3645
3646 if (interface == NULL || unsent_bytes == NULL) {
3647 return EINVAL;
3648 }
3649
3650 bytes = *unsent_bytes = 0;
3651
3652 if (!IF_FULLY_ATTACHED(interface)) {
3653 return ENXIO;
3654 }
3655
3656 bytes = interface->if_sndbyte_unsent;
3657
3658 if (interface->if_eflags & IFEF_TXSTART) {
3659 bytes += IFCQ_BYTES(interface->if_snd);
3660 }
3661 *unsent_bytes = bytes;
3662
3663 return 0;
3664 }
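
/*
 * Illustrative sketch (not part of the original source): sampling how much
 * data is queued but not yet sent on an interface.  ENXIO means the
 * interface is not fully attached, so the value is only meaningful when
 * the call returns 0.
 *
 *	int64_t unsent = 0;
 *
 *	if (ifnet_get_unsent_bytes(ifp, &unsent) == 0 && unsent > 0) {
 *		... react to the transmit backlog ...
 *	}
 */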
3665
3666 errno_t
3667 ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
3668 {
3669 if (ifp == NULL || buf_status == NULL) {
3670 return EINVAL;
3671 }
3672
3673 bzero(buf_status, sizeof(*buf_status));
3674
3675 if (!IF_FULLY_ATTACHED(ifp)) {
3676 return ENXIO;
3677 }
3678
3679 if (ifp->if_eflags & IFEF_TXSTART) {
3680 buf_status->buf_interface = IFCQ_BYTES(ifp->if_snd);
3681 }
3682
3683 buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
3684 (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;
3685
3686 return 0;
3687 }
3688
3689 void
3690 ifnet_normalise_unsent_data(void)
3691 {
3692 struct ifnet *ifp;
3693
3694 ifnet_head_lock_shared();
3695 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
3696 ifnet_lock_exclusive(ifp);
3697 if (!IF_FULLY_ATTACHED(ifp)) {
3698 ifnet_lock_done(ifp);
3699 continue;
3700 }
3701 if (!(ifp->if_eflags & IFEF_TXSTART)) {
3702 ifnet_lock_done(ifp);
3703 continue;
3704 }
3705
3706 if (ifp->if_sndbyte_total > 0 ||
3707 IFCQ_BYTES(ifp->if_snd) > 0) {
3708 ifp->if_unsent_data_cnt++;
3709 }
3710
3711 ifnet_lock_done(ifp);
3712 }
3713 ifnet_head_done();
3714 }
3715
3716 errno_t
3717 ifnet_set_low_power_mode(ifnet_t ifp, boolean_t on)
3718 {
3719 errno_t error;
3720
3721 error = if_set_low_power(ifp, on);
3722
3723 return error;
3724 }
3725
3726 errno_t
3727 ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on)
3728 {
3729 if (ifp == NULL || on == NULL) {
3730 return EINVAL;
3731 }
3732
3733 *on = ((ifp->if_xflags & IFXF_LOW_POWER) != 0);
3734 return 0;
3735 }
3736