1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)if_ethersubr.c 8.1 (Berkeley) 6/10/93
32 * $FreeBSD$
33 */
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "opt_netgraph.h"
38 #include "opt_mbuf_profiling.h"
39 #include "opt_rss.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/devctl.h>
44 #include <sys/eventhandler.h>
45 #include <sys/jail.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
50 #include <sys/mbuf.h>
51 #include <sys/proc.h>
52 #include <sys/priv.h>
53 #include <sys/random.h>
54 #include <sys/socket.h>
55 #include <sys/sockio.h>
56 #include <sys/sysctl.h>
57 #include <sys/uuid.h>
58
59 #include <net/ieee_oui.h>
60 #include <net/if.h>
61 #include <net/if_var.h>
62 #include <net/if_arp.h>
63 #include <net/netisr.h>
64 #include <net/route.h>
65 #include <net/if_llc.h>
66 #include <net/if_dl.h>
67 #include <net/if_types.h>
68 #include <net/bpf.h>
69 #include <net/ethernet.h>
70 #include <net/if_bridgevar.h>
71 #include <net/if_vlan_var.h>
72 #include <net/if_llatbl.h>
73 #include <net/pfil.h>
74 #include <net/rss_config.h>
75 #include <net/vnet.h>
76
77 #include <netpfil/pf/pf_mtag.h>
78
79 #if defined(INET) || defined(INET6)
80 #include <netinet/in.h>
81 #include <netinet/in_var.h>
82 #include <netinet/if_ether.h>
83 #include <netinet/ip_carp.h>
84 #include <netinet/ip_var.h>
85 #endif
86 #ifdef INET6
87 #include <netinet6/nd6.h>
88 #endif
89 #include <security/mac/mac_framework.h>
90
91 #include <crypto/sha1.h>
92
93 #ifdef CTASSERT
94 CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
95 CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
96 #endif
97
98 VNET_DEFINE(pfil_head_t, link_pfil_head); /* Packet filter hooks */
99
100 /* netgraph node hooks for ng_ether(4) */
101 void (*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
102 void (*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
103 int (*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
104 void (*ng_ether_attach_p)(struct ifnet *ifp);
105 void (*ng_ether_detach_p)(struct ifnet *ifp);
106
107 void (*vlan_input_p)(struct ifnet *, struct mbuf *);
108
109 /* if_bridge(4) support */
110 void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
111
112 /* if_lagg(4) support */
113 struct mbuf *(*lagg_input_ethernet_p)(struct ifnet *, struct mbuf *);
114
115 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
116 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
117
118 static int ether_resolvemulti(struct ifnet *, struct sockaddr **,
119 struct sockaddr *);
120 #ifdef VIMAGE
121 static void ether_reassign(struct ifnet *, struct vnet *, char *);
122 #endif
123 static int ether_requestencap(struct ifnet *, struct if_encap_req *);
124
125 #define senderr(e) do { error = (e); goto bad;} while (0)
126
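/*
 * Propagate checksum state from an outbound mbuf to the copy that is
 * being looped back locally: checksums the stack asked to be generated
 * on output are marked as already verified on input.
 */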
127 static void
128 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
129 {
130 int csum_flags = 0;
131
132 if (src->m_pkthdr.csum_flags & CSUM_IP)
133 csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
134 if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
135 csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
136 if (src->m_pkthdr.csum_flags & CSUM_SCTP)
137 csum_flags |= CSUM_SCTP_VALID;
138 dst->m_pkthdr.csum_flags |= csum_flags;
139 if (csum_flags & CSUM_DATA_VALID)
140 dst->m_pkthdr.csum_data = 0xffff;
141 }
142
143 /*
144 * Handle link-layer encapsulation requests.
145 */
146 static int
147 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
148 {
149 struct ether_header *eh;
150 struct arphdr *ah;
151 uint16_t etype;
152 const u_char *lladdr;
153
154 if (req->rtype != IFENCAP_LL)
155 return (EOPNOTSUPP);
156
157 if (req->bufsize < ETHER_HDR_LEN)
158 return (ENOMEM);
159
160 eh = (struct ether_header *)req->buf;
161 lladdr = req->lladdr;
162 req->lladdr_off = 0;
163
164 switch (req->family) {
165 case AF_INET:
166 etype = htons(ETHERTYPE_IP);
167 break;
168 case AF_INET6:
169 etype = htons(ETHERTYPE_IPV6);
170 break;
171 case AF_ARP:
172 ah = (struct arphdr *)req->hdata;
173 ah->ar_hrd = htons(ARPHRD_ETHER);
174
175 switch(ntohs(ah->ar_op)) {
176 case ARPOP_REVREQUEST:
177 case ARPOP_REVREPLY:
178 etype = htons(ETHERTYPE_REVARP);
179 break;
180 case ARPOP_REQUEST:
181 case ARPOP_REPLY:
182 default:
183 etype = htons(ETHERTYPE_ARP);
184 break;
185 }
186
187 if (req->flags & IFENCAP_FLAG_BROADCAST)
188 lladdr = ifp->if_broadcastaddr;
189 break;
190 default:
191 return (EAFNOSUPPORT);
192 }
193
194 memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
195 memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
196 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
197 req->bufsize = sizeof(struct ether_header);
198
199 return (0);
200 }
201
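/*
 * Resolve the link-layer header for an outbound packet.  Unicast
 * destinations go through ARP or ND6; broadcast and multicast
 * destinations are mapped directly to their Ethernet addresses.  On
 * success the prebuilt header is left in phdr and routing flags are
 * returned via pflags (and, optionally, a referenced llentry via plle).
 */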
202 static int
203 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
204 const struct sockaddr *dst, struct route *ro, u_char *phdr,
205 uint32_t *pflags, struct llentry **plle)
206 {
207 struct ether_header *eh;
208 uint32_t lleflags = 0;
209 int error = 0;
210 #if defined(INET) || defined(INET6)
211 uint16_t etype;
212 #endif
213
214 if (plle)
215 *plle = NULL;
216 eh = (struct ether_header *)phdr;
217
218 switch (dst->sa_family) {
219 #ifdef INET
220 case AF_INET:
221 if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
222 error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
223 plle);
224 else {
225 if (m->m_flags & M_BCAST)
226 memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
227 ETHER_ADDR_LEN);
228 else {
229 const struct in_addr *a;
230 a = &(((const struct sockaddr_in *)dst)->sin_addr);
231 ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
232 }
233 etype = htons(ETHERTYPE_IP);
234 memcpy(&eh->ether_type, &etype, sizeof(etype));
235 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
236 }
237 break;
238 #endif
239 #ifdef INET6
240 case AF_INET6:
241 if ((m->m_flags & M_MCAST) == 0)
242 error = nd6_resolve(ifp, 0, m, dst, phdr, &lleflags,
243 plle);
244 else {
245 const struct in6_addr *a6;
246 a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
247 ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
248 etype = htons(ETHERTYPE_IPV6);
249 memcpy(&eh->ether_type, &etype, sizeof(etype));
250 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
251 }
252 break;
253 #endif
254 default:
255 if_printf(ifp, "can't handle af%d\n", dst->sa_family);
256 if (m != NULL)
257 m_freem(m);
258 return (EAFNOSUPPORT);
259 }
260
261 if (error == EHOSTDOWN) {
262 if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
263 error = EHOSTUNREACH;
264 }
265
266 if (error != 0)
267 return (error);
268
269 *pflags = RT_MAY_LOOP;
270 if (lleflags & LLE_IFADDR)
271 *pflags |= RT_L2_ME;
272
273 return (0);
274 }
275
276 /*
277 * Ethernet output routine.
278 * Encapsulate a packet of type family for the local net.
279 * Use trailer local net encapsulation if enough data in first
280 * packet leaves a multiple of 512 bytes of data in remainder.
281 */
282 int
283 ether_output(struct ifnet *ifp, struct mbuf *m,
284 const struct sockaddr *dst, struct route *ro)
285 {
286 int error = 0;
287 char linkhdr[ETHER_HDR_LEN], *phdr;
288 struct ether_header *eh;
289 struct pf_mtag *t;
290 bool loop_copy;
291 int hlen; /* link layer header length */
292 uint32_t pflags;
293 struct llentry *lle = NULL;
294 int addref = 0;
295
296 phdr = NULL;
297 pflags = 0;
298 if (ro != NULL) {
299 /* XXX BPF uses ro_prepend */
300 if (ro->ro_prepend != NULL) {
301 phdr = ro->ro_prepend;
302 hlen = ro->ro_plen;
303 } else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
304 if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
305 lle = ro->ro_lle;
306 if (lle != NULL &&
307 (lle->la_flags & LLE_VALID) == 0) {
308 LLE_FREE(lle);
309 lle = NULL; /* redundant */
310 ro->ro_lle = NULL;
311 }
312 if (lle == NULL) {
313 /* if we lookup, keep cache */
314 addref = 1;
315 } else
316 /*
317 * Notify LLE code that
318 * the entry was used
319 * by datapath.
320 */
321 llentry_mark_used(lle);
322 }
323 if (lle != NULL) {
324 phdr = lle->r_linkdata;
325 hlen = lle->r_hdrlen;
326 pflags = lle->r_flags;
327 }
328 }
329 }
330
331 #ifdef MAC
332 error = mac_ifnet_check_transmit(ifp, m);
333 if (error)
334 senderr(error);
335 #endif
336
337 M_PROFILE(m);
338 if (ifp->if_flags & IFF_MONITOR)
339 senderr(ENETDOWN);
340 if (!((ifp->if_flags & IFF_UP) &&
341 (ifp->if_drv_flags & IFF_DRV_RUNNING)))
342 senderr(ENETDOWN);
343
344 if (phdr == NULL) {
345 /* No prepend data supplied. Try to calculate ourselves. */
346 phdr = linkhdr;
347 hlen = ETHER_HDR_LEN;
348 error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
349 addref ? &lle : NULL);
350 if (addref && lle != NULL)
351 ro->ro_lle = lle;
352 if (error != 0)
353 return (error == EWOULDBLOCK ? 0 : error);
354 }
355
356 if ((pflags & RT_L2_ME) != 0) {
357 update_mbuf_csumflags(m, m);
358 return (if_simloop(ifp, m, dst->sa_family, 0));
359 }
360 loop_copy = (pflags & RT_MAY_LOOP) != 0;
361
362 /*
363 * Add local net header. If no space in first mbuf,
364 * allocate another.
365 *
366 * Note that we do prepend regardless of RT_HAS_HEADER flag.
367 * This is done because BPF code shifts m_data pointer
368 * to the end of ethernet header prior to calling if_output().
369 */
370 M_PREPEND(m, hlen, M_NOWAIT);
371 if (m == NULL)
372 senderr(ENOBUFS);
373 if ((pflags & RT_HAS_HEADER) == 0) {
374 eh = mtod(m, struct ether_header *);
375 memcpy(eh, phdr, hlen);
376 }
377
378 /*
379 * If a simplex interface, and the packet is being sent to our
380 * Ethernet address or a broadcast address, loopback a copy.
381 * XXX To make a simplex device behave exactly like a duplex
382 * device, we should copy in the case of sending to our own
383 * ethernet address (thus letting the original actually appear
384 * on the wire). However, we don't do that here for security
385 * reasons and compatibility with the original behavior.
386 */
387 if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
388 ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
389 struct mbuf *n;
390
391 /*
392 * Because if_simloop() modifies the packet, we need a
393 * writable copy through m_dup() instead of a readonly
394 * one as m_copy[m] would give us. The alternative would
395 * be to modify if_simloop() to handle the readonly mbuf,
396 * but performancewise it is mostly equivalent (trading
397 * extra data copying vs. extra locking).
398 *
399 * XXX This is a local workaround. A number of less
400 * often used kernel parts suffer from the same bug.
401 * See PR kern/105943 for a proposed general solution.
402 */
403 if ((n = m_dup(m, M_NOWAIT)) != NULL) {
404 update_mbuf_csumflags(m, n);
405 (void)if_simloop(ifp, n, dst->sa_family, hlen);
406 } else
407 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
408 }
409
410 /*
411 * Bridges require special output handling.
412 */
413 if (ifp->if_bridge) {
414 BRIDGE_OUTPUT(ifp, m, error);
415 return (error);
416 }
417
418 #if defined(INET) || defined(INET6)
419 if (ifp->if_carp &&
420 (error = (*carp_output_p)(ifp, m, dst)))
421 goto bad;
422 #endif
423
424 /* Handle ng_ether(4) processing, if any */
425 if (ifp->if_l2com != NULL) {
426 KASSERT(ng_ether_output_p != NULL,
427 ("ng_ether_output_p is NULL"));
428 if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
429 bad: if (m != NULL)
430 m_freem(m);
431 return (error);
432 }
433 if (m == NULL)
434 return (0);
435 }
436
437 /* Continue with link-layer output */
438 return ether_output_frame(ifp, m);
439 }
440
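/*
 * Insert an 802.1Q header carrying only the requested priority (VID 0)
 * on frames that are not already VLAN-tagged.  Returns false, counting
 * an output error, if the tag cannot be prepended.
 */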
441 static bool
442 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
443 {
444 struct ether_8021q_tag qtag;
445 struct ether_header *eh;
446
447 eh = mtod(*mp, struct ether_header *);
448 if (ntohs(eh->ether_type) == ETHERTYPE_VLAN ||
449 ntohs(eh->ether_type) == ETHERTYPE_QINQ)
450 return (true);
451
452 qtag.vid = 0;
453 qtag.pcp = pcp;
454 qtag.proto = ETHERTYPE_VLAN;
455 if (ether_8021q_frame(mp, ifp, ifp, &qtag))
456 return (true);
457 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
458 return (false);
459 }
460
461 /*
462 * Ethernet link layer output routine to send a raw frame to the device.
463 *
464 * This assumes that the 14 byte Ethernet header is present and contiguous
465 * in the first mbuf (if BRIDGE'ing).
466 */
467 int
468 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
469 {
470 uint8_t pcp;
471
472 pcp = ifp->if_pcp;
473 if (pcp != IFNET_PCP_NONE && ifp->if_type != IFT_L2VLAN &&
474 !ether_set_pcp(&m, ifp, pcp))
475 return (0);
476
477 if (PFIL_HOOKED_OUT(V_link_pfil_head))
478 switch (pfil_run_hooks(V_link_pfil_head, &m, ifp, PFIL_OUT,
479 NULL)) {
480 case PFIL_DROPPED:
481 return (EACCES);
482 case PFIL_CONSUMED:
483 return (0);
484 }
485
486 #ifdef EXPERIMENTAL
487 #if defined(INET6) && defined(INET)
488 /* draft-ietf-6man-ipv6only-flag */
489 /* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
490 if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
491 struct ether_header *eh;
492
493 eh = mtod(m, struct ether_header *);
494 switch (ntohs(eh->ether_type)) {
495 case ETHERTYPE_IP:
496 case ETHERTYPE_ARP:
497 case ETHERTYPE_REVARP:
498 m_freem(m);
499 return (EAFNOSUPPORT);
500 /* NOTREACHED */
501 break;
502 };
503 }
504 #endif
505 #endif
506
507 /*
508 * Queue message on interface, update output statistics if
509 * successful, and start output if interface not yet active.
510 */
511 return ((ifp->if_transmit)(ifp, m));
512 }
513
514 /*
515 * Process a received Ethernet packet; the packet is in the
516 * mbuf chain m with the ethernet header at the front.
517 */
518 static void
519 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
520 {
521 struct ether_header *eh;
522 u_short etype;
523
524 if ((ifp->if_flags & IFF_UP) == 0) {
525 m_freem(m);
526 return;
527 }
528 #ifdef DIAGNOSTIC
529 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
530 if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
531 m_freem(m);
532 return;
533 }
534 #endif
535 if (m->m_len < ETHER_HDR_LEN) {
536 /* XXX maybe should pullup? */
537 if_printf(ifp, "discard frame w/o leading ethernet "
538 "header (len %u pkt len %u)\n",
539 m->m_len, m->m_pkthdr.len);
540 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
541 m_freem(m);
542 return;
543 }
544 eh = mtod(m, struct ether_header *);
545 etype = ntohs(eh->ether_type);
546 random_harvest_queue_ether(m, sizeof(*m));
547
548 #ifdef EXPERIMENTAL
549 #if defined(INET6) && defined(INET)
550 /* draft-ietf-6man-ipv6only-flag */
551 /* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
552 if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
553 switch (etype) {
554 case ETHERTYPE_IP:
555 case ETHERTYPE_ARP:
556 case ETHERTYPE_REVARP:
557 m_freem(m);
558 return;
559 /* NOTREACHED */
560 break;
561 };
562 }
563 #endif
564 #endif
565
566 CURVNET_SET_QUIET(ifp->if_vnet);
567
568 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
569 if (ETHER_IS_BROADCAST(eh->ether_dhost))
570 m->m_flags |= M_BCAST;
571 else
572 m->m_flags |= M_MCAST;
573 if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
574 }
575
576 #ifdef MAC
577 /*
578 * Tag the mbuf with an appropriate MAC label before any other
579 * consumers can get to it.
580 */
581 mac_ifnet_create_mbuf(ifp, m);
582 #endif
583
584 /*
585 * Give bpf a chance at the packet.
586 */
587 ETHER_BPF_MTAP(ifp, m);
588
589 /*
590 * If the CRC is still on the packet, trim it off. We do this once
591 * and once only in case we are re-entered. Nothing else on the
592 * Ethernet receive path expects to see the FCS.
593 */
594 if (m->m_flags & M_HASFCS) {
595 m_adj(m, -ETHER_CRC_LEN);
596 m->m_flags &= ~M_HASFCS;
597 }
598
599 if (!(ifp->if_capenable & IFCAP_HWSTATS))
600 if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
601
602 /* Allow monitor mode to claim this frame, after stats are updated. */
603 if (ifp->if_flags & IFF_MONITOR) {
604 m_freem(m);
605 CURVNET_RESTORE();
606 return;
607 }
608
609 /* Handle input from a lagg(4) port */
610 if (ifp->if_type == IFT_IEEE8023ADLAG) {
611 KASSERT(lagg_input_ethernet_p != NULL,
612 ("%s: if_lagg not loaded!", __func__));
613 m = (*lagg_input_ethernet_p)(ifp, m);
614 if (m != NULL)
615 ifp = m->m_pkthdr.rcvif;
616 else {
617 CURVNET_RESTORE();
618 return;
619 }
620 }
621
622 /*
623 * If the hardware did not process an 802.1Q tag, do this now,
624 * to allow 802.1P priority frames to be passed to the main input
625 * path correctly.
626 */
627 if ((m->m_flags & M_VLANTAG) == 0 &&
628 ((etype == ETHERTYPE_VLAN) || (etype == ETHERTYPE_QINQ))) {
629 struct ether_vlan_header *evl;
630
631 if (m->m_len < sizeof(*evl) &&
632 (m = m_pullup(m, sizeof(*evl))) == NULL) {
633 #ifdef DIAGNOSTIC
634 if_printf(ifp, "cannot pullup VLAN header\n");
635 #endif
636 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
637 CURVNET_RESTORE();
638 return;
639 }
640
641 evl = mtod(m, struct ether_vlan_header *);
642 m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
643 m->m_flags |= M_VLANTAG;
644
645 bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
646 ETHER_HDR_LEN - ETHER_TYPE_LEN);
647 m_adj(m, ETHER_VLAN_ENCAP_LEN);
648 eh = mtod(m, struct ether_header *);
649 }
650
651 M_SETFIB(m, ifp->if_fib);
652
653 /* Allow ng_ether(4) to claim this frame. */
654 if (ifp->if_l2com != NULL) {
655 KASSERT(ng_ether_input_p != NULL,
656 ("%s: ng_ether_input_p is NULL", __func__));
657 m->m_flags &= ~M_PROMISC;
658 (*ng_ether_input_p)(ifp, &m);
659 if (m == NULL) {
660 CURVNET_RESTORE();
661 return;
662 }
663 eh = mtod(m, struct ether_header *);
664 }
665
666 /*
667 * Allow if_bridge(4) to claim this frame.
668 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
669 * and the frame should be delivered locally.
670 */
671 if (ifp->if_bridge != NULL) {
672 m->m_flags &= ~M_PROMISC;
673 BRIDGE_INPUT(ifp, m);
674 if (m == NULL) {
675 CURVNET_RESTORE();
676 return;
677 }
678 eh = mtod(m, struct ether_header *);
679 }
680
681 #if defined(INET) || defined(INET6)
682 /*
683 * Clear M_PROMISC on frame so that carp(4) will see it when the
684 * mbuf flows up to Layer 3.
685 * FreeBSD's implementation of carp(4) uses the inprotosw
686 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
687 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
688 * is outside the scope of the M_PROMISC test below.
689 * TODO: Maintain a hash table of ethernet addresses other than
690 * ether_dhost which may be active on this ifp.
691 */
692 if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
693 m->m_flags &= ~M_PROMISC;
694 } else
695 #endif
696 {
697 /*
698 * If the frame received was not for our MAC address, set the
699 * M_PROMISC flag on the mbuf chain. The frame may need to
700 * be seen by the rest of the Ethernet input path in case of
701 * re-entry (e.g. bridge, vlan, netgraph) but should not be
702 * seen by upper protocol layers.
703 */
704 if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
705 bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
706 m->m_flags |= M_PROMISC;
707 }
708
709 ether_demux(ifp, m);
710 CURVNET_RESTORE();
711 }
712
713 /*
714 * Ethernet input dispatch; by default, direct dispatch here regardless of
715 * global configuration. However, if RSS is enabled, hook up RSS affinity
716 * so that when deferred or hybrid dispatch is enabled, we can redistribute
717 * load based on RSS.
718 *
719 * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
720 * not it had already done work distribution via multi-queue. Then we could
721 * direct dispatch in the event load balancing was already complete and
722 * handle the case of interfaces with different capabilities better.
723 *
724 * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
725 * at multiple layers?
726 *
727 * XXXRW: For now, enable all this only if RSS is compiled in, although it
728 * works fine without RSS. Need to characterise the performance overhead
729 * of the detour through the netisr code in the event the result is always
730 * direct dispatch.
731 */
732 static void
733 ether_nh_input(struct mbuf *m)
734 {
735
736 M_ASSERTPKTHDR(m);
737 KASSERT(m->m_pkthdr.rcvif != NULL,
738 ("%s: NULL interface pointer", __func__));
739 ether_input_internal(m->m_pkthdr.rcvif, m);
740 }
741
742 static struct netisr_handler ether_nh = {
743 .nh_name = "ether",
744 .nh_handler = ether_nh_input,
745 .nh_proto = NETISR_ETHER,
746 #ifdef RSS
747 .nh_policy = NETISR_POLICY_CPU,
748 .nh_dispatch = NETISR_DISPATCH_DIRECT,
749 .nh_m2cpuid = rss_m2cpuid,
750 #else
751 .nh_policy = NETISR_POLICY_SOURCE,
752 .nh_dispatch = NETISR_DISPATCH_DIRECT,
753 #endif
754 };
755
756 static void
757 ether_init(__unused void *arg)
758 {
759
760 netisr_register(&ether_nh);
761 }
762 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
763
764 static void
765 vnet_ether_init(__unused void *arg)
766 {
767 struct pfil_head_args args;
768
769 args.pa_version = PFIL_VERSION;
770 args.pa_flags = PFIL_IN | PFIL_OUT;
771 args.pa_type = PFIL_TYPE_ETHERNET;
772 args.pa_headname = PFIL_ETHER_NAME;
773 V_link_pfil_head = pfil_head_register(&args);
774
775 #ifdef VIMAGE
776 netisr_register_vnet(&ether_nh);
777 #endif
778 }
779 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
780 vnet_ether_init, NULL);
781
782 #ifdef VIMAGE
783 static void
784 vnet_ether_pfil_destroy(__unused void *arg)
785 {
786
787 pfil_head_unregister(V_link_pfil_head);
788 }
789 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
790 vnet_ether_pfil_destroy, NULL);
791
792 static void
793 vnet_ether_destroy(__unused void *arg)
794 {
795
796 netisr_unregister_vnet(&ether_nh);
797 }
798 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
799 vnet_ether_destroy, NULL);
800 #endif
801
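/*
 * Interface input handler (if_input): dispatch each received frame
 * through the NETISR_ETHER path, entering the network epoch on behalf
 * of drivers that have not done so themselves (!IFF_KNOWSEPOCH).
 */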
802 static void
803 ether_input(struct ifnet *ifp, struct mbuf *m)
804 {
805 struct epoch_tracker et;
806 struct mbuf *mn;
807 bool needs_epoch;
808
809 needs_epoch = !(ifp->if_flags & IFF_KNOWSEPOCH);
810
811 /*
812 * The drivers are allowed to pass in a chain of packets linked with
813 * m_nextpkt. We split them up into separate packets here and pass
814 * them up. This allows the drivers to amortize the receive lock.
815 */
816 CURVNET_SET_QUIET(ifp->if_vnet);
817 if (__predict_false(needs_epoch))
818 NET_EPOCH_ENTER(et);
819 while (m) {
820 mn = m->m_nextpkt;
821 m->m_nextpkt = NULL;
822
823 /*
824 * We will rely on rcvif being set properly in the deferred
825 * context, so assert it is correct here.
826 */
827 MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
828 KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
829 "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
830 netisr_dispatch(NETISR_ETHER, m);
831 m = mn;
832 }
833 if (__predict_false(needs_epoch))
834 NET_EPOCH_EXIT(et);
835 CURVNET_RESTORE();
836 }
837
838 /*
839 * Upper layer processing for a received Ethernet packet.
840 */
841 void
842 ether_demux(struct ifnet *ifp, struct mbuf *m)
843 {
844 struct ether_header *eh;
845 int i, isr;
846 u_short ether_type;
847
848 NET_EPOCH_ASSERT();
849 KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
850
851 /* Do not grab PROMISC frames in case we are re-entered. */
852 if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
853 i = pfil_run_hooks(V_link_pfil_head, &m, ifp, PFIL_IN, NULL);
854 if (i != 0 || m == NULL)
855 return;
856 }
857
858 eh = mtod(m, struct ether_header *);
859 ether_type = ntohs(eh->ether_type);
860
861 /*
862 * If this frame has a VLAN tag other than 0, call vlan_input()
863 * if its module is loaded. Otherwise, drop.
864 */
865 if ((m->m_flags & M_VLANTAG) &&
866 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
867 if (ifp->if_vlantrunk == NULL) {
868 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
869 m_freem(m);
870 return;
871 }
872 KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
873 __func__));
874 /* Clear before possibly re-entering ether_input(). */
875 m->m_flags &= ~M_PROMISC;
876 (*vlan_input_p)(ifp, m);
877 return;
878 }
879
880 /*
881 * Pass promiscuously received frames to the upper layer if the user
882 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
883 */
884 if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
885 m_freem(m);
886 return;
887 }
888
889 /*
890 * Reset layer specific mbuf flags to avoid confusing upper layers.
891 * Strip off Ethernet header.
892 */
893 m->m_flags &= ~M_VLANTAG;
894 m_clrprotoflags(m);
895 m_adj(m, ETHER_HDR_LEN);
896
897 /*
898 * Dispatch frame to upper layer.
899 */
900 switch (ether_type) {
901 #ifdef INET
902 case ETHERTYPE_IP:
903 isr = NETISR_IP;
904 break;
905
906 case ETHERTYPE_ARP:
907 if (ifp->if_flags & IFF_NOARP) {
908 /* Discard packet if ARP is disabled on interface */
909 m_freem(m);
910 return;
911 }
912 isr = NETISR_ARP;
913 break;
914 #endif
915 #ifdef INET6
916 case ETHERTYPE_IPV6:
917 isr = NETISR_IPV6;
918 break;
919 #endif
920 default:
921 goto discard;
922 }
923 netisr_dispatch(isr, m);
924 return;
925
926 discard:
927 /*
928 * Packet is to be discarded. If netgraph is present,
929 * hand the packet to it for last chance processing;
930 * otherwise dispose of it.
931 */
932 if (ifp->if_l2com != NULL) {
933 KASSERT(ng_ether_input_orphan_p != NULL,
934 ("ng_ether_input_orphan_p is NULL"));
935 /*
936 * Put back the ethernet header so netgraph has a
937 * consistent view of inbound packets.
938 */
939 M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
940 (*ng_ether_input_orphan_p)(ifp, m);
941 return;
942 }
943 m_freem(m);
944 }
945
946 /*
947 * Convert Ethernet address to printable (loggable) representation.
948 * This routine is for compatibility; it's better to just use
949 *
950 * printf("%6D", <pointer to address>, ":");
951 *
952 * since there's no static buffer involved.
953 */
954 #pragma GCC diagnostic ignored "-Wformat"
955 #pragma GCC diagnostic ignored "-Wformat-extra-args"
956 char *
957 ether_sprintf(const u_char *ap)
958 {
959 static char etherbuf[18];
960 snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
961 return (etherbuf);
962 }
963
964 /*
965 * Perform common duties while attaching to interface list
966 */
967 void
968 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
969 {
970 int i;
971 struct ifaddr *ifa;
972 struct sockaddr_dl *sdl;
973
974 ifp->if_addrlen = ETHER_ADDR_LEN;
975 ifp->if_hdrlen = ETHER_HDR_LEN;
976 ifp->if_mtu = ETHERMTU;
977 if_attach(ifp);
978 ifp->if_output = ether_output;
979 ifp->if_input = ether_input;
980 ifp->if_resolvemulti = ether_resolvemulti;
981 ifp->if_requestencap = ether_requestencap;
982 #ifdef VIMAGE
983 ifp->if_reassign = ether_reassign;
984 #endif
985 if (ifp->if_baudrate == 0)
986 ifp->if_baudrate = IF_Mbps(10); /* just a default */
987 ifp->if_broadcastaddr = etherbroadcastaddr;
988
989 ifa = ifp->if_addr;
990 KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
991 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
992 sdl->sdl_type = IFT_ETHER;
993 sdl->sdl_alen = ifp->if_addrlen;
994 bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
995
996 if (ifp->if_hw_addr != NULL)
997 bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
998
999 bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
1000 if (ng_ether_attach_p != NULL)
1001 (*ng_ether_attach_p)(ifp);
1002
1003 /* Announce Ethernet MAC address if non-zero. */
1004 for (i = 0; i < ifp->if_addrlen; i++)
1005 if (lla[i] != 0)
1006 break;
1007 if (i != ifp->if_addrlen)
1008 if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1009
1010 uuid_ether_add(LLADDR(sdl));
1011
1012 /* All necessary bits are set up; announce it now. */
1013 EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1014 if (IS_DEFAULT_VNET(curvnet))
1015 devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1016 }
1017 #pragma GCC diagnostic error "-Wformat"
1018 #pragma GCC diagnostic error "-Wformat-extra-args"
1019
1020 /*
1021 * Perform common duties while detaching an Ethernet interface
1022 */
1023 void
1024 ether_ifdetach(struct ifnet *ifp)
1025 {
1026 struct sockaddr_dl *sdl;
1027
1028 sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1029 uuid_ether_del(LLADDR(sdl));
1030
1031 if (ifp->if_l2com != NULL) {
1032 KASSERT(ng_ether_detach_p != NULL,
1033 ("ng_ether_detach_p is NULL"));
1034 (*ng_ether_detach_p)(ifp);
1035 }
1036
1037 bpfdetach(ifp);
1038 if_detach(ifp);
1039 }
1040
1041 #ifdef VIMAGE
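/*
 * When an interface is moved to a different vnet, detach its
 * ng_ether(4) node and re-create it in the context of the new vnet.
 */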
1042 void
1043 ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
1044 {
1045
1046 if (ifp->if_l2com != NULL) {
1047 KASSERT(ng_ether_detach_p != NULL,
1048 ("ng_ether_detach_p is NULL"));
1049 (*ng_ether_detach_p)(ifp);
1050 }
1051
1052 if (ng_ether_attach_p != NULL) {
1053 CURVNET_SET_QUIET(new_vnet);
1054 (*ng_ether_attach_p)(ifp);
1055 CURVNET_RESTORE();
1056 }
1057 }
1058 #endif
1059
1060 SYSCTL_DECL(_net_link);
1061 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1062 "Ethernet");
1063
1064 #if 0
1065 /*
1066 * This is for reference. We have a table-driven version
1067 * of the little-endian crc32 generator, which is faster
1068 * than the double-loop.
1069 */
1070 uint32_t
1071 ether_crc32_le(const uint8_t *buf, size_t len)
1072 {
1073 size_t i;
1074 uint32_t crc, carry;
1075 int bit;
1076 uint8_t data;
1077
1078 crc = 0xffffffff; /* initial value */
1079
1080 for (i = 0; i < len; i++) {
1081 for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1082 carry = (crc ^ data) & 1;
1083 crc >>= 1;
1084 if (carry)
1085 crc = (crc ^ ETHER_CRC_POLY_LE);
1086 }
1087 }
1088
1089 return (crc);
1090 }
1091 #else
1092 uint32_t
1093 ether_crc32_le(const uint8_t *buf, size_t len)
1094 {
1095 static const uint32_t crctab[] = {
1096 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1097 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1098 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1099 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1100 };
1101 size_t i;
1102 uint32_t crc;
1103
1104 crc = 0xffffffff; /* initial value */
1105
1106 for (i = 0; i < len; i++) {
1107 crc ^= buf[i];
1108 crc = (crc >> 4) ^ crctab[crc & 0xf];
1109 crc = (crc >> 4) ^ crctab[crc & 0xf];
1110 }
1111
1112 return (crc);
1113 }
1114 #endif
1115
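/*
 * Big-endian (MSB-first) Ethernet CRC-32, commonly used by drivers to
 * compute multicast filter hash values.
 */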
1116 uint32_t
1117 ether_crc32_be(const uint8_t *buf, size_t len)
1118 {
1119 size_t i;
1120 uint32_t crc, carry;
1121 int bit;
1122 uint8_t data;
1123
1124 crc = 0xffffffff; /* initial value */
1125
1126 for (i = 0; i < len; i++) {
1127 for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1128 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1129 crc <<= 1;
1130 if (carry)
1131 crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1132 }
1133 }
1134
1135 return (crc);
1136 }
1137
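/*
 * Default ioctl handler for Ethernet drivers: interface address
 * assignment, the standard MTU limit, and 802.1p PCP configuration.
 */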
1138 int
1139 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1140 {
1141 struct ifaddr *ifa = (struct ifaddr *) data;
1142 struct ifreq *ifr = (struct ifreq *) data;
1143 int error = 0;
1144
1145 switch (command) {
1146 case SIOCSIFADDR:
1147 ifp->if_flags |= IFF_UP;
1148
1149 switch (ifa->ifa_addr->sa_family) {
1150 #ifdef INET
1151 case AF_INET:
1152 ifp->if_init(ifp->if_softc); /* before arpwhohas */
1153 arp_ifinit(ifp, ifa);
1154 break;
1155 #endif
1156 default:
1157 ifp->if_init(ifp->if_softc);
1158 break;
1159 }
1160 break;
1161
1162 case SIOCGIFADDR:
1163 bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1164 ETHER_ADDR_LEN);
1165 break;
1166
1167 case SIOCSIFMTU:
1168 /*
1169 * Set the interface MTU.
1170 */
1171 if (ifr->ifr_mtu > ETHERMTU) {
1172 error = EINVAL;
1173 } else {
1174 ifp->if_mtu = ifr->ifr_mtu;
1175 }
1176 break;
1177
1178 case SIOCSLANPCP:
1179 error = priv_check(curthread, PRIV_NET_SETLANPCP);
1180 if (error != 0)
1181 break;
1182 if (ifr->ifr_lan_pcp > 7 &&
1183 ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1184 error = EINVAL;
1185 } else {
1186 ifp->if_pcp = ifr->ifr_lan_pcp;
1187 /* broadcast event about PCP change */
1188 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1189 }
1190 break;
1191
1192 case SIOCGLANPCP:
1193 ifr->ifr_lan_pcp = ifp->if_pcp;
1194 break;
1195
1196 default:
1197 error = EINVAL; /* XXX netbsd has ENOTTY??? */
1198 break;
1199 }
1200 return (error);
1201 }
1202
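/*
 * Map a protocol-level multicast address (AF_LINK, AF_INET or AF_INET6)
 * to the Ethernet multicast address to be programmed into the
 * interface filter.
 */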
1203 static int
1204 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1205 struct sockaddr *sa)
1206 {
1207 struct sockaddr_dl *sdl;
1208 #ifdef INET
1209 struct sockaddr_in *sin;
1210 #endif
1211 #ifdef INET6
1212 struct sockaddr_in6 *sin6;
1213 #endif
1214 u_char *e_addr;
1215
1216 switch(sa->sa_family) {
1217 case AF_LINK:
1218 /*
1219 * No mapping needed. Just check that it's a valid MC address.
1220 */
1221 sdl = (struct sockaddr_dl *)sa;
1222 e_addr = LLADDR(sdl);
1223 if (!ETHER_IS_MULTICAST(e_addr))
1224 return EADDRNOTAVAIL;
1225 *llsa = NULL;
1226 return 0;
1227
1228 #ifdef INET
1229 case AF_INET:
1230 sin = (struct sockaddr_in *)sa;
1231 if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1232 return EADDRNOTAVAIL;
1233 sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1234 sdl->sdl_alen = ETHER_ADDR_LEN;
1235 e_addr = LLADDR(sdl);
1236 ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1237 *llsa = (struct sockaddr *)sdl;
1238 return 0;
1239 #endif
1240 #ifdef INET6
1241 case AF_INET6:
1242 sin6 = (struct sockaddr_in6 *)sa;
1243 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1244 /*
1245 * An IP6 address of 0 means listen to all
1246 * of the Ethernet multicast address used for IP6.
1247 * (This is used for multicast routers.)
1248 */
1249 ifp->if_flags |= IFF_ALLMULTI;
1250 *llsa = NULL;
1251 return 0;
1252 }
1253 if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1254 return EADDRNOTAVAIL;
1255 sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1256 sdl->sdl_alen = ETHER_ADDR_LEN;
1257 e_addr = LLADDR(sdl);
1258 ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1259 *llsa = (struct sockaddr *)sdl;
1260 return 0;
1261 #endif
1262
1263 default:
1264 /*
1265 * Well, the text isn't quite right, but it's the name
1266 * that counts...
1267 */
1268 return EAFNOSUPPORT;
1269 }
1270 }
1271
1272 static moduledata_t ether_mod = {
1273 .name = "ether",
1274 };
1275
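/*
 * Tap a frame to BPF, reconstructing the 802.1Q header from the
 * out-of-band ether_vtag so listeners see the frame as it will appear
 * on the wire.
 */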
1276 void
1277 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1278 {
1279 struct ether_vlan_header vlan;
1280 struct mbuf mv, mb;
1281
1282 KASSERT((m->m_flags & M_VLANTAG) != 0,
1283 ("%s: vlan information not present", __func__));
1284 KASSERT(m->m_len >= sizeof(struct ether_header),
1285 ("%s: mbuf not large enough for header", __func__));
1286 bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1287 vlan.evl_proto = vlan.evl_encap_proto;
1288 vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1289 vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1290 m->m_len -= sizeof(struct ether_header);
1291 m->m_data += sizeof(struct ether_header);
1292 /*
1293 * If a data link has been supplied by the caller, then we will need to
1294 * re-create a stack allocated mbuf chain with the following structure:
1295 *
1296 * (1) mbuf #1 will contain the supplied data link
1297 * (2) mbuf #2 will contain the vlan header
1298 * (3) mbuf #3 will contain the original mbuf's packet data
1299 *
1300 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1301 */
1302 if (data != NULL) {
1303 mv.m_next = m;
1304 mv.m_data = (caddr_t)&vlan;
1305 mv.m_len = sizeof(vlan);
1306 mb.m_next = &mv;
1307 mb.m_data = data;
1308 mb.m_len = dlen;
1309 bpf_mtap(bp, &mb);
1310 } else
1311 bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1312 m->m_len += sizeof(struct ether_header);
1313 m->m_data -= sizeof(struct ether_header);
1314 }
1315
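/*
 * Prepend an 802.1Q (or caller-specified protocol) encapsulation header
 * carrying the given tag to an untagged Ethernet frame.
 */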
1316 struct mbuf *
1317 ether_vlanencap_proto(struct mbuf *m, uint16_t tag, uint16_t proto)
1318 {
1319 struct ether_vlan_header *evl;
1320
1321 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1322 if (m == NULL)
1323 return (NULL);
1324 /* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1325
1326 if (m->m_len < sizeof(*evl)) {
1327 m = m_pullup(m, sizeof(*evl));
1328 if (m == NULL)
1329 return (NULL);
1330 }
1331
1332 /*
1333 * Transform the Ethernet header into an Ethernet header
1334 * with 802.1Q encapsulation.
1335 */
1336 evl = mtod(m, struct ether_vlan_header *);
1337 bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1338 (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1339 evl->evl_encap_proto = htons(proto);
1340 evl->evl_tag = htons(tag);
1341 return (m);
1342 }
1343
1344 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1345 "IEEE 802.1Q VLAN");
1346 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1347 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1348 "for consistency");
1349
1350 VNET_DEFINE_STATIC(int, soft_pad);
1351 #define V_soft_pad VNET(soft_pad)
1352 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1353 &VNET_NAME(soft_pad), 0,
1354 "pad short frames before tagging");
1355
1356 /*
1357 * For now, make preserving PCP via an mbuf tag optional, as it increases
1358 * per-packet memory allocations and frees. In the future, it would be
1359 * preferable to reuse ether_vtag for this, or similar.
1360 */
1361 int vlan_mtag_pcp = 0;
1362 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW,
1363 &vlan_mtag_pcp, 0,
1364 "Retain VLAN PCP information as packets are passed up the stack");
1365
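/*
 * Prepare an outbound frame for a VLAN: optionally pad it to the
 * minimum length, then either request hardware tag insertion or
 * encapsulate the tag in software.
 */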
1366 bool
1367 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1368 struct ether_8021q_tag *qtag)
1369 {
1370 struct m_tag *mtag;
1371 int n;
1372 uint16_t tag;
1373 static const char pad[8]; /* just zeros */
1374
1375 /*
1376 * Pad the frame to the minimum size allowed if told to.
1377 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1378 * paragraph C.4.4.3.b. It can help to work around buggy
1379 * bridges that violate paragraph C.4.4.3.a from the same
1380 * document, i.e., fail to pad short frames after untagging.
1381 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1382 * untagging it will produce a 62-byte frame, which is a runt
1383 * and requires padding. There are VLAN-enabled network
1384 * devices that just discard such runts instead or mishandle
1385 * them somehow.
1386 */
1387 if (V_soft_pad && p->if_type == IFT_ETHER) {
1388 for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1389 n > 0; n -= sizeof(pad)) {
1390 if (!m_append(*mp, min(n, sizeof(pad)), pad))
1391 break;
1392 }
1393 if (n > 0) {
1394 m_freem(*mp);
1395 *mp = NULL;
1396 if_printf(ife, "cannot pad short frame");
1397 return (false);
1398 }
1399 }
1400
1401 /*
1402 * If PCP is set in mbuf, use it
1403 */
1404 if ((*mp)->m_flags & M_VLANTAG) {
1405 qtag->pcp = EVL_PRIOFTAG((*mp)->m_pkthdr.ether_vtag);
1406 }
1407
1408 /*
1409 * If underlying interface can do VLAN tag insertion itself,
1410 * just pass the packet along. However, we need some way to
1411 * tell the interface where the packet came from so that it
1412 * knows how to find the VLAN tag to use, so we attach a
1413 * packet tag that holds it.
1414 */
1415 if (vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1416 MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1417 tag = EVL_MAKETAG(qtag->vid, *(uint8_t *)(mtag + 1), 0);
1418 else
1419 tag = EVL_MAKETAG(qtag->vid, qtag->pcp, 0);
1420 if ((p->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1421 (qtag->proto == ETHERTYPE_VLAN)) {
1422 (*mp)->m_pkthdr.ether_vtag = tag;
1423 (*mp)->m_flags |= M_VLANTAG;
1424 } else {
1425 *mp = ether_vlanencap_proto(*mp, tag, qtag->proto);
1426 if (*mp == NULL) {
1427 if_printf(ife, "unable to prepend 802.1Q header");
1428 return (false);
1429 }
1430 }
1431 return (true);
1432 }
1433
1434 /*
1435 * Allocate an address from the FreeBSD Foundation OUI. This uses a
1436 * cryptographic hash function on the containing jail's name, UUID and the
1437 * interface name to attempt to provide a unique but stable address.
1438 * Pseudo-interfaces which require a MAC address should use this function to
1439 * allocate non-locally-administered addresses.
1440 */
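/*
 * An illustrative sketch (not taken from any specific driver) of typical
 * use from a pseudo-interface attach routine:
 *
 *	struct ether_addr ea;
 *
 *	ether_gen_addr(ifp, &ea);
 *	ether_ifattach(ifp, ea.octet);
 */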
1441 void
1442 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1443 {
1444 SHA1_CTX ctx;
1445 char *buf;
1446 char uuid[HOSTUUIDLEN + 1];
1447 uint64_t addr;
1448 int i, sz;
1449 char digest[SHA1_RESULTLEN];
1450 char jailname[MAXHOSTNAMELEN];
1451
1452 getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1453 /* If each (vnet) jail would also have a unique hostuuid this would not
1454 * be necessary. */
1455 getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1456 sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, if_name(ifp),
1457 jailname);
1458 if (sz < 0) {
1459 /* Fall back to a random mac address. */
1460 arc4rand(hwaddr, sizeof(*hwaddr), 0);
1461 hwaddr->octet[0] = 0x02;
1462 return;
1463 }
1464
1465 SHA1Init(&ctx);
1466 SHA1Update(&ctx, buf, sz);
1467 SHA1Final(digest, &ctx);
1468 free(buf, M_TEMP);
1469
1470 addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
1471 OUI_FREEBSD_GENERATED_MASK;
1472 addr = OUI_FREEBSD(addr);
1473 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1474 hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1475 0xFF;
1476 }
1477 }
1478
1479 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1480 MODULE_VERSION(ether, 1);
1481