1 /*-
2 * Copyright (c) 2014, Bryan Venteicher <[email protected]>
3 * All rights reserved.
4 * Copyright (c) 2020, Chelsio Communications.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include "opt_inet.h"
29 #include "opt_inet6.h"
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/hash.h>
39 #include <sys/malloc.h>
40 #include <sys/mbuf.h>
41 #include <sys/module.h>
42 #include <sys/refcount.h>
43 #include <sys/rmlock.h>
44 #include <sys/priv.h>
45 #include <sys/proc.h>
46 #include <sys/queue.h>
47 #include <sys/sbuf.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
53
54 #include <net/bpf.h>
55 #include <net/ethernet.h>
56 #include <net/if.h>
57 #include <net/if_var.h>
58 #include <net/if_clone.h>
59 #include <net/if_dl.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62 #include <net/if_vxlan.h>
63 #include <net/netisr.h>
64 #include <net/route.h>
65 #include <net/route/nhop.h>
66
67 #include <netinet/in.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/in_var.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
73 #include <netinet/ip_var.h>
74 #include <netinet/udp.h>
75 #include <netinet/udp_var.h>
76 #include <netinet/in_fib.h>
77 #include <netinet6/in6_fib.h>
78
79 #include <netinet6/ip6_var.h>
80 #include <netinet6/scope6_var.h>
81
82 struct vxlan_softc;
83 LIST_HEAD(vxlan_softc_head, vxlan_softc);
84
85 struct sx vxlan_sx;
86 SX_SYSINIT(vxlan, &vxlan_sx, "VXLAN global start/stop lock");
87
88 struct vxlan_socket_mc_info {
89 union vxlan_sockaddr vxlsomc_saddr;
90 union vxlan_sockaddr vxlsomc_gaddr;
91 int vxlsomc_ifidx;
92 int vxlsomc_users;
93 };
94
95 /*
96  * The maximum MTU of an encapsulated Ethernet frame within an IPv4/UDP packet.
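 * With IP_MAXPACKET (65535), an 8 byte UDP header, an 8 byte VXLAN header,
 * and a worst-case 60 byte IPv4 header, this works out to 65437 bytes.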
97 */
98 #define VXLAN_MAX_MTU (IP_MAXPACKET - \
99 60 /* Maximum IPv4 header len */ - \
100 sizeof(struct udphdr) - \
101 sizeof(struct vxlan_header) - \
102 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
103 #define VXLAN_BASIC_IFCAPS (IFCAP_LINKSTATE | IFCAP_JUMBO_MTU)
104
105 #define VXLAN_SO_MC_MAX_GROUPS 32
106
107 #define VXLAN_SO_VNI_HASH_SHIFT 6
108 #define VXLAN_SO_VNI_HASH_SIZE (1 << VXLAN_SO_VNI_HASH_SHIFT)
109 #define VXLAN_SO_VNI_HASH(_vni) ((_vni) % VXLAN_SO_VNI_HASH_SIZE)
110
111 struct vxlan_socket {
112 struct socket *vxlso_sock;
113 struct rmlock vxlso_lock;
114 u_int vxlso_refcnt;
115 union vxlan_sockaddr vxlso_laddr;
116 LIST_ENTRY(vxlan_socket) vxlso_entry;
117 struct vxlan_softc_head vxlso_vni_hash[VXLAN_SO_VNI_HASH_SIZE];
118 struct vxlan_socket_mc_info vxlso_mc[VXLAN_SO_MC_MAX_GROUPS];
119 };
120
121 #define VXLAN_SO_RLOCK(_vso, _p) rm_rlock(&(_vso)->vxlso_lock, (_p))
122 #define VXLAN_SO_RUNLOCK(_vso, _p) rm_runlock(&(_vso)->vxlso_lock, (_p))
123 #define VXLAN_SO_WLOCK(_vso) rm_wlock(&(_vso)->vxlso_lock)
124 #define VXLAN_SO_WUNLOCK(_vso) rm_wunlock(&(_vso)->vxlso_lock)
125 #define VXLAN_SO_LOCK_ASSERT(_vso) \
126 rm_assert(&(_vso)->vxlso_lock, RA_LOCKED)
127 #define VXLAN_SO_LOCK_WASSERT(_vso) \
128 rm_assert(&(_vso)->vxlso_lock, RA_WLOCKED)
129
130 #define VXLAN_SO_ACQUIRE(_vso) refcount_acquire(&(_vso)->vxlso_refcnt)
131 #define VXLAN_SO_RELEASE(_vso) refcount_release(&(_vso)->vxlso_refcnt)
132
133 struct vxlan_ftable_entry {
134 LIST_ENTRY(vxlan_ftable_entry) vxlfe_hash;
135 uint16_t vxlfe_flags;
136 uint8_t vxlfe_mac[ETHER_ADDR_LEN];
137 union vxlan_sockaddr vxlfe_raddr;
138 time_t vxlfe_expire;
139 };
140
141 #define VXLAN_FE_FLAG_DYNAMIC 0x01
142 #define VXLAN_FE_FLAG_STATIC 0x02
143
144 #define VXLAN_FE_IS_DYNAMIC(_fe) \
145 ((_fe)->vxlfe_flags & VXLAN_FE_FLAG_DYNAMIC)
146
147 #define VXLAN_SC_FTABLE_SHIFT 9
148 #define VXLAN_SC_FTABLE_SIZE (1 << VXLAN_SC_FTABLE_SHIFT)
149 #define VXLAN_SC_FTABLE_MASK (VXLAN_SC_FTABLE_SIZE - 1)
150 #define VXLAN_SC_FTABLE_HASH(_sc, _mac) \
151 (vxlan_mac_hash(_sc, _mac) % VXLAN_SC_FTABLE_SIZE)
152
153 LIST_HEAD(vxlan_ftable_head, vxlan_ftable_entry);
154
155 struct vxlan_statistics {
156 uint32_t ftable_nospace;
157 uint32_t ftable_lock_upgrade_failed;
158 counter_u64_t txcsum;
159 counter_u64_t tso;
160 counter_u64_t rxcsum;
161 };
162
163 struct vxlan_softc {
164 struct ifnet *vxl_ifp;
165 int vxl_reqcap;
166 struct vxlan_socket *vxl_sock;
167 uint32_t vxl_vni;
168 union vxlan_sockaddr vxl_src_addr;
169 union vxlan_sockaddr vxl_dst_addr;
170 uint32_t vxl_flags;
171 #define VXLAN_FLAG_INIT 0x0001
172 #define VXLAN_FLAG_TEARDOWN 0x0002
173 #define VXLAN_FLAG_LEARN 0x0004
174
175 uint32_t vxl_port_hash_key;
176 uint16_t vxl_min_port;
177 uint16_t vxl_max_port;
178 uint8_t vxl_ttl;
179
180 /* Lookup table from MAC address to forwarding entry. */
181 uint32_t vxl_ftable_cnt;
182 uint32_t vxl_ftable_max;
183 uint32_t vxl_ftable_timeout;
184 uint32_t vxl_ftable_hash_key;
185 struct vxlan_ftable_head *vxl_ftable;
186
187 /* Derived from vxl_dst_addr. */
188 struct vxlan_ftable_entry vxl_default_fe;
189
190 struct ip_moptions *vxl_im4o;
191 struct ip6_moptions *vxl_im6o;
192
193 struct rmlock vxl_lock;
194 volatile u_int vxl_refcnt;
195
196 int vxl_unit;
197 int vxl_vso_mc_index;
198 struct vxlan_statistics vxl_stats;
199 struct sysctl_oid *vxl_sysctl_node;
200 struct sysctl_ctx_list vxl_sysctl_ctx;
201 struct callout vxl_callout;
202 struct ether_addr vxl_hwaddr;
203 int vxl_mc_ifindex;
204 struct ifnet *vxl_mc_ifp;
205 struct ifmedia vxl_media;
206 char vxl_mc_ifname[IFNAMSIZ];
207 LIST_ENTRY(vxlan_softc) vxl_entry;
208 LIST_ENTRY(vxlan_softc) vxl_ifdetach_list;
209
210 /* For rate limiting errors on the tx fast path. */
211 struct timeval err_time;
212 int err_pps;
213 };
214
215 #define VXLAN_RLOCK(_sc, _p) rm_rlock(&(_sc)->vxl_lock, (_p))
216 #define VXLAN_RUNLOCK(_sc, _p) rm_runlock(&(_sc)->vxl_lock, (_p))
217 #define VXLAN_WLOCK(_sc) rm_wlock(&(_sc)->vxl_lock)
218 #define VXLAN_WUNLOCK(_sc) rm_wunlock(&(_sc)->vxl_lock)
219 #define VXLAN_LOCK_WOWNED(_sc) rm_wowned(&(_sc)->vxl_lock)
220 #define VXLAN_LOCK_ASSERT(_sc) rm_assert(&(_sc)->vxl_lock, RA_LOCKED)
221 #define VXLAN_LOCK_WASSERT(_sc) rm_assert(&(_sc)->vxl_lock, RA_WLOCKED)
222 #define VXLAN_UNLOCK(_sc, _p) do { \
223 if (VXLAN_LOCK_WOWNED(_sc)) \
224 VXLAN_WUNLOCK(_sc); \
225 else \
226 VXLAN_RUNLOCK(_sc, _p); \
227 } while (0)
228
229 #define VXLAN_ACQUIRE(_sc) refcount_acquire(&(_sc)->vxl_refcnt)
230 #define VXLAN_RELEASE(_sc) refcount_release(&(_sc)->vxl_refcnt)
231
232 #define satoconstsin(sa) ((const struct sockaddr_in *)(sa))
233 #define satoconstsin6(sa) ((const struct sockaddr_in6 *)(sa))
234
235 struct vxlanudphdr {
236 struct udphdr vxlh_udp;
237 struct vxlan_header vxlh_hdr;
238 } __packed;
239
240 static int vxlan_ftable_addr_cmp(const uint8_t *, const uint8_t *);
241 static void vxlan_ftable_init(struct vxlan_softc *);
242 static void vxlan_ftable_fini(struct vxlan_softc *);
243 static void vxlan_ftable_flush(struct vxlan_softc *, int);
244 static void vxlan_ftable_expire(struct vxlan_softc *);
245 static int vxlan_ftable_update_locked(struct vxlan_softc *,
246 const union vxlan_sockaddr *, const uint8_t *,
247 struct rm_priotracker *);
248 static int vxlan_ftable_learn(struct vxlan_softc *,
249 const struct sockaddr *, const uint8_t *);
250 static int vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS);
251
252 static struct vxlan_ftable_entry *
253 vxlan_ftable_entry_alloc(void);
254 static void vxlan_ftable_entry_free(struct vxlan_ftable_entry *);
255 static void vxlan_ftable_entry_init(struct vxlan_softc *,
256 struct vxlan_ftable_entry *, const uint8_t *,
257 const struct sockaddr *, uint32_t);
258 static void vxlan_ftable_entry_destroy(struct vxlan_softc *,
259 struct vxlan_ftable_entry *);
260 static int vxlan_ftable_entry_insert(struct vxlan_softc *,
261 struct vxlan_ftable_entry *);
262 static struct vxlan_ftable_entry *
263 vxlan_ftable_entry_lookup(struct vxlan_softc *,
264 const uint8_t *);
265 static void vxlan_ftable_entry_dump(struct vxlan_ftable_entry *,
266 struct sbuf *);
267
268 static struct vxlan_socket *
269 vxlan_socket_alloc(const union vxlan_sockaddr *);
270 static void vxlan_socket_destroy(struct vxlan_socket *);
271 static void vxlan_socket_release(struct vxlan_socket *);
272 static struct vxlan_socket *
273 vxlan_socket_lookup(union vxlan_sockaddr *vxlsa);
274 static void vxlan_socket_insert(struct vxlan_socket *);
275 static int vxlan_socket_init(struct vxlan_socket *, struct ifnet *);
276 static int vxlan_socket_bind(struct vxlan_socket *, struct ifnet *);
277 static int vxlan_socket_create(struct ifnet *, int,
278 const union vxlan_sockaddr *, struct vxlan_socket **);
279 static void vxlan_socket_ifdetach(struct vxlan_socket *,
280 struct ifnet *, struct vxlan_softc_head *);
281
282 static struct vxlan_socket *
283 vxlan_socket_mc_lookup(const union vxlan_sockaddr *);
284 static int vxlan_sockaddr_mc_info_match(
285 const struct vxlan_socket_mc_info *,
286 const union vxlan_sockaddr *,
287 const union vxlan_sockaddr *, int);
288 static int vxlan_socket_mc_join_group(struct vxlan_socket *,
289 const union vxlan_sockaddr *, const union vxlan_sockaddr *,
290 int *, union vxlan_sockaddr *);
291 static int vxlan_socket_mc_leave_group(struct vxlan_socket *,
292 const union vxlan_sockaddr *,
293 const union vxlan_sockaddr *, int);
294 static int vxlan_socket_mc_add_group(struct vxlan_socket *,
295 const union vxlan_sockaddr *, const union vxlan_sockaddr *,
296 int, int *);
297 static void vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *,
298 int);
299
300 static struct vxlan_softc *
301 vxlan_socket_lookup_softc_locked(struct vxlan_socket *,
302 uint32_t);
303 static struct vxlan_softc *
304 vxlan_socket_lookup_softc(struct vxlan_socket *, uint32_t);
305 static int vxlan_socket_insert_softc(struct vxlan_socket *,
306 struct vxlan_softc *);
307 static void vxlan_socket_remove_softc(struct vxlan_socket *,
308 struct vxlan_softc *);
309
310 static struct ifnet *
311 vxlan_multicast_if_ref(struct vxlan_softc *, int);
312 static void vxlan_free_multicast(struct vxlan_softc *);
313 static int vxlan_setup_multicast_interface(struct vxlan_softc *);
314
315 static int vxlan_setup_multicast(struct vxlan_softc *);
316 static int vxlan_setup_socket(struct vxlan_softc *);
317 #ifdef INET6
318 static void vxlan_setup_zero_checksum_port(struct vxlan_softc *);
319 #endif
320 static void vxlan_setup_interface_hdrlen(struct vxlan_softc *);
321 static int vxlan_valid_init_config(struct vxlan_softc *);
322 static void vxlan_init_wait(struct vxlan_softc *);
323 static void vxlan_init_complete(struct vxlan_softc *);
324 static void vxlan_init(void *);
325 static void vxlan_release(struct vxlan_softc *);
326 static void vxlan_teardown_wait(struct vxlan_softc *);
327 static void vxlan_teardown_complete(struct vxlan_softc *);
328 static void vxlan_teardown_locked(struct vxlan_softc *);
329 static void vxlan_teardown(struct vxlan_softc *);
330 static void vxlan_ifdetach(struct vxlan_softc *, struct ifnet *,
331 struct vxlan_softc_head *);
332 static void vxlan_timer(void *);
333
334 static int vxlan_ctrl_get_config(struct vxlan_softc *, void *);
335 static int vxlan_ctrl_set_vni(struct vxlan_softc *, void *);
336 static int vxlan_ctrl_set_local_addr(struct vxlan_softc *, void *);
337 static int vxlan_ctrl_set_remote_addr(struct vxlan_softc *, void *);
338 static int vxlan_ctrl_set_local_port(struct vxlan_softc *, void *);
339 static int vxlan_ctrl_set_remote_port(struct vxlan_softc *, void *);
340 static int vxlan_ctrl_set_port_range(struct vxlan_softc *, void *);
341 static int vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *, void *);
342 static int vxlan_ctrl_set_ftable_max(struct vxlan_softc *, void *);
343 static int	vxlan_ctrl_set_multicast_if(struct vxlan_softc *, void *);
344 static int vxlan_ctrl_set_ttl(struct vxlan_softc *, void *);
345 static int vxlan_ctrl_set_learn(struct vxlan_softc *, void *);
346 static int vxlan_ctrl_ftable_entry_add(struct vxlan_softc *, void *);
347 static int vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *, void *);
348 static int vxlan_ctrl_flush(struct vxlan_softc *, void *);
349 static int vxlan_ioctl_drvspec(struct vxlan_softc *,
350 struct ifdrv *, int);
351 static int vxlan_ioctl_ifflags(struct vxlan_softc *);
352 static int vxlan_ioctl(struct ifnet *, u_long, caddr_t);
353
354 #if defined(INET) || defined(INET6)
355 static uint16_t vxlan_pick_source_port(struct vxlan_softc *, struct mbuf *);
356 static void vxlan_encap_header(struct vxlan_softc *, struct mbuf *,
357 int, uint16_t, uint16_t);
358 #endif
359 static int vxlan_encap4(struct vxlan_softc *,
360 const union vxlan_sockaddr *, struct mbuf *);
361 static int vxlan_encap6(struct vxlan_softc *,
362 const union vxlan_sockaddr *, struct mbuf *);
363 static int vxlan_transmit(struct ifnet *, struct mbuf *);
364 static void vxlan_qflush(struct ifnet *);
365 static void vxlan_rcv_udp_packet(struct mbuf *, int, struct inpcb *,
366 const struct sockaddr *, void *);
367 static int vxlan_input(struct vxlan_socket *, uint32_t, struct mbuf **,
368 const struct sockaddr *);
369
370 static int vxlan_stats_alloc(struct vxlan_softc *);
371 static void vxlan_stats_free(struct vxlan_softc *);
372 static void vxlan_set_default_config(struct vxlan_softc *);
373 static int vxlan_set_user_config(struct vxlan_softc *,
374 struct ifvxlanparam *);
375 static int vxlan_set_reqcap(struct vxlan_softc *, struct ifnet *, int);
376 static void vxlan_set_hwcaps(struct vxlan_softc *);
377 static int vxlan_clone_create(struct if_clone *, int, caddr_t);
378 static void vxlan_clone_destroy(struct ifnet *);
379
380 static uint32_t vxlan_mac_hash(struct vxlan_softc *, const uint8_t *);
381 static int vxlan_media_change(struct ifnet *);
382 static void vxlan_media_status(struct ifnet *, struct ifmediareq *);
383
384 static int vxlan_sockaddr_cmp(const union vxlan_sockaddr *,
385 const struct sockaddr *);
386 static void vxlan_sockaddr_copy(union vxlan_sockaddr *,
387 const struct sockaddr *);
388 static int vxlan_sockaddr_in_equal(const union vxlan_sockaddr *,
389 const struct sockaddr *);
390 static void vxlan_sockaddr_in_copy(union vxlan_sockaddr *,
391 const struct sockaddr *);
392 static int vxlan_sockaddr_supported(const union vxlan_sockaddr *, int);
393 static int vxlan_sockaddr_in_any(const union vxlan_sockaddr *);
394 static int vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *);
395 static int vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *);
396
397 static int vxlan_can_change_config(struct vxlan_softc *);
398 static int vxlan_check_vni(uint32_t);
399 static int vxlan_check_ttl(int);
400 static int vxlan_check_ftable_timeout(uint32_t);
401 static int vxlan_check_ftable_max(uint32_t);
402
403 static void vxlan_sysctl_setup(struct vxlan_softc *);
404 static void vxlan_sysctl_destroy(struct vxlan_softc *);
405 static int vxlan_tunable_int(struct vxlan_softc *, const char *, int);
406
407 static void vxlan_ifdetach_event(void *, struct ifnet *);
408 static void vxlan_load(void);
409 static void vxlan_unload(void);
410 static int vxlan_modevent(module_t, int, void *);
411
412 static const char vxlan_name[] = "vxlan";
413 static MALLOC_DEFINE(M_VXLAN, vxlan_name,
414 "Virtual eXtensible LAN Interface");
415 static struct if_clone *vxlan_cloner;
416
417 static struct mtx vxlan_list_mtx;
418 #define VXLAN_LIST_LOCK() mtx_lock(&vxlan_list_mtx)
419 #define VXLAN_LIST_UNLOCK() mtx_unlock(&vxlan_list_mtx)
420
421 static LIST_HEAD(, vxlan_socket) vxlan_socket_list;
422
423 static eventhandler_tag vxlan_ifdetach_event_tag;
424
425 SYSCTL_DECL(_net_link);
426 SYSCTL_NODE(_net_link, OID_AUTO, vxlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
427 "Virtual eXtensible Local Area Network");
428
429 static int vxlan_legacy_port = 0;
430 TUNABLE_INT("net.link.vxlan.legacy_port", &vxlan_legacy_port);
431 static int vxlan_reuse_port = 0;
432 TUNABLE_INT("net.link.vxlan.reuse_port", &vxlan_reuse_port);
433
434 /* Default maximum number of addresses in the forwarding table. */
435 #ifndef VXLAN_FTABLE_MAX
436 #define VXLAN_FTABLE_MAX 2000
437 #endif
438
439 /* Timeout (in seconds) of addresses learned in the forwarding table. */
440 #ifndef VXLAN_FTABLE_TIMEOUT
441 #define VXLAN_FTABLE_TIMEOUT (20 * 60)
442 #endif
443
444 /*
445 * Maximum timeout (in seconds) of addresses learned in the forwarding
446 * table.
447 */
448 #ifndef VXLAN_FTABLE_MAX_TIMEOUT
449 #define VXLAN_FTABLE_MAX_TIMEOUT (60 * 60 * 24)
450 #endif
451
452 /* Number of seconds between pruning attempts of the forwarding table. */
453 #ifndef VXLAN_FTABLE_PRUNE
454 #define VXLAN_FTABLE_PRUNE (5 * 60)
455 #endif
456
457 static int vxlan_ftable_prune_period = VXLAN_FTABLE_PRUNE;
458
459 struct vxlan_control {
460 int (*vxlc_func)(struct vxlan_softc *, void *);
461 int vxlc_argsize;
462 int vxlc_flags;
463 #define VXLAN_CTRL_FLAG_COPYIN 0x01
464 #define VXLAN_CTRL_FLAG_COPYOUT 0x02
465 #define VXLAN_CTRL_FLAG_SUSER 0x04
466 };
467
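/*
 * Ioctl dispatch table, indexed by the VXLAN_CMD_* values carried in the
 * ifd_cmd field of a SIOCGDRVSPEC/SIOCSDRVSPEC request.
 */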
468 static const struct vxlan_control vxlan_control_table[] = {
469 [VXLAN_CMD_GET_CONFIG] =
470 { vxlan_ctrl_get_config, sizeof(struct ifvxlancfg),
471 VXLAN_CTRL_FLAG_COPYOUT
472 },
473
474 [VXLAN_CMD_SET_VNI] =
475 { vxlan_ctrl_set_vni, sizeof(struct ifvxlancmd),
476 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
477 },
478
479 [VXLAN_CMD_SET_LOCAL_ADDR] =
480 { vxlan_ctrl_set_local_addr, sizeof(struct ifvxlancmd),
481 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
482 },
483
484 [VXLAN_CMD_SET_REMOTE_ADDR] =
485 { vxlan_ctrl_set_remote_addr, sizeof(struct ifvxlancmd),
486 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
487 },
488
489 [VXLAN_CMD_SET_LOCAL_PORT] =
490 { vxlan_ctrl_set_local_port, sizeof(struct ifvxlancmd),
491 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
492 },
493
494 [VXLAN_CMD_SET_REMOTE_PORT] =
495 { vxlan_ctrl_set_remote_port, sizeof(struct ifvxlancmd),
496 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
497 },
498
499 [VXLAN_CMD_SET_PORT_RANGE] =
500 { vxlan_ctrl_set_port_range, sizeof(struct ifvxlancmd),
501 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
502 },
503
504 [VXLAN_CMD_SET_FTABLE_TIMEOUT] =
505 { vxlan_ctrl_set_ftable_timeout, sizeof(struct ifvxlancmd),
506 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
507 },
508
509 [VXLAN_CMD_SET_FTABLE_MAX] =
510 { vxlan_ctrl_set_ftable_max, sizeof(struct ifvxlancmd),
511 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
512 },
513
514 [VXLAN_CMD_SET_MULTICAST_IF] =
515 { vxlan_ctrl_set_multicast_if, sizeof(struct ifvxlancmd),
516 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
517 },
518
519 [VXLAN_CMD_SET_TTL] =
520 { vxlan_ctrl_set_ttl, sizeof(struct ifvxlancmd),
521 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
522 },
523
524 [VXLAN_CMD_SET_LEARN] =
525 { vxlan_ctrl_set_learn, sizeof(struct ifvxlancmd),
526 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
527 },
528
529 [VXLAN_CMD_FTABLE_ENTRY_ADD] =
530 { vxlan_ctrl_ftable_entry_add, sizeof(struct ifvxlancmd),
531 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
532 },
533
534 [VXLAN_CMD_FTABLE_ENTRY_REM] =
535 { vxlan_ctrl_ftable_entry_rem, sizeof(struct ifvxlancmd),
536 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
537 },
538
539 [VXLAN_CMD_FLUSH] =
540 { vxlan_ctrl_flush, sizeof(struct ifvxlancmd),
541 VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
542 },
543 };
544
545 static const int vxlan_control_table_size = nitems(vxlan_control_table);
546
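/* memcmp()-style comparison of two MAC addresses. */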
547 static int
548 vxlan_ftable_addr_cmp(const uint8_t *a, const uint8_t *b)
549 {
550 int i, d;
551
552 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++)
553 d = ((int)a[i]) - ((int)b[i]);
554
555 return (d);
556 }
557
558 static void
559 vxlan_ftable_init(struct vxlan_softc *sc)
560 {
561 int i;
562
563 sc->vxl_ftable = malloc(sizeof(struct vxlan_ftable_head) *
564 VXLAN_SC_FTABLE_SIZE, M_VXLAN, M_ZERO | M_WAITOK);
565
566 for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++)
567 LIST_INIT(&sc->vxl_ftable[i]);
568 sc->vxl_ftable_hash_key = arc4random();
569 }
570
571 static void
572 vxlan_ftable_fini(struct vxlan_softc *sc)
573 {
574 int i;
575
576 for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
577 KASSERT(LIST_EMPTY(&sc->vxl_ftable[i]),
578 ("%s: vxlan %p ftable[%d] not empty", __func__, sc, i));
579 }
580 MPASS(sc->vxl_ftable_cnt == 0);
581
582 free(sc->vxl_ftable, M_VXLAN);
583 sc->vxl_ftable = NULL;
584 }
585
586 static void
587 vxlan_ftable_flush(struct vxlan_softc *sc, int all)
588 {
589 struct vxlan_ftable_entry *fe, *tfe;
590 int i;
591
592 for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
593 LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
594 if (all || VXLAN_FE_IS_DYNAMIC(fe))
595 vxlan_ftable_entry_destroy(sc, fe);
596 }
597 }
598 }
599
600 static void
601 vxlan_ftable_expire(struct vxlan_softc *sc)
602 {
603 struct vxlan_ftable_entry *fe, *tfe;
604 int i;
605
606 VXLAN_LOCK_WASSERT(sc);
607
608 for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
609 LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
610 if (VXLAN_FE_IS_DYNAMIC(fe) &&
611 time_uptime >= fe->vxlfe_expire)
612 vxlan_ftable_entry_destroy(sc, fe);
613 }
614 }
615 }
616
617 static int
618 vxlan_ftable_update_locked(struct vxlan_softc *sc,
619 const union vxlan_sockaddr *vxlsa, const uint8_t *mac,
620 struct rm_priotracker *tracker)
621 {
622 struct vxlan_ftable_entry *fe;
623 int error __unused;
624
625 VXLAN_LOCK_ASSERT(sc);
626
627 again:
628 /*
629 * A forwarding entry for this MAC address might already exist. If
630 * so, update it, otherwise create a new one. We may have to upgrade
631 * the lock if we have to change or create an entry.
632 */
633 fe = vxlan_ftable_entry_lookup(sc, mac);
634 if (fe != NULL) {
635 fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
636
637 if (!VXLAN_FE_IS_DYNAMIC(fe) ||
638 vxlan_sockaddr_in_equal(&fe->vxlfe_raddr, &vxlsa->sa))
639 return (0);
640 if (!VXLAN_LOCK_WOWNED(sc)) {
641 VXLAN_RUNLOCK(sc, tracker);
642 VXLAN_WLOCK(sc);
643 sc->vxl_stats.ftable_lock_upgrade_failed++;
644 goto again;
645 }
646 vxlan_sockaddr_in_copy(&fe->vxlfe_raddr, &vxlsa->sa);
647 return (0);
648 }
649
650 if (!VXLAN_LOCK_WOWNED(sc)) {
651 VXLAN_RUNLOCK(sc, tracker);
652 VXLAN_WLOCK(sc);
653 sc->vxl_stats.ftable_lock_upgrade_failed++;
654 goto again;
655 }
656
657 if (sc->vxl_ftable_cnt >= sc->vxl_ftable_max) {
658 sc->vxl_stats.ftable_nospace++;
659 return (ENOSPC);
660 }
661
662 fe = vxlan_ftable_entry_alloc();
663 if (fe == NULL)
664 return (ENOMEM);
665
666 vxlan_ftable_entry_init(sc, fe, mac, &vxlsa->sa, VXLAN_FE_FLAG_DYNAMIC);
667
668 /* The prior lookup failed, so the insert should not. */
669 error = vxlan_ftable_entry_insert(sc, fe);
670 MPASS(error == 0);
671
672 return (0);
673 }
674
675 static int
676 vxlan_ftable_learn(struct vxlan_softc *sc, const struct sockaddr *sa,
677 const uint8_t *mac)
678 {
679 struct rm_priotracker tracker;
680 union vxlan_sockaddr vxlsa;
681 int error;
682
683 /*
684 * The source port may be randomly selected by the remote host, so
685 * use the port of the default destination address.
686 */
687 vxlan_sockaddr_copy(&vxlsa, sa);
688 vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
689
690 if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
691 error = vxlan_sockaddr_in6_embedscope(&vxlsa);
692 if (error)
693 return (error);
694 }
695
696 VXLAN_RLOCK(sc, &tracker);
697 error = vxlan_ftable_update_locked(sc, &vxlsa, mac, &tracker);
698 VXLAN_UNLOCK(sc, &tracker);
699
700 return (error);
701 }
702
703 static int
704 vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS)
705 {
706 struct rm_priotracker tracker;
707 struct sbuf sb;
708 struct vxlan_softc *sc;
709 struct vxlan_ftable_entry *fe;
710 size_t size;
711 int i, error;
712
713 /*
714 * This is mostly intended for debugging during development. It is
715 * not practical to dump an entire large table this way.
716 */
717
718 sc = arg1;
719 size = PAGE_SIZE; /* Calculate later. */
720
721 sbuf_new(&sb, NULL, size, SBUF_FIXEDLEN);
722 sbuf_putc(&sb, '\n');
723
724 VXLAN_RLOCK(sc, &tracker);
725 for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
726 LIST_FOREACH(fe, &sc->vxl_ftable[i], vxlfe_hash) {
727 if (sbuf_error(&sb) != 0)
728 break;
729 vxlan_ftable_entry_dump(fe, &sb);
730 }
731 }
732 VXLAN_RUNLOCK(sc, &tracker);
733
734 if (sbuf_len(&sb) == 1)
735 sbuf_setpos(&sb, 0);
736
737 sbuf_finish(&sb);
738 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
739 sbuf_delete(&sb);
740
741 return (error);
742 }
743
744 static struct vxlan_ftable_entry *
745 vxlan_ftable_entry_alloc(void)
746 {
747 struct vxlan_ftable_entry *fe;
748
749 fe = malloc(sizeof(*fe), M_VXLAN, M_ZERO | M_NOWAIT);
750
751 return (fe);
752 }
753
754 static void
755 vxlan_ftable_entry_free(struct vxlan_ftable_entry *fe)
756 {
757
758 free(fe, M_VXLAN);
759 }
760
761 static void
762 vxlan_ftable_entry_init(struct vxlan_softc *sc, struct vxlan_ftable_entry *fe,
763 const uint8_t *mac, const struct sockaddr *sa, uint32_t flags)
764 {
765
766 fe->vxlfe_flags = flags;
767 fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
768 memcpy(fe->vxlfe_mac, mac, ETHER_ADDR_LEN);
769 vxlan_sockaddr_copy(&fe->vxlfe_raddr, sa);
770 }
771
772 static void
773 vxlan_ftable_entry_destroy(struct vxlan_softc *sc,
774 struct vxlan_ftable_entry *fe)
775 {
776
777 sc->vxl_ftable_cnt--;
778 LIST_REMOVE(fe, vxlfe_hash);
779 vxlan_ftable_entry_free(fe);
780 }
781
782 static int
783 vxlan_ftable_entry_insert(struct vxlan_softc *sc,
784 struct vxlan_ftable_entry *fe)
785 {
786 struct vxlan_ftable_entry *lfe;
787 uint32_t hash;
788 int dir;
789
790 VXLAN_LOCK_WASSERT(sc);
791 hash = VXLAN_SC_FTABLE_HASH(sc, fe->vxlfe_mac);
792
793 lfe = LIST_FIRST(&sc->vxl_ftable[hash]);
794 if (lfe == NULL) {
795 LIST_INSERT_HEAD(&sc->vxl_ftable[hash], fe, vxlfe_hash);
796 goto out;
797 }
798
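	/*
	 * The bucket list is kept sorted in descending MAC address order so
	 * that lookups can terminate early once they pass the target address.
	 */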
799 do {
800 dir = vxlan_ftable_addr_cmp(fe->vxlfe_mac, lfe->vxlfe_mac);
801 if (dir == 0)
802 return (EEXIST);
803 if (dir > 0) {
804 LIST_INSERT_BEFORE(lfe, fe, vxlfe_hash);
805 goto out;
806 } else if (LIST_NEXT(lfe, vxlfe_hash) == NULL) {
807 LIST_INSERT_AFTER(lfe, fe, vxlfe_hash);
808 goto out;
809 } else
810 lfe = LIST_NEXT(lfe, vxlfe_hash);
811 } while (lfe != NULL);
812
813 out:
814 sc->vxl_ftable_cnt++;
815
816 return (0);
817 }
818
819 static struct vxlan_ftable_entry *
820 vxlan_ftable_entry_lookup(struct vxlan_softc *sc, const uint8_t *mac)
821 {
822 struct vxlan_ftable_entry *fe;
823 uint32_t hash;
824 int dir;
825
826 VXLAN_LOCK_ASSERT(sc);
827 hash = VXLAN_SC_FTABLE_HASH(sc, mac);
828
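	/* Entries are sorted in descending order; stop once we pass the MAC. */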
829 LIST_FOREACH(fe, &sc->vxl_ftable[hash], vxlfe_hash) {
830 dir = vxlan_ftable_addr_cmp(mac, fe->vxlfe_mac);
831 if (dir == 0)
832 return (fe);
833 if (dir > 0)
834 break;
835 }
836
837 return (NULL);
838 }
839
840 static void
841 vxlan_ftable_entry_dump(struct vxlan_ftable_entry *fe, struct sbuf *sb)
842 {
843 char buf[64];
844 const union vxlan_sockaddr *sa;
845 const void *addr;
846 int i, len, af, width;
847
848 sa = &fe->vxlfe_raddr;
849 af = sa->sa.sa_family;
850 len = sbuf_len(sb);
851
852 sbuf_printf(sb, "%c 0x%02X ", VXLAN_FE_IS_DYNAMIC(fe) ? 'D' : 'S',
853 fe->vxlfe_flags);
854
855 for (i = 0; i < ETHER_ADDR_LEN - 1; i++)
856 sbuf_printf(sb, "%02X:", fe->vxlfe_mac[i]);
857 sbuf_printf(sb, "%02X ", fe->vxlfe_mac[i]);
858
859 if (af == AF_INET) {
860 addr = &sa->in4.sin_addr;
861 width = INET_ADDRSTRLEN - 1;
862 } else {
863 addr = &sa->in6.sin6_addr;
864 width = INET6_ADDRSTRLEN - 1;
865 }
866 inet_ntop(af, addr, buf, sizeof(buf));
867 sbuf_printf(sb, "%*s ", width, buf);
868
869 sbuf_printf(sb, "%08jd", (intmax_t)fe->vxlfe_expire);
870
871 sbuf_putc(sb, '\n');
872
873 /* Truncate a partial line. */
874 if (sbuf_error(sb) != 0)
875 sbuf_setpos(sb, len);
876 }
877
878 static struct vxlan_socket *
879 vxlan_socket_alloc(const union vxlan_sockaddr *sa)
880 {
881 struct vxlan_socket *vso;
882 int i;
883
884 vso = malloc(sizeof(*vso), M_VXLAN, M_WAITOK | M_ZERO);
885 rm_init(&vso->vxlso_lock, "vxlansorm");
886 refcount_init(&vso->vxlso_refcnt, 0);
887 for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++)
888 LIST_INIT(&vso->vxlso_vni_hash[i]);
889 vso->vxlso_laddr = *sa;
890
891 return (vso);
892 }
893
894 static void
895 vxlan_socket_destroy(struct vxlan_socket *vso)
896 {
897 struct socket *so;
898 #ifdef INVARIANTS
899 int i;
900 struct vxlan_socket_mc_info *mc;
901
902 for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
903 mc = &vso->vxlso_mc[i];
904 KASSERT(mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC,
905 ("%s: socket %p mc[%d] still has address",
906 __func__, vso, i));
907 }
908
909 for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
910 KASSERT(LIST_EMPTY(&vso->vxlso_vni_hash[i]),
911 ("%s: socket %p vni_hash[%d] not empty",
912 __func__, vso, i));
913 }
914 #endif
915 so = vso->vxlso_sock;
916 if (so != NULL) {
917 vso->vxlso_sock = NULL;
918 soclose(so);
919 }
920
921 rm_destroy(&vso->vxlso_lock);
922 free(vso, M_VXLAN);
923 }
924
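/*
 * Drop a reference on the socket.  The final release removes it from the
 * global socket list and destroys it.
 */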
925 static void
926 vxlan_socket_release(struct vxlan_socket *vso)
927 {
928 int destroy;
929
930 VXLAN_LIST_LOCK();
931 destroy = VXLAN_SO_RELEASE(vso);
932 if (destroy != 0)
933 LIST_REMOVE(vso, vxlso_entry);
934 VXLAN_LIST_UNLOCK();
935
936 if (destroy != 0)
937 vxlan_socket_destroy(vso);
938 }
939
940 static struct vxlan_socket *
941 vxlan_socket_lookup(union vxlan_sockaddr *vxlsa)
942 {
943 struct vxlan_socket *vso;
944
945 VXLAN_LIST_LOCK();
946 LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry) {
947 if (vxlan_sockaddr_cmp(&vso->vxlso_laddr, &vxlsa->sa) == 0) {
948 VXLAN_SO_ACQUIRE(vso);
949 break;
950 }
951 }
952 VXLAN_LIST_UNLOCK();
953
954 return (vso);
955 }
956
957 static void
958 vxlan_socket_insert(struct vxlan_socket *vso)
959 {
960
961 VXLAN_LIST_LOCK();
962 VXLAN_SO_ACQUIRE(vso);
963 LIST_INSERT_HEAD(&vxlan_socket_list, vso, vxlso_entry);
964 VXLAN_LIST_UNLOCK();
965 }
966
967 static int
968 vxlan_socket_init(struct vxlan_socket *vso, struct ifnet *ifp)
969 {
970 struct thread *td;
971 int error;
972
973 td = curthread;
974
975 error = socreate(vso->vxlso_laddr.sa.sa_family, &vso->vxlso_sock,
976 SOCK_DGRAM, IPPROTO_UDP, td->td_ucred, td);
977 if (error) {
978 if_printf(ifp, "cannot create socket: %d\n", error);
979 return (error);
980 }
981
982 error = udp_set_kernel_tunneling(vso->vxlso_sock,
983 vxlan_rcv_udp_packet, NULL, vso);
984 if (error) {
985 if_printf(ifp, "cannot set tunneling function: %d\n", error);
986 return (error);
987 }
988
989 if (vxlan_reuse_port != 0) {
990 struct sockopt sopt;
991 int val = 1;
992
993 bzero(&sopt, sizeof(sopt));
994 sopt.sopt_dir = SOPT_SET;
995 sopt.sopt_level = IPPROTO_IP;
996 sopt.sopt_name = SO_REUSEPORT;
997 sopt.sopt_val = &val;
998 sopt.sopt_valsize = sizeof(val);
999 error = sosetopt(vso->vxlso_sock, &sopt);
1000 if (error) {
1001 if_printf(ifp,
1002 "cannot set REUSEADDR socket opt: %d\n", error);
1003 return (error);
1004 }
1005 }
1006
1007 return (0);
1008 }
1009
1010 static int
1011 vxlan_socket_bind(struct vxlan_socket *vso, struct ifnet *ifp)
1012 {
1013 union vxlan_sockaddr laddr;
1014 struct thread *td;
1015 int error;
1016
1017 td = curthread;
1018 laddr = vso->vxlso_laddr;
1019
1020 error = sobind(vso->vxlso_sock, &laddr.sa, td);
1021 if (error) {
1022 if (error != EADDRINUSE)
1023 if_printf(ifp, "cannot bind socket: %d\n", error);
1024 return (error);
1025 }
1026
1027 return (0);
1028 }
1029
1030 static int
1031 vxlan_socket_create(struct ifnet *ifp, int multicast,
1032 const union vxlan_sockaddr *saddr, struct vxlan_socket **vsop)
1033 {
1034 union vxlan_sockaddr laddr;
1035 struct vxlan_socket *vso;
1036 int error;
1037
1038 laddr = *saddr;
1039
1040 /*
1041 * If this socket will be multicast, then only the local port
1042 * must be specified when binding.
1043 */
1044 if (multicast != 0) {
1045 if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
1046 laddr.in4.sin_addr.s_addr = INADDR_ANY;
1047 #ifdef INET6
1048 else
1049 laddr.in6.sin6_addr = in6addr_any;
1050 #endif
1051 }
1052
1053 vso = vxlan_socket_alloc(&laddr);
1054 if (vso == NULL)
1055 return (ENOMEM);
1056
1057 error = vxlan_socket_init(vso, ifp);
1058 if (error)
1059 goto fail;
1060
1061 error = vxlan_socket_bind(vso, ifp);
1062 if (error)
1063 goto fail;
1064
1065 /*
1066 * There is a small window between the bind completing and
1067 	 * inserting the socket, during which a concurrent create may fail.
1068 * Let's not worry about that for now.
1069 */
1070 vxlan_socket_insert(vso);
1071 *vsop = vso;
1072
1073 return (0);
1074
1075 fail:
1076 vxlan_socket_destroy(vso);
1077
1078 return (error);
1079 }
1080
1081 static void
1082 vxlan_socket_ifdetach(struct vxlan_socket *vso, struct ifnet *ifp,
1083 struct vxlan_softc_head *list)
1084 {
1085 struct rm_priotracker tracker;
1086 struct vxlan_softc *sc;
1087 int i;
1088
1089 VXLAN_SO_RLOCK(vso, &tracker);
1090 for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
1091 LIST_FOREACH(sc, &vso->vxlso_vni_hash[i], vxl_entry)
1092 vxlan_ifdetach(sc, ifp, list);
1093 }
1094 VXLAN_SO_RUNLOCK(vso, &tracker);
1095 }
1096
1097 static struct vxlan_socket *
1098 vxlan_socket_mc_lookup(const union vxlan_sockaddr *vxlsa)
1099 {
1100 union vxlan_sockaddr laddr;
1101 struct vxlan_socket *vso;
1102
1103 laddr = *vxlsa;
1104
1105 if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
1106 laddr.in4.sin_addr.s_addr = INADDR_ANY;
1107 #ifdef INET6
1108 else
1109 laddr.in6.sin6_addr = in6addr_any;
1110 #endif
1111
1112 vso = vxlan_socket_lookup(&laddr);
1113
1114 return (vso);
1115 }
1116
1117 static int
1118 vxlan_sockaddr_mc_info_match(const struct vxlan_socket_mc_info *mc,
1119 const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1120 int ifidx)
1121 {
1122
1123 if (!vxlan_sockaddr_in_any(local) &&
1124 !vxlan_sockaddr_in_equal(&mc->vxlsomc_saddr, &local->sa))
1125 return (0);
1126 if (!vxlan_sockaddr_in_equal(&mc->vxlsomc_gaddr, &group->sa))
1127 return (0);
1128 if (ifidx != 0 && ifidx != mc->vxlsomc_ifidx)
1129 return (0);
1130
1131 return (1);
1132 }
1133
1134 static int
1135 vxlan_socket_mc_join_group(struct vxlan_socket *vso,
1136 const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1137 int *ifidx, union vxlan_sockaddr *source)
1138 {
1139 struct sockopt sopt;
1140 int error;
1141
1142 *source = *local;
1143
1144 if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1145 struct ip_mreq mreq;
1146
1147 mreq.imr_multiaddr = group->in4.sin_addr;
1148 mreq.imr_interface = local->in4.sin_addr;
1149
1150 bzero(&sopt, sizeof(sopt));
1151 sopt.sopt_dir = SOPT_SET;
1152 sopt.sopt_level = IPPROTO_IP;
1153 sopt.sopt_name = IP_ADD_MEMBERSHIP;
1154 sopt.sopt_val = &mreq;
1155 sopt.sopt_valsize = sizeof(mreq);
1156 error = sosetopt(vso->vxlso_sock, &sopt);
1157 if (error)
1158 return (error);
1159
1160 /*
1161 * BMV: Ideally, there would be a formal way for us to get
1162 * the local interface that was selected based on the
1163 * imr_interface address. We could then update *ifidx so
1164 * vxlan_sockaddr_mc_info_match() would return a match for
1165 * later creates that explicitly set the multicast interface.
1166 *
1167 * If we really need to, we can of course look in the INP's
1168 * membership list:
1169 * sotoinpcb(vso->vxlso_sock)->inp_moptions->
1170 * imo_head[]->imf_inm->inm_ifp
1171 * similarly to imo_match_group().
1172 */
1173 source->in4.sin_addr = local->in4.sin_addr;
1174
1175 } else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1176 struct ipv6_mreq mreq;
1177
1178 mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
1179 mreq.ipv6mr_interface = *ifidx;
1180
1181 bzero(&sopt, sizeof(sopt));
1182 sopt.sopt_dir = SOPT_SET;
1183 sopt.sopt_level = IPPROTO_IPV6;
1184 sopt.sopt_name = IPV6_JOIN_GROUP;
1185 sopt.sopt_val = &mreq;
1186 sopt.sopt_valsize = sizeof(mreq);
1187 error = sosetopt(vso->vxlso_sock, &sopt);
1188 if (error)
1189 return (error);
1190
1191 /*
1192 * BMV: As with IPv4, we would really like to know what
1193 * interface in6p_lookup_mcast_ifp() selected.
1194 */
1195 } else
1196 error = EAFNOSUPPORT;
1197
1198 return (error);
1199 }
1200
1201 static int
1202 vxlan_socket_mc_leave_group(struct vxlan_socket *vso,
1203 const union vxlan_sockaddr *group, const union vxlan_sockaddr *source,
1204 int ifidx)
1205 {
1206 struct sockopt sopt;
1207 int error;
1208
1209 bzero(&sopt, sizeof(sopt));
1210 sopt.sopt_dir = SOPT_SET;
1211
1212 if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1213 struct ip_mreq mreq;
1214
1215 mreq.imr_multiaddr = group->in4.sin_addr;
1216 mreq.imr_interface = source->in4.sin_addr;
1217
1218 sopt.sopt_level = IPPROTO_IP;
1219 sopt.sopt_name = IP_DROP_MEMBERSHIP;
1220 sopt.sopt_val = &mreq;
1221 sopt.sopt_valsize = sizeof(mreq);
1222 error = sosetopt(vso->vxlso_sock, &sopt);
1223
1224 } else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1225 struct ipv6_mreq mreq;
1226
1227 mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
1228 mreq.ipv6mr_interface = ifidx;
1229
1230 sopt.sopt_level = IPPROTO_IPV6;
1231 sopt.sopt_name = IPV6_LEAVE_GROUP;
1232 sopt.sopt_val = &mreq;
1233 sopt.sopt_valsize = sizeof(mreq);
1234 error = sosetopt(vso->vxlso_sock, &sopt);
1235
1236 } else
1237 error = EAFNOSUPPORT;
1238
1239 return (error);
1240 }
1241
1242 static int
1243 vxlan_socket_mc_add_group(struct vxlan_socket *vso,
1244 const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1245 int ifidx, int *idx)
1246 {
1247 union vxlan_sockaddr source;
1248 struct vxlan_socket_mc_info *mc;
1249 int i, empty, error;
1250
1251 /*
1252 * Within a socket, the same multicast group may be used by multiple
1253 * interfaces, each with a different network identifier. But a socket
1254 * may only join a multicast group once, so keep track of the users
1255 * here.
1256 */
1257
1258 VXLAN_SO_WLOCK(vso);
1259 for (empty = 0, i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
1260 mc = &vso->vxlso_mc[i];
1261
1262 if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
1263 empty++;
1264 continue;
1265 }
1266
1267 if (vxlan_sockaddr_mc_info_match(mc, group, local, ifidx))
1268 goto out;
1269 }
1270 VXLAN_SO_WUNLOCK(vso);
1271
1272 if (empty == 0)
1273 return (ENOSPC);
1274
1275 error = vxlan_socket_mc_join_group(vso, group, local, &ifidx, &source);
1276 if (error)
1277 return (error);
1278
1279 VXLAN_SO_WLOCK(vso);
1280 for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
1281 mc = &vso->vxlso_mc[i];
1282
1283 if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
1284 vxlan_sockaddr_copy(&mc->vxlsomc_gaddr, &group->sa);
1285 vxlan_sockaddr_copy(&mc->vxlsomc_saddr, &source.sa);
1286 mc->vxlsomc_ifidx = ifidx;
1287 goto out;
1288 }
1289 }
1290 VXLAN_SO_WUNLOCK(vso);
1291
1292 error = vxlan_socket_mc_leave_group(vso, group, &source, ifidx);
1293 MPASS(error == 0);
1294
1295 return (ENOSPC);
1296
1297 out:
1298 mc->vxlsomc_users++;
1299 VXLAN_SO_WUNLOCK(vso);
1300
1301 *idx = i;
1302
1303 return (0);
1304 }
1305
1306 static void
1307 vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *vso, int idx)
1308 {
1309 union vxlan_sockaddr group, source;
1310 struct vxlan_socket_mc_info *mc;
1311 int ifidx, leave;
1312
1313 KASSERT(idx >= 0 && idx < VXLAN_SO_MC_MAX_GROUPS,
1314 ("%s: vso %p idx %d out of bounds", __func__, vso, idx));
1315
1316 leave = 0;
1317 mc = &vso->vxlso_mc[idx];
1318
1319 VXLAN_SO_WLOCK(vso);
1320 mc->vxlsomc_users--;
1321 if (mc->vxlsomc_users == 0) {
1322 group = mc->vxlsomc_gaddr;
1323 source = mc->vxlsomc_saddr;
1324 ifidx = mc->vxlsomc_ifidx;
1325 bzero(mc, sizeof(*mc));
1326 leave = 1;
1327 }
1328 VXLAN_SO_WUNLOCK(vso);
1329
1330 if (leave != 0) {
1331 /*
1332 * Our socket's membership in this group may have already
1333 * been removed if we joined through an interface that's
1334 * been detached.
1335 */
1336 vxlan_socket_mc_leave_group(vso, &group, &source, ifidx);
1337 }
1338 }
1339
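/*
 * Find the softc bound to this VNI on the socket.  A reference is taken on
 * the returned softc; the caller releases it with vxlan_release().
 */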
1340 static struct vxlan_softc *
1341 vxlan_socket_lookup_softc_locked(struct vxlan_socket *vso, uint32_t vni)
1342 {
1343 struct vxlan_softc *sc;
1344 uint32_t hash;
1345
1346 VXLAN_SO_LOCK_ASSERT(vso);
1347 hash = VXLAN_SO_VNI_HASH(vni);
1348
1349 LIST_FOREACH(sc, &vso->vxlso_vni_hash[hash], vxl_entry) {
1350 if (sc->vxl_vni == vni) {
1351 VXLAN_ACQUIRE(sc);
1352 break;
1353 }
1354 }
1355
1356 return (sc);
1357 }
1358
1359 static struct vxlan_softc *
1360 vxlan_socket_lookup_softc(struct vxlan_socket *vso, uint32_t vni)
1361 {
1362 struct rm_priotracker tracker;
1363 struct vxlan_softc *sc;
1364
1365 VXLAN_SO_RLOCK(vso, &tracker);
1366 sc = vxlan_socket_lookup_softc_locked(vso, vni);
1367 VXLAN_SO_RUNLOCK(vso, &tracker);
1368
1369 return (sc);
1370 }
1371
1372 static int
1373 vxlan_socket_insert_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
1374 {
1375 struct vxlan_softc *tsc;
1376 uint32_t vni, hash;
1377
1378 vni = sc->vxl_vni;
1379 hash = VXLAN_SO_VNI_HASH(vni);
1380
1381 VXLAN_SO_WLOCK(vso);
1382 tsc = vxlan_socket_lookup_softc_locked(vso, vni);
1383 if (tsc != NULL) {
1384 VXLAN_SO_WUNLOCK(vso);
1385 vxlan_release(tsc);
1386 return (EEXIST);
1387 }
1388
1389 VXLAN_ACQUIRE(sc);
1390 LIST_INSERT_HEAD(&vso->vxlso_vni_hash[hash], sc, vxl_entry);
1391 VXLAN_SO_WUNLOCK(vso);
1392
1393 return (0);
1394 }
1395
1396 static void
1397 vxlan_socket_remove_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
1398 {
1399
1400 VXLAN_SO_WLOCK(vso);
1401 LIST_REMOVE(sc, vxl_entry);
1402 VXLAN_SO_WUNLOCK(vso);
1403
1404 vxlan_release(sc);
1405 }
1406
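/*
 * Return the interface used for multicast transmit, if any, with a
 * reference held.  The caller is responsible for the if_rele().
 */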
1407 static struct ifnet *
1408 vxlan_multicast_if_ref(struct vxlan_softc *sc, int ipv4)
1409 {
1410 struct ifnet *ifp;
1411
1412 VXLAN_LOCK_ASSERT(sc);
1413
1414 if (ipv4 && sc->vxl_im4o != NULL)
1415 ifp = sc->vxl_im4o->imo_multicast_ifp;
1416 else if (!ipv4 && sc->vxl_im6o != NULL)
1417 ifp = sc->vxl_im6o->im6o_multicast_ifp;
1418 else
1419 ifp = NULL;
1420
1421 if (ifp != NULL)
1422 if_ref(ifp);
1423
1424 return (ifp);
1425 }
1426
1427 static void
1428 vxlan_free_multicast(struct vxlan_softc *sc)
1429 {
1430
1431 if (sc->vxl_mc_ifp != NULL) {
1432 if_rele(sc->vxl_mc_ifp);
1433 sc->vxl_mc_ifp = NULL;
1434 sc->vxl_mc_ifindex = 0;
1435 }
1436
1437 if (sc->vxl_im4o != NULL) {
1438 free(sc->vxl_im4o, M_VXLAN);
1439 sc->vxl_im4o = NULL;
1440 }
1441
1442 if (sc->vxl_im6o != NULL) {
1443 free(sc->vxl_im6o, M_VXLAN);
1444 sc->vxl_im6o = NULL;
1445 }
1446 }
1447
1448 static int
1449 vxlan_setup_multicast_interface(struct vxlan_softc *sc)
1450 {
1451 struct ifnet *ifp;
1452
1453 ifp = ifunit_ref(sc->vxl_mc_ifname);
1454 if (ifp == NULL) {
1455 if_printf(sc->vxl_ifp, "multicast interface %s does "
1456 "not exist\n", sc->vxl_mc_ifname);
1457 return (ENOENT);
1458 }
1459
1460 if ((ifp->if_flags & IFF_MULTICAST) == 0) {
1461 if_printf(sc->vxl_ifp, "interface %s does not support "
1462 "multicast\n", sc->vxl_mc_ifname);
1463 if_rele(ifp);
1464 return (ENOTSUP);
1465 }
1466
1467 sc->vxl_mc_ifp = ifp;
1468 sc->vxl_mc_ifindex = ifp->if_index;
1469
1470 return (0);
1471 }
1472
1473 static int
1474 vxlan_setup_multicast(struct vxlan_softc *sc)
1475 {
1476 const union vxlan_sockaddr *group;
1477 int error;
1478
1479 group = &sc->vxl_dst_addr;
1480 error = 0;
1481
1482 if (sc->vxl_mc_ifname[0] != '\0') {
1483 error = vxlan_setup_multicast_interface(sc);
1484 if (error)
1485 return (error);
1486 }
1487
1488 /*
1489 	 * Initialize a multicast options structure that is sufficiently
1490 * populated for use in the respective IP output routine. This
1491 * structure is typically stored in the socket, but our sockets
1492 * may be shared among multiple interfaces.
1493 */
1494 if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1495 sc->vxl_im4o = malloc(sizeof(struct ip_moptions), M_VXLAN,
1496 M_ZERO | M_WAITOK);
1497 sc->vxl_im4o->imo_multicast_ifp = sc->vxl_mc_ifp;
1498 sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
1499 sc->vxl_im4o->imo_multicast_vif = -1;
1500 } else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1501 sc->vxl_im6o = malloc(sizeof(struct ip6_moptions), M_VXLAN,
1502 M_ZERO | M_WAITOK);
1503 sc->vxl_im6o->im6o_multicast_ifp = sc->vxl_mc_ifp;
1504 sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
1505 }
1506
1507 return (error);
1508 }
1509
1510 static int
1511 vxlan_setup_socket(struct vxlan_softc *sc)
1512 {
1513 struct vxlan_socket *vso;
1514 struct ifnet *ifp;
1515 union vxlan_sockaddr *saddr, *daddr;
1516 int multicast, error;
1517
1518 vso = NULL;
1519 ifp = sc->vxl_ifp;
1520 saddr = &sc->vxl_src_addr;
1521 daddr = &sc->vxl_dst_addr;
1522
1523 multicast = vxlan_sockaddr_in_multicast(daddr);
1524 MPASS(multicast != -1);
1525 sc->vxl_vso_mc_index = -1;
1526
1527 /*
1528 * Try to create the socket. If that fails, attempt to use an
1529 * existing socket.
1530 */
1531 error = vxlan_socket_create(ifp, multicast, saddr, &vso);
1532 if (error) {
1533 if (multicast != 0)
1534 vso = vxlan_socket_mc_lookup(saddr);
1535 else
1536 vso = vxlan_socket_lookup(saddr);
1537
1538 if (vso == NULL) {
1539 if_printf(ifp, "cannot create socket (error: %d), "
1540 "and no existing socket found\n", error);
1541 goto out;
1542 }
1543 }
1544
1545 if (multicast != 0) {
1546 error = vxlan_setup_multicast(sc);
1547 if (error)
1548 goto out;
1549
1550 error = vxlan_socket_mc_add_group(vso, daddr, saddr,
1551 sc->vxl_mc_ifindex, &sc->vxl_vso_mc_index);
1552 if (error)
1553 goto out;
1554 }
1555
1556 sc->vxl_sock = vso;
1557 error = vxlan_socket_insert_softc(vso, sc);
1558 if (error) {
1559 sc->vxl_sock = NULL;
1560 if_printf(ifp, "network identifier %d already exists in "
1561 "this socket\n", sc->vxl_vni);
1562 goto out;
1563 }
1564
1565 return (0);
1566
1567 out:
1568 if (vso != NULL) {
1569 if (sc->vxl_vso_mc_index != -1) {
1570 vxlan_socket_mc_release_group_by_idx(vso,
1571 sc->vxl_vso_mc_index);
1572 sc->vxl_vso_mc_index = -1;
1573 }
1574 if (multicast != 0)
1575 vxlan_free_multicast(sc);
1576 vxlan_socket_release(vso);
1577 }
1578
1579 return (error);
1580 }
1581
1582 #ifdef INET6
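/*
 * RFC 6935 permits UDP over IPv6 tunnels to use a zero checksum on a
 * configured port.  Record our VXLAN port in V_zero_checksum_port so such
 * datagrams are accepted on receive.
 */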
1583 static void
1584 vxlan_setup_zero_checksum_port(struct vxlan_softc *sc)
1585 {
1586
1587 if (!VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_src_addr))
1588 return;
1589
1590 MPASS(sc->vxl_src_addr.in6.sin6_port != 0);
1591 MPASS(sc->vxl_dst_addr.in6.sin6_port != 0);
1592
1593 if (sc->vxl_src_addr.in6.sin6_port != sc->vxl_dst_addr.in6.sin6_port) {
1594 if_printf(sc->vxl_ifp, "port %d in src address does not match "
1595 "port %d in dst address, rfc6935_port (%d) not updated.\n",
1596 ntohs(sc->vxl_src_addr.in6.sin6_port),
1597 ntohs(sc->vxl_dst_addr.in6.sin6_port),
1598 V_zero_checksum_port);
1599 return;
1600 }
1601
1602 if (V_zero_checksum_port != 0) {
1603 if (V_zero_checksum_port !=
1604 ntohs(sc->vxl_src_addr.in6.sin6_port)) {
1605 if_printf(sc->vxl_ifp, "rfc6935_port is already set to "
1606 "%d, cannot set it to %d.\n", V_zero_checksum_port,
1607 ntohs(sc->vxl_src_addr.in6.sin6_port));
1608 }
1609 return;
1610 }
1611
1612 V_zero_checksum_port = ntohs(sc->vxl_src_addr.in6.sin6_port);
1613 if_printf(sc->vxl_ifp, "rfc6935_port set to %d\n",
1614 V_zero_checksum_port);
1615 }
1616 #endif
1617
1618 static void
1619 vxlan_setup_interface_hdrlen(struct vxlan_softc *sc)
1620 {
1621 struct ifnet *ifp;
1622
1623 ifp = sc->vxl_ifp;
1624 ifp->if_hdrlen = ETHER_HDR_LEN + sizeof(struct vxlanudphdr);
1625
1626 if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr) != 0)
1627 ifp->if_hdrlen += sizeof(struct ip);
1628 else if (VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_dst_addr) != 0)
1629 ifp->if_hdrlen += sizeof(struct ip6_hdr);
1630 }
1631
1632 static int
1633 vxlan_valid_init_config(struct vxlan_softc *sc)
1634 {
1635 const char *reason;
1636
1637 if (vxlan_check_vni(sc->vxl_vni) != 0) {
1638 reason = "invalid virtual network identifier specified";
1639 goto fail;
1640 }
1641
1642 if (vxlan_sockaddr_supported(&sc->vxl_src_addr, 1) == 0) {
1643 reason = "source address type is not supported";
1644 goto fail;
1645 }
1646
1647 if (vxlan_sockaddr_supported(&sc->vxl_dst_addr, 0) == 0) {
1648 reason = "destination address type is not supported";
1649 goto fail;
1650 }
1651
1652 if (vxlan_sockaddr_in_any(&sc->vxl_dst_addr) != 0) {
1653 reason = "no valid destination address specified";
1654 goto fail;
1655 }
1656
1657 if (vxlan_sockaddr_in_multicast(&sc->vxl_dst_addr) == 0 &&
1658 sc->vxl_mc_ifname[0] != '\0') {
1659 reason = "can only specify interface with a group address";
1660 goto fail;
1661 }
1662
1663 if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
1664 if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_src_addr) ^
1665 VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr)) {
1666 reason = "source and destination address must both "
1667 "be either IPv4 or IPv6";
1668 goto fail;
1669 }
1670 }
1671
1672 if (sc->vxl_src_addr.in4.sin_port == 0) {
1673 reason = "local port not specified";
1674 goto fail;
1675 }
1676
1677 if (sc->vxl_dst_addr.in4.sin_port == 0) {
1678 reason = "remote port not specified";
1679 goto fail;
1680 }
1681
1682 return (0);
1683
1684 fail:
1685 if_printf(sc->vxl_ifp, "cannot initialize interface: %s\n", reason);
1686 return (EINVAL);
1687 }
1688
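/*
 * VXLAN_FLAG_INIT marks an in-progress vxlan_init(); sleep until the
 * concurrent initialization completes.
 */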
1689 static void
1690 vxlan_init_wait(struct vxlan_softc *sc)
1691 {
1692
1693 VXLAN_LOCK_WASSERT(sc);
1694 while (sc->vxl_flags & VXLAN_FLAG_INIT)
1695 rm_sleep(sc, &sc->vxl_lock, 0, "vxlint", hz);
1696 }
1697
1698 static void
1699 vxlan_init_complete(struct vxlan_softc *sc)
1700 {
1701
1702 VXLAN_WLOCK(sc);
1703 sc->vxl_flags &= ~VXLAN_FLAG_INIT;
1704 wakeup(sc);
1705 VXLAN_WUNLOCK(sc);
1706 }
1707
1708 static void
1709 vxlan_init(void *xsc)
1710 {
1711 static const uint8_t empty_mac[ETHER_ADDR_LEN];
1712 struct vxlan_softc *sc;
1713 struct ifnet *ifp;
1714
1715 sc = xsc;
1716 ifp = sc->vxl_ifp;
1717
1718 sx_xlock(&vxlan_sx);
1719 VXLAN_WLOCK(sc);
1720 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1721 VXLAN_WUNLOCK(sc);
1722 sx_xunlock(&vxlan_sx);
1723 return;
1724 }
1725 sc->vxl_flags |= VXLAN_FLAG_INIT;
1726 VXLAN_WUNLOCK(sc);
1727
1728 if (vxlan_valid_init_config(sc) != 0)
1729 goto out;
1730
1731 if (vxlan_setup_socket(sc) != 0)
1732 goto out;
1733
1734 #ifdef INET6
1735 vxlan_setup_zero_checksum_port(sc);
1736 #endif
1737
1738 /* Initialize the default forwarding entry. */
1739 vxlan_ftable_entry_init(sc, &sc->vxl_default_fe, empty_mac,
1740 &sc->vxl_dst_addr.sa, VXLAN_FE_FLAG_STATIC);
1741
1742 VXLAN_WLOCK(sc);
1743 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1744 callout_reset(&sc->vxl_callout, vxlan_ftable_prune_period * hz,
1745 vxlan_timer, sc);
1746 VXLAN_WUNLOCK(sc);
1747
1748 if_link_state_change(ifp, LINK_STATE_UP);
1749
1750 EVENTHANDLER_INVOKE(vxlan_start, ifp, sc->vxl_src_addr.in4.sin_family,
1751 ntohs(sc->vxl_src_addr.in4.sin_port));
1752 out:
1753 vxlan_init_complete(sc);
1754 sx_xunlock(&vxlan_sx);
1755 }
1756
1757 static void
1758 vxlan_release(struct vxlan_softc *sc)
1759 {
1760
1761 /*
1762 * The softc may be destroyed as soon as we release our reference,
1763 * so we cannot serialize the wakeup with the softc lock. We use a
1764 * timeout in our sleeps so a missed wakeup is unfortunate but not
1765 * fatal.
1766 */
1767 if (VXLAN_RELEASE(sc) != 0)
1768 wakeup(sc);
1769 }
1770
1771 static void
1772 vxlan_teardown_wait(struct vxlan_softc *sc)
1773 {
1774
1775 VXLAN_LOCK_WASSERT(sc);
1776 while (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
1777 rm_sleep(sc, &sc->vxl_lock, 0, "vxltrn", hz);
1778 }
1779
1780 static void
1781 vxlan_teardown_complete(struct vxlan_softc *sc)
1782 {
1783
1784 VXLAN_WLOCK(sc);
1785 sc->vxl_flags &= ~VXLAN_FLAG_TEARDOWN;
1786 wakeup(sc);
1787 VXLAN_WUNLOCK(sc);
1788 }
1789
1790 static void
1791 vxlan_teardown_locked(struct vxlan_softc *sc)
1792 {
1793 struct ifnet *ifp;
1794 struct vxlan_socket *vso;
1795
1796 sx_assert(&vxlan_sx, SA_XLOCKED);
1797 VXLAN_LOCK_WASSERT(sc);
1798 MPASS(sc->vxl_flags & VXLAN_FLAG_TEARDOWN);
1799
1800 ifp = sc->vxl_ifp;
1801 ifp->if_flags &= ~IFF_UP;
1802 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1803 callout_stop(&sc->vxl_callout);
1804 vso = sc->vxl_sock;
1805 sc->vxl_sock = NULL;
1806
1807 VXLAN_WUNLOCK(sc);
1808 if_link_state_change(ifp, LINK_STATE_DOWN);
1809 EVENTHANDLER_INVOKE(vxlan_stop, ifp, sc->vxl_src_addr.in4.sin_family,
1810 ntohs(sc->vxl_src_addr.in4.sin_port));
1811
1812 if (vso != NULL) {
1813 vxlan_socket_remove_softc(vso, sc);
1814
1815 if (sc->vxl_vso_mc_index != -1) {
1816 vxlan_socket_mc_release_group_by_idx(vso,
1817 sc->vxl_vso_mc_index);
1818 sc->vxl_vso_mc_index = -1;
1819 }
1820 }
1821
1822 VXLAN_WLOCK(sc);
1823 while (sc->vxl_refcnt != 0)
1824 rm_sleep(sc, &sc->vxl_lock, 0, "vxldrn", hz);
1825 VXLAN_WUNLOCK(sc);
1826
1827 callout_drain(&sc->vxl_callout);
1828
1829 vxlan_free_multicast(sc);
1830 if (vso != NULL)
1831 vxlan_socket_release(vso);
1832
1833 vxlan_teardown_complete(sc);
1834 }
1835
1836 static void
1837 vxlan_teardown(struct vxlan_softc *sc)
1838 {
1839
1840 sx_xlock(&vxlan_sx);
1841 VXLAN_WLOCK(sc);
1842 if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN) {
1843 vxlan_teardown_wait(sc);
1844 VXLAN_WUNLOCK(sc);
1845 sx_xunlock(&vxlan_sx);
1846 return;
1847 }
1848
1849 sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
1850 vxlan_teardown_locked(sc);
1851 sx_xunlock(&vxlan_sx);
1852 }
1853
1854 static void
1855 vxlan_ifdetach(struct vxlan_softc *sc, struct ifnet *ifp,
1856 struct vxlan_softc_head *list)
1857 {
1858
1859 VXLAN_WLOCK(sc);
1860
1861 if (sc->vxl_mc_ifp != ifp)
1862 goto out;
1863 if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
1864 goto out;
1865
1866 sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
1867 LIST_INSERT_HEAD(list, sc, vxl_ifdetach_list);
1868
1869 out:
1870 VXLAN_WUNLOCK(sc);
1871 }
1872
1873 static void
1874 vxlan_timer(void *xsc)
1875 {
1876 struct vxlan_softc *sc;
1877
1878 sc = xsc;
1879 VXLAN_LOCK_WASSERT(sc);
1880
1881 vxlan_ftable_expire(sc);
1882 callout_schedule(&sc->vxl_callout, vxlan_ftable_prune_period * hz);
1883 }
1884
1885 static int
1886 vxlan_ioctl_ifflags(struct vxlan_softc *sc)
1887 {
1888 struct ifnet *ifp;
1889
1890 ifp = sc->vxl_ifp;
1891
1892 if (ifp->if_flags & IFF_UP) {
1893 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1894 vxlan_init(sc);
1895 } else {
1896 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1897 vxlan_teardown(sc);
1898 }
1899
1900 return (0);
1901 }
1902
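/*
 * SIOCGDRVSPEC handler: copy a snapshot of the running configuration
 * out to userland, recovering IPv6 scope zone IDs for display.
 */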
1903 static int
1904 vxlan_ctrl_get_config(struct vxlan_softc *sc, void *arg)
1905 {
1906 struct rm_priotracker tracker;
1907 struct ifvxlancfg *cfg;
1908
1909 cfg = arg;
1910 bzero(cfg, sizeof(*cfg));
1911
1912 VXLAN_RLOCK(sc, &tracker);
1913 cfg->vxlc_vni = sc->vxl_vni;
1914 memcpy(&cfg->vxlc_local_sa, &sc->vxl_src_addr,
1915 sizeof(union vxlan_sockaddr));
1916 memcpy(&cfg->vxlc_remote_sa, &sc->vxl_dst_addr,
1917 sizeof(union vxlan_sockaddr));
1918 cfg->vxlc_mc_ifindex = sc->vxl_mc_ifindex;
1919 cfg->vxlc_ftable_cnt = sc->vxl_ftable_cnt;
1920 cfg->vxlc_ftable_max = sc->vxl_ftable_max;
1921 cfg->vxlc_ftable_timeout = sc->vxl_ftable_timeout;
1922 cfg->vxlc_port_min = sc->vxl_min_port;
1923 cfg->vxlc_port_max = sc->vxl_max_port;
1924 cfg->vxlc_learn = (sc->vxl_flags & VXLAN_FLAG_LEARN) != 0;
1925 cfg->vxlc_ttl = sc->vxl_ttl;
1926 VXLAN_RUNLOCK(sc, &tracker);
1927
1928 #ifdef INET6
1929 if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_local_sa))
1930 sa6_recoverscope(&cfg->vxlc_local_sa.in6);
1931 if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_remote_sa))
1932 sa6_recoverscope(&cfg->vxlc_remote_sa.in6);
1933 #endif
1934
1935 return (0);
1936 }
1937
1938 static int
1939 vxlan_ctrl_set_vni(struct vxlan_softc *sc, void *arg)
1940 {
1941 struct ifvxlancmd *cmd;
1942 int error;
1943
1944 cmd = arg;
1945
1946 if (vxlan_check_vni(cmd->vxlcmd_vni) != 0)
1947 return (EINVAL);
1948
1949 VXLAN_WLOCK(sc);
1950 if (vxlan_can_change_config(sc)) {
1951 sc->vxl_vni = cmd->vxlcmd_vni;
1952 error = 0;
1953 } else
1954 error = EBUSY;
1955 VXLAN_WUNLOCK(sc);
1956
1957 return (error);
1958 }
1959
1960 static int
1961 vxlan_ctrl_set_local_addr(struct vxlan_softc *sc, void *arg)
1962 {
1963 struct ifvxlancmd *cmd;
1964 union vxlan_sockaddr *vxlsa;
1965 int error;
1966
1967 cmd = arg;
1968 vxlsa = &cmd->vxlcmd_sa;
1969
1970 if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
1971 return (EINVAL);
1972 if (vxlan_sockaddr_in_multicast(vxlsa) != 0)
1973 return (EINVAL);
1974 if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
1975 error = vxlan_sockaddr_in6_embedscope(vxlsa);
1976 if (error)
1977 return (error);
1978 }
1979
1980 VXLAN_WLOCK(sc);
1981 if (vxlan_can_change_config(sc)) {
1982 vxlan_sockaddr_in_copy(&sc->vxl_src_addr, &vxlsa->sa);
1983 vxlan_set_hwcaps(sc);
1984 error = 0;
1985 } else
1986 error = EBUSY;
1987 VXLAN_WUNLOCK(sc);
1988
1989 return (error);
1990 }
1991
1992 static int
1993 vxlan_ctrl_set_remote_addr(struct vxlan_softc *sc, void *arg)
1994 {
1995 struct ifvxlancmd *cmd;
1996 union vxlan_sockaddr *vxlsa;
1997 int error;
1998
1999 cmd = arg;
2000 vxlsa = &cmd->vxlcmd_sa;
2001
2002 if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
2003 return (EINVAL);
2004 if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
2005 error = vxlan_sockaddr_in6_embedscope(vxlsa);
2006 if (error)
2007 return (error);
2008 }
2009
2010 VXLAN_WLOCK(sc);
2011 if (vxlan_can_change_config(sc)) {
2012 vxlan_sockaddr_in_copy(&sc->vxl_dst_addr, &vxlsa->sa);
2013 vxlan_setup_interface_hdrlen(sc);
2014 error = 0;
2015 } else
2016 error = EBUSY;
2017 VXLAN_WUNLOCK(sc);
2018
2019 return (error);
2020 }
2021
2022 static int
2023 vxlan_ctrl_set_local_port(struct vxlan_softc *sc, void *arg)
2024 {
2025 struct ifvxlancmd *cmd;
2026 int error;
2027
2028 cmd = arg;
2029
2030 if (cmd->vxlcmd_port == 0)
2031 return (EINVAL);
2032
2033 VXLAN_WLOCK(sc);
2034 if (vxlan_can_change_config(sc)) {
2035 sc->vxl_src_addr.in4.sin_port = htons(cmd->vxlcmd_port);
2036 error = 0;
2037 } else
2038 error = EBUSY;
2039 VXLAN_WUNLOCK(sc);
2040
2041 return (error);
2042 }
2043
2044 static int
2045 vxlan_ctrl_set_remote_port(struct vxlan_softc *sc, void *arg)
2046 {
2047 struct ifvxlancmd *cmd;
2048 int error;
2049
2050 cmd = arg;
2051
2052 if (cmd->vxlcmd_port == 0)
2053 return (EINVAL);
2054
2055 VXLAN_WLOCK(sc);
2056 if (vxlan_can_change_config(sc)) {
2057 sc->vxl_dst_addr.in4.sin_port = htons(cmd->vxlcmd_port);
2058 error = 0;
2059 } else
2060 error = EBUSY;
2061 VXLAN_WUNLOCK(sc);
2062
2063 return (error);
2064 }
2065
2066 static int
2067 vxlan_ctrl_set_port_range(struct vxlan_softc *sc, void *arg)
2068 {
2069 struct ifvxlancmd *cmd;
2070 uint16_t min, max;
2071 int error;
2072
2073 cmd = arg;
2074 min = cmd->vxlcmd_port_min;
2075 max = cmd->vxlcmd_port_max;
2076
2077 if (max < min)
2078 return (EINVAL);
2079
2080 VXLAN_WLOCK(sc);
2081 if (vxlan_can_change_config(sc)) {
2082 sc->vxl_min_port = min;
2083 sc->vxl_max_port = max;
2084 error = 0;
2085 } else
2086 error = EBUSY;
2087 VXLAN_WUNLOCK(sc);
2088
2089 return (error);
2090 }
2091
2092 static int
2093 vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *sc, void *arg)
2094 {
2095 struct ifvxlancmd *cmd;
2096 int error;
2097
2098 cmd = arg;
2099
2100 VXLAN_WLOCK(sc);
2101 if (vxlan_check_ftable_timeout(cmd->vxlcmd_ftable_timeout) == 0) {
2102 sc->vxl_ftable_timeout = cmd->vxlcmd_ftable_timeout;
2103 error = 0;
2104 } else
2105 error = EINVAL;
2106 VXLAN_WUNLOCK(sc);
2107
2108 return (error);
2109 }
2110
2111 static int
2112 vxlan_ctrl_set_ftable_max(struct vxlan_softc *sc, void *arg)
2113 {
2114 struct ifvxlancmd *cmd;
2115 int error;
2116
2117 cmd = arg;
2118
2119 VXLAN_WLOCK(sc);
2120 if (vxlan_check_ftable_max(cmd->vxlcmd_ftable_max) == 0) {
2121 sc->vxl_ftable_max = cmd->vxlcmd_ftable_max;
2122 error = 0;
2123 } else
2124 error = EINVAL;
2125 VXLAN_WUNLOCK(sc);
2126
2127 return (error);
2128 }
2129
2130 static int
2131 vxlan_ctrl_set_multicast_if(struct vxlan_softc *sc, void *arg)
2132 {
2133 struct ifvxlancmd *cmd;
2134 int error;
2135
2136 cmd = arg;
2137
2138 VXLAN_WLOCK(sc);
2139 if (vxlan_can_change_config(sc)) {
2140 strlcpy(sc->vxl_mc_ifname, cmd->vxlcmd_ifname, IFNAMSIZ);
2141 vxlan_set_hwcaps(sc);
2142 error = 0;
2143 } else
2144 error = EBUSY;
2145 VXLAN_WUNLOCK(sc);
2146
2147 return (error);
2148 }
2149
2150 static int
2151 vxlan_ctrl_set_ttl(struct vxlan_softc *sc, void *arg)
2152 {
2153 struct ifvxlancmd *cmd;
2154 int error;
2155
2156 cmd = arg;
2157
2158 VXLAN_WLOCK(sc);
2159 if (vxlan_check_ttl(cmd->vxlcmd_ttl) == 0) {
2160 sc->vxl_ttl = cmd->vxlcmd_ttl;
2161 if (sc->vxl_im4o != NULL)
2162 sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
2163 if (sc->vxl_im6o != NULL)
2164 sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
2165 error = 0;
2166 } else
2167 error = EINVAL;
2168 VXLAN_WUNLOCK(sc);
2169
2170 return (error);
2171 }
2172
2173 static int
2174 vxlan_ctrl_set_learn(struct vxlan_softc *sc, void *arg)
2175 {
2176 struct ifvxlancmd *cmd;
2177
2178 cmd = arg;
2179
2180 VXLAN_WLOCK(sc);
2181 if (cmd->vxlcmd_flags & VXLAN_CMD_FLAG_LEARN)
2182 sc->vxl_flags |= VXLAN_FLAG_LEARN;
2183 else
2184 sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
2185 VXLAN_WUNLOCK(sc);
2186
2187 return (0);
2188 }
2189
2190 static int
2191 vxlan_ctrl_ftable_entry_add(struct vxlan_softc *sc, void *arg)
2192 {
2193 union vxlan_sockaddr vxlsa;
2194 struct ifvxlancmd *cmd;
2195 struct vxlan_ftable_entry *fe;
2196 int error;
2197
2198 cmd = arg;
2199 vxlsa = cmd->vxlcmd_sa;
2200
2201 if (!VXLAN_SOCKADDR_IS_IPV46(&vxlsa))
2202 return (EINVAL);
2203 if (vxlan_sockaddr_in_any(&vxlsa) != 0)
2204 return (EINVAL);
2205 if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
2206 return (EINVAL);
2207 /* BMV: We could support both IPv4 and IPv6 later. */
2208 if (vxlsa.sa.sa_family != sc->vxl_dst_addr.sa.sa_family)
2209 return (EAFNOSUPPORT);
2210
2211 if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
2212 error = vxlan_sockaddr_in6_embedscope(&vxlsa);
2213 if (error)
2214 return (error);
2215 }
2216
2217 fe = vxlan_ftable_entry_alloc();
2218 if (fe == NULL)
2219 return (ENOMEM);
2220
2221 if (vxlsa.in4.sin_port == 0)
2222 vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
2223
2224 vxlan_ftable_entry_init(sc, fe, cmd->vxlcmd_mac, &vxlsa.sa,
2225 VXLAN_FE_FLAG_STATIC);
2226
2227 VXLAN_WLOCK(sc);
2228 error = vxlan_ftable_entry_insert(sc, fe);
2229 VXLAN_WUNLOCK(sc);
2230
2231 if (error)
2232 vxlan_ftable_entry_free(fe);
2233
2234 return (error);
2235 }
2236
2237 static int
2238 vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *sc, void *arg)
2239 {
2240 struct ifvxlancmd *cmd;
2241 struct vxlan_ftable_entry *fe;
2242 int error;
2243
2244 cmd = arg;
2245
2246 VXLAN_WLOCK(sc);
2247 fe = vxlan_ftable_entry_lookup(sc, cmd->vxlcmd_mac);
2248 if (fe != NULL) {
2249 vxlan_ftable_entry_destroy(sc, fe);
2250 error = 0;
2251 } else
2252 error = ENOENT;
2253 VXLAN_WUNLOCK(sc);
2254
2255 return (error);
2256 }
2257
2258 static int
2259 vxlan_ctrl_flush(struct vxlan_softc *sc, void *arg)
2260 {
2261 struct ifvxlancmd *cmd;
2262 int all;
2263
2264 cmd = arg;
2265 all = cmd->vxlcmd_flags & VXLAN_CMD_FLAG_FLUSH_ALL;
2266
2267 VXLAN_WLOCK(sc);
2268 vxlan_ftable_flush(sc, all);
2269 VXLAN_WUNLOCK(sc);
2270
2271 return (0);
2272 }
2273
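/*
 * Dispatch a SIOCGDRVSPEC/SIOCSDRVSPEC request (this is how ifconfig(8)
 * configures vxlan-specific parameters) through the control table:
 * validate the command index and argument size, enforce PRIV_NET_VXLAN
 * where required, and copy arguments in and out as the table's flags
 * dictate.
 */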
2274 static int
2275 vxlan_ioctl_drvspec(struct vxlan_softc *sc, struct ifdrv *ifd, int get)
2276 {
2277 const struct vxlan_control *vc;
2278 union {
2279 struct ifvxlancfg cfg;
2280 struct ifvxlancmd cmd;
2281 } args;
2282 int out, error;
2283
2284 if (ifd->ifd_cmd >= vxlan_control_table_size)
2285 return (EINVAL);
2286
2287 bzero(&args, sizeof(args));
2288 vc = &vxlan_control_table[ifd->ifd_cmd];
2289 out = (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) != 0;
2290
2291 if ((get != 0 && out == 0) || (get == 0 && out != 0))
2292 return (EINVAL);
2293
2294 if (vc->vxlc_flags & VXLAN_CTRL_FLAG_SUSER) {
2295 error = priv_check(curthread, PRIV_NET_VXLAN);
2296 if (error)
2297 return (error);
2298 }
2299
2300 if (ifd->ifd_len != vc->vxlc_argsize ||
2301 ifd->ifd_len > sizeof(args))
2302 return (EINVAL);
2303
2304 if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYIN) {
2305 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
2306 if (error)
2307 return (error);
2308 }
2309
2310 error = vc->vxlc_func(sc, &args);
2311 if (error)
2312 return (error);
2313
2314 if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) {
2315 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
2316 if (error)
2317 return (error);
2318 }
2319
2320 return (0);
2321 }
2322
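/*
 * Interface ioctl entry point. Driver-specific configuration goes
 * through SIOC[GS]DRVSPEC above; the rest is standard ifnet/ether
 * handling (flags, media, MTU, capabilities).
 */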
2323 static int
2324 vxlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2325 {
2326 struct vxlan_softc *sc;
2327 struct ifreq *ifr;
2328 struct ifdrv *ifd;
2329 int error;
2330
2331 sc = ifp->if_softc;
2332 ifr = (struct ifreq *) data;
2333 ifd = (struct ifdrv *) data;
2334
2335 error = 0;
2336
2337 switch (cmd) {
2338 case SIOCADDMULTI:
2339 case SIOCDELMULTI:
2340 break;
2341
2342 case SIOCGDRVSPEC:
2343 case SIOCSDRVSPEC:
2344 error = vxlan_ioctl_drvspec(sc, ifd, cmd == SIOCGDRVSPEC);
2345 break;
2346
2347 case SIOCSIFFLAGS:
2348 error = vxlan_ioctl_ifflags(sc);
2349 break;
2350
2351 case SIOCSIFMEDIA:
2352 case SIOCGIFMEDIA:
2353 error = ifmedia_ioctl(ifp, ifr, &sc->vxl_media, cmd);
2354 break;
2355
2356 case SIOCSIFMTU:
2357 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VXLAN_MAX_MTU)
2358 error = EINVAL;
2359 else
2360 ifp->if_mtu = ifr->ifr_mtu;
2361 break;
2362
2363 case SIOCSIFCAP:
2364 VXLAN_WLOCK(sc);
2365 error = vxlan_set_reqcap(sc, ifp, ifr->ifr_reqcap);
2366 if (error == 0)
2367 vxlan_set_hwcaps(sc);
2368 VXLAN_WUNLOCK(sc);
2369 break;
2370
2371 default:
2372 error = ether_ioctl(ifp, cmd, data);
2373 break;
2374 }
2375
2376 return (error);
2377 }
2378
2379 #if defined(INET) || defined(INET6)
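/*
 * Choose the outer UDP source port from the configured range. Prefer
 * the mbuf's flow id when a valid hash type is present so a flow keeps
 * a stable port; otherwise hash the inner Ethernet header. Varying the
 * source port per flow lets RSS and ECMP spread tunnelled traffic.
 */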
2380 static uint16_t
2381 vxlan_pick_source_port(struct vxlan_softc *sc, struct mbuf *m)
2382 {
2383 int range;
2384 uint32_t hash;
2385
2386 range = sc->vxl_max_port - sc->vxl_min_port + 1;
2387
2388 if (M_HASHTYPE_ISHASH(m))
2389 hash = m->m_pkthdr.flowid;
2390 else
2391 hash = jenkins_hash(m->m_data, ETHER_HDR_LEN,
2392 sc->vxl_port_hash_key);
2393
2394 return (sc->vxl_min_port + (hash % range));
2395 }
2396
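/*
 * Fill in the outer UDP and VXLAN headers at offset 'ipoff'. The UDP
 * checksum is left at zero here; vxlan_encap6() computes it later for
 * IPv6 when the destination port requires one.
 */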
2397 static void
2398 vxlan_encap_header(struct vxlan_softc *sc, struct mbuf *m, int ipoff,
2399 uint16_t srcport, uint16_t dstport)
2400 {
2401 struct vxlanudphdr *hdr;
2402 struct udphdr *udph;
2403 struct vxlan_header *vxh;
2404 int len;
2405
2406 len = m->m_pkthdr.len - ipoff;
2407 MPASS(len >= sizeof(struct vxlanudphdr));
2408 hdr = mtodo(m, ipoff);
2409
2410 udph = &hdr->vxlh_udp;
2411 udph->uh_sport = srcport;
2412 udph->uh_dport = dstport;
2413 udph->uh_ulen = htons(len);
2414 udph->uh_sum = 0;
2415
2416 vxh = &hdr->vxlh_hdr;
2417 vxh->vxlh_flags = htonl(VXLAN_HDR_FLAGS_VALID_VNI);
2418 vxh->vxlh_vni = htonl(sc->vxl_vni << VXLAN_HDR_VNI_SHIFT);
2419 }
2420 #endif
2421
2422 /*
2423 * Return the CSUM_INNER_* equivalent of CSUM_* caps.
2424 */
2425 static uint32_t
2426 csum_flags_to_inner_flags(uint32_t csum_flags_in, const uint32_t encap)
2427 {
2428 uint32_t csum_flags = encap;
2429 const uint32_t v4 = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP;
2430
2431 /*
2432 * csum_flags can request either v4 or v6 offload but not both.
2433 * tcp_output always sets CSUM_TSO (both CSUM_IP_TSO and CSUM_IP6_TSO)
2434 * so those bits are no good to detect the IP version. Other bits are
2435 * always set with CSUM_TSO and we use those to figure out the IP
2436 * version.
2437 */
2438 if (csum_flags_in & v4) {
2439 if (csum_flags_in & CSUM_IP)
2440 csum_flags |= CSUM_INNER_IP;
2441 if (csum_flags_in & CSUM_IP_UDP)
2442 csum_flags |= CSUM_INNER_IP_UDP;
2443 if (csum_flags_in & CSUM_IP_TCP)
2444 csum_flags |= CSUM_INNER_IP_TCP;
2445 if (csum_flags_in & CSUM_IP_TSO)
2446 csum_flags |= CSUM_INNER_IP_TSO;
2447 } else {
2448 #ifdef INVARIANTS
2449 const uint32_t v6 = CSUM_IP6_UDP | CSUM_IP6_TCP;
2450
2451 MPASS((csum_flags_in & v6) != 0);
2452 #endif
2453 if (csum_flags_in & CSUM_IP6_UDP)
2454 csum_flags |= CSUM_INNER_IP6_UDP;
2455 if (csum_flags_in & CSUM_IP6_TCP)
2456 csum_flags |= CSUM_INNER_IP6_TCP;
2457 if (csum_flags_in & CSUM_IP6_TSO)
2458 csum_flags |= CSUM_INNER_IP6_TSO;
2459 }
2460
2461 return (csum_flags);
2462 }
2463
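/*
 * IPv4 encapsulation: prepend the outer IP/UDP/VXLAN headers, convert
 * any inner checksum/TSO offload request into its CSUM_INNER_*
 * equivalent, verify that the nexthop interface advertises those bits
 * in if_hwassist, and hand the packet to ip_output().
 */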
2464 static int
2465 vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
2466 struct mbuf *m)
2467 {
2468 #ifdef INET
2469 struct ifnet *ifp;
2470 struct ip *ip;
2471 struct in_addr srcaddr, dstaddr;
2472 uint16_t srcport, dstport;
2473 int len, mcast, error;
2474 struct route route, *ro;
2475 struct sockaddr_in *sin;
2476 uint32_t csum_flags;
2477
2478 NET_EPOCH_ASSERT();
2479
2480 ifp = sc->vxl_ifp;
2481 srcaddr = sc->vxl_src_addr.in4.sin_addr;
2482 srcport = vxlan_pick_source_port(sc, m);
2483 dstaddr = fvxlsa->in4.sin_addr;
2484 dstport = fvxlsa->in4.sin_port;
2485
2486 M_PREPEND(m, sizeof(struct ip) + sizeof(struct vxlanudphdr),
2487 M_NOWAIT);
2488 if (m == NULL) {
2489 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2490 return (ENOBUFS);
2491 }
2492
2493 len = m->m_pkthdr.len;
2494
2495 ip = mtod(m, struct ip *);
2496 ip->ip_tos = 0;
2497 ip->ip_len = htons(len);
2498 ip->ip_off = 0;
2499 ip->ip_ttl = sc->vxl_ttl;
2500 ip->ip_p = IPPROTO_UDP;
2501 ip->ip_sum = 0;
2502 ip->ip_src = srcaddr;
2503 ip->ip_dst = dstaddr;
2504
2505 vxlan_encap_header(sc, m, sizeof(struct ip), srcport, dstport);
2506
2507 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
2508 m->m_flags &= ~(M_MCAST | M_BCAST);
2509
2510 m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
2511 if (m->m_pkthdr.csum_flags != 0) {
2512 /*
2513 * HW checksum (L3 and/or L4) or TSO has been requested. Look
2514 * up the ifnet for the outbound route and verify that the
2515 * outbound ifnet can perform the requested operation on the
2516 * inner frame.
2517 */
2518 bzero(&route, sizeof(route));
2519 ro = &route;
2520 sin = (struct sockaddr_in *)&ro->ro_dst;
2521 sin->sin_family = AF_INET;
2522 sin->sin_len = sizeof(*sin);
2523 sin->sin_addr = ip->ip_dst;
2524 ro->ro_nh = fib4_lookup(RT_DEFAULT_FIB, ip->ip_dst, 0, NHR_NONE,
2525 0);
2526 if (ro->ro_nh == NULL) {
2527 m_freem(m);
2528 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2529 return (EHOSTUNREACH);
2530 }
2531
2532 csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
2533 CSUM_ENCAP_VXLAN);
2534 if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
2535 csum_flags) {
2536 if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
2537 const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
2538
2539 if_printf(ifp, "interface %s is missing hwcaps "
2540 "0x%08x, csum_flags 0x%08x -> 0x%08x, "
2541 "hwassist 0x%08x\n", nh_ifp->if_xname,
2542 csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
2543 m->m_pkthdr.csum_flags, csum_flags,
2544 (uint32_t)nh_ifp->if_hwassist);
2545 }
2546 m_freem(m);
2547 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2548 return (ENXIO);
2549 }
2550 m->m_pkthdr.csum_flags = csum_flags;
2551 if (csum_flags &
2552 (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
2553 CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
2554 counter_u64_add(sc->vxl_stats.txcsum, 1);
2555 if (csum_flags & CSUM_INNER_TSO)
2556 counter_u64_add(sc->vxl_stats.tso, 1);
2557 }
2558 } else
2559 ro = NULL;
2560 error = ip_output(m, NULL, ro, 0, sc->vxl_im4o, NULL);
2561 if (error == 0) {
2562 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2563 if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
2564 if (mcast != 0)
2565 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
2566 } else
2567 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2568
2569 return (error);
2570 #else
2571 m_freem(m);
2572 return (ENOTSUP);
2573 #endif
2574 }
2575
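/*
 * IPv6 encapsulation: as vxlan_encap4(), except that when no inner
 * offload is requested the outer UDP checksum is filled in unless the
 * destination port matches the configured zero-checksum port (in the
 * spirit of RFC 6935's tunnel checksum relaxation).
 */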
2576 static int
2577 vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
2578 struct mbuf *m)
2579 {
2580 #ifdef INET6
2581 struct ifnet *ifp;
2582 struct ip6_hdr *ip6;
2583 const struct in6_addr *srcaddr, *dstaddr;
2584 uint16_t srcport, dstport;
2585 int len, mcast, error;
2586 struct route_in6 route, *ro;
2587 struct sockaddr_in6 *sin6;
2588 uint32_t csum_flags;
2589
2590 NET_EPOCH_ASSERT();
2591
2592 ifp = sc->vxl_ifp;
2593 srcaddr = &sc->vxl_src_addr.in6.sin6_addr;
2594 srcport = vxlan_pick_source_port(sc, m);
2595 dstaddr = &fvxlsa->in6.sin6_addr;
2596 dstport = fvxlsa->in6.sin6_port;
2597
2598 M_PREPEND(m, sizeof(struct ip6_hdr) + sizeof(struct vxlanudphdr),
2599 M_NOWAIT);
2600 if (m == NULL) {
2601 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2602 return (ENOBUFS);
2603 }
2604
2605 len = m->m_pkthdr.len;
2606
2607 ip6 = mtod(m, struct ip6_hdr *);
2608 ip6->ip6_flow = 0; /* BMV: Keep in forwarding entry? */
2609 ip6->ip6_vfc = IPV6_VERSION;
2610 ip6->ip6_plen = 0;
2611 ip6->ip6_nxt = IPPROTO_UDP;
2612 ip6->ip6_hlim = sc->vxl_ttl;
2613 ip6->ip6_src = *srcaddr;
2614 ip6->ip6_dst = *dstaddr;
2615
2616 vxlan_encap_header(sc, m, sizeof(struct ip6_hdr), srcport, dstport);
2617
2618 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
2619 m->m_flags &= ~(M_MCAST | M_BCAST);
2620
2621 ro = NULL;
2622 m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
2623 if (m->m_pkthdr.csum_flags != 0) {
2624 /*
2625 * HW checksum (L3 and/or L4) or TSO has been requested. Look
2626 * up the ifnet for the outbound route and verify that the
2627 * outbound ifnet can perform the requested operation on the
2628 * inner frame.
2629 */
2630 bzero(&route, sizeof(route));
2631 ro = &route;
2632 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
2633 sin6->sin6_family = AF_INET6;
2634 sin6->sin6_len = sizeof(*sin6);
2635 sin6->sin6_addr = ip6->ip6_dst;
2636 ro->ro_nh = fib6_lookup(RT_DEFAULT_FIB, &ip6->ip6_dst, 0,
2637 NHR_NONE, 0);
2638 if (ro->ro_nh == NULL) {
2639 m_freem(m);
2640 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2641 return (EHOSTUNREACH);
2642 }
2643
2644 csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
2645 CSUM_ENCAP_VXLAN);
2646 if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
2647 csum_flags) {
2648 if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
2649 const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
2650
2651 if_printf(ifp, "interface %s is missing hwcaps "
2652 "0x%08x, csum_flags 0x%08x -> 0x%08x, "
2653 "hwassist 0x%08x\n", nh_ifp->if_xname,
2654 csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
2655 m->m_pkthdr.csum_flags, csum_flags,
2656 (uint32_t)nh_ifp->if_hwassist);
2657 }
2658 m_freem(m);
2659 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2660 return (ENXIO);
2661 }
2662 m->m_pkthdr.csum_flags = csum_flags;
2663 if (csum_flags &
2664 (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
2665 CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
2666 counter_u64_add(sc->vxl_stats.txcsum, 1);
2667 if (csum_flags & CSUM_INNER_TSO)
2668 counter_u64_add(sc->vxl_stats.tso, 1);
2669 }
2670 } else if (ntohs(dstport) != V_zero_checksum_port) {
2671 struct udphdr *hdr = mtodo(m, sizeof(struct ip6_hdr));
2672
2673 hdr->uh_sum = in6_cksum_pseudo(ip6,
2674 m->m_pkthdr.len - sizeof(struct ip6_hdr), IPPROTO_UDP, 0);
2675 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
2676 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2677 }
2678 error = ip6_output(m, NULL, ro, 0, sc->vxl_im6o, NULL, NULL);
2679 if (error == 0) {
2680 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2681 if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
2682 if (mcast != 0)
2683 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
2684 } else
2685 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2686
2687 return (error);
2688 #else
2689 m_freem(m);
2690 return (ENOTSUP);
2691 #endif
2692 }
2693
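/*
 * if_transmit method: look up the forwarding entry for the inner
 * destination MAC (the default entry covers broadcast/multicast and
 * lookup misses), take a reference on the softc so teardown waits for
 * us, and encapsulate over IPv4 or IPv6 as the entry's address
 * dictates.
 */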
2694 static int
2695 vxlan_transmit(struct ifnet *ifp, struct mbuf *m)
2696 {
2697 struct rm_priotracker tracker;
2698 union vxlan_sockaddr vxlsa;
2699 struct vxlan_softc *sc;
2700 struct vxlan_ftable_entry *fe;
2701 struct ifnet *mcifp;
2702 struct ether_header *eh;
2703 int ipv4, error;
2704
2705 sc = ifp->if_softc;
2706 eh = mtod(m, struct ether_header *);
2707 fe = NULL;
2708 mcifp = NULL;
2709
2710 ETHER_BPF_MTAP(ifp, m);
2711
2712 VXLAN_RLOCK(sc, &tracker);
2713 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2714 VXLAN_RUNLOCK(sc, &tracker);
2715 m_freem(m);
2716 return (ENETDOWN);
2717 }
2718
2719 if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
2720 fe = vxlan_ftable_entry_lookup(sc, eh->ether_dhost);
2721 if (fe == NULL)
2722 fe = &sc->vxl_default_fe;
2723 vxlan_sockaddr_copy(&vxlsa, &fe->vxlfe_raddr.sa);
2724
2725 ipv4 = VXLAN_SOCKADDR_IS_IPV4(&vxlsa) != 0;
2726 if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
2727 mcifp = vxlan_multicast_if_ref(sc, ipv4);
2728
2729 VXLAN_ACQUIRE(sc);
2730 VXLAN_RUNLOCK(sc, &tracker);
2731
2732 if (ipv4 != 0)
2733 error = vxlan_encap4(sc, &vxlsa, m);
2734 else
2735 error = vxlan_encap6(sc, &vxlsa, m);
2736
2737 vxlan_release(sc);
2738 if (mcifp != NULL)
2739 if_rele(mcifp);
2740
2741 return (error);
2742 }
2743
2744 static void
2745 vxlan_qflush(struct ifnet *ifp __unused)
2746 {
2747 }
2748
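/*
 * UDP tunnel receive callback: validate the VXLAN header (drop if any
 * reserved flag or VNI bit is set), extract the VNI, strip the outer
 * headers, and pass the inner frame to vxlan_input().
 */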
2749 static void
2750 vxlan_rcv_udp_packet(struct mbuf *m, int offset, struct inpcb *inpcb,
2751 const struct sockaddr *srcsa, void *xvso)
2752 {
2753 struct vxlan_socket *vso;
2754 struct vxlan_header *vxh, vxlanhdr;
2755 uint32_t vni;
2756 int error __unused;
2757
2758 M_ASSERTPKTHDR(m);
2759 vso = xvso;
2760 offset += sizeof(struct udphdr);
2761
2762 if (m->m_pkthdr.len < offset + sizeof(struct vxlan_header))
2763 goto out;
2764
2765 if (__predict_false(m->m_len < offset + sizeof(struct vxlan_header))) {
2766 m_copydata(m, offset, sizeof(struct vxlan_header),
2767 (caddr_t) &vxlanhdr);
2768 vxh = &vxlanhdr;
2769 } else
2770 vxh = mtodo(m, offset);
2771
2772 /*
2773 * Drop if there is a reserved bit set in either the flags or VNI
2774 * fields of the header. This goes against the specification, but
2775 * a bit set may indicate an unsupported new feature. This matches
2776 * the behavior of the Linux implementation.
2777 */
2778 if (vxh->vxlh_flags != htonl(VXLAN_HDR_FLAGS_VALID_VNI) ||
2779 vxh->vxlh_vni & ~VXLAN_VNI_MASK)
2780 goto out;
2781
2782 vni = ntohl(vxh->vxlh_vni) >> VXLAN_HDR_VNI_SHIFT;
2783 /* Adjust to the start of the inner Ethernet frame. */
2784 m_adj(m, offset + sizeof(struct vxlan_header));
2785
2786 error = vxlan_input(vso, vni, &m, srcsa);
2787 MPASS(error != 0 || m == NULL);
2788
2789 out:
2790 if (m != NULL)
2791 m_freem(m);
2792 }
2793
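/*
 * Deliver a decapsulated frame to the interface registered for this
 * VNI: optionally learn the source MAC/IP binding, translate inner
 * checksum-offload results to their outer equivalents, and re-inject
 * the frame through netisr as if it arrived on the vxlan interface.
 */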
2794 static int
2795 vxlan_input(struct vxlan_socket *vso, uint32_t vni, struct mbuf **m0,
2796 const struct sockaddr *sa)
2797 {
2798 struct vxlan_softc *sc;
2799 struct ifnet *ifp;
2800 struct mbuf *m;
2801 struct ether_header *eh;
2802 int error;
2803
2804 sc = vxlan_socket_lookup_softc(vso, vni);
2805 if (sc == NULL)
2806 return (ENOENT);
2807
2808 ifp = sc->vxl_ifp;
2809 m = *m0;
2810 eh = mtod(m, struct ether_header *);
2811
2812 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2813 error = ENETDOWN;
2814 goto out;
2815 } else if (ifp == m->m_pkthdr.rcvif) {
2816 /* XXX Does not catch more complex loops. */
2817 error = EDEADLK;
2818 goto out;
2819 }
2820
2821 if (sc->vxl_flags & VXLAN_FLAG_LEARN)
2822 vxlan_ftable_learn(sc, sa, eh->ether_shost);
2823
2824 m_clrprotoflags(m);
2825 m->m_pkthdr.rcvif = ifp;
2826 M_SETFIB(m, ifp->if_fib);
2827 if (((ifp->if_capenable & IFCAP_RXCSUM &&
2828 m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC) ||
2829 (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2830 !(m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)))) {
2831 uint32_t csum_flags = 0;
2832
2833 if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)
2834 csum_flags |= CSUM_L3_CALC;
2835 if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_VALID)
2836 csum_flags |= CSUM_L3_VALID;
2837 if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_CALC)
2838 csum_flags |= CSUM_L4_CALC;
2839 if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_VALID)
2840 csum_flags |= CSUM_L4_VALID;
2841 m->m_pkthdr.csum_flags = csum_flags;
2842 counter_u64_add(sc->vxl_stats.rxcsum, 1);
2843 } else {
2844 /* clear everything */
2845 m->m_pkthdr.csum_flags = 0;
2846 m->m_pkthdr.csum_data = 0;
2847 }
2848
2849 error = netisr_dispatch(NETISR_ETHER, m);
2850 *m0 = NULL;
2851
2852 out:
2853 vxlan_release(sc);
2854 return (error);
2855 }
2856
2857 static int
2858 vxlan_stats_alloc(struct vxlan_softc *sc)
2859 {
2860 struct vxlan_statistics *stats = &sc->vxl_stats;
2861
2862 stats->txcsum = counter_u64_alloc(M_WAITOK);
2863 if (stats->txcsum == NULL)
2864 goto failed;
2865
2866 stats->tso = counter_u64_alloc(M_WAITOK);
2867 if (stats->tso == NULL)
2868 goto failed;
2869
2870 stats->rxcsum = counter_u64_alloc(M_WAITOK);
2871 if (stats->rxcsum == NULL)
2872 goto failed;
2873
2874 return (0);
2875 failed:
2876 vxlan_stats_free(sc);
2877 return (ENOMEM);
2878 }
2879
2880 static void
2881 vxlan_stats_free(struct vxlan_softc *sc)
2882 {
2883 struct vxlan_statistics *stats = &sc->vxl_stats;
2884
2885 if (stats->txcsum != NULL) {
2886 counter_u64_free(stats->txcsum);
2887 stats->txcsum = NULL;
2888 }
2889 if (stats->tso != NULL) {
2890 counter_u64_free(stats->tso);
2891 stats->tso = NULL;
2892 }
2893 if (stats->rxcsum != NULL) {
2894 counter_u64_free(stats->rxcsum);
2895 stats->rxcsum = NULL;
2896 }
2897 }
2898
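/*
 * Defaults applied at clone creation: learning enabled, the VNI left
 * effectively unset (VXLAN_VNI_MAX is used as the sentinel), the
 * standard VXLAN_PORT unless the legacy_port tunable selects
 * VXLAN_LEGACY_PORT, and the system's ephemeral range for source
 * ports.
 */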
2899 static void
2900 vxlan_set_default_config(struct vxlan_softc *sc)
2901 {
2902
2903 sc->vxl_flags |= VXLAN_FLAG_LEARN;
2904
2905 sc->vxl_vni = VXLAN_VNI_MAX;
2906 sc->vxl_ttl = IPDEFTTL;
2907
2908 if (!vxlan_tunable_int(sc, "legacy_port", vxlan_legacy_port)) {
2909 sc->vxl_src_addr.in4.sin_port = htons(VXLAN_PORT);
2910 sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_PORT);
2911 } else {
2912 sc->vxl_src_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
2913 sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
2914 }
2915
2916 sc->vxl_min_port = V_ipport_firstauto;
2917 sc->vxl_max_port = V_ipport_lastauto;
2918
2919 sc->vxl_ftable_max = VXLAN_FTABLE_MAX;
2920 sc->vxl_ftable_timeout = VXLAN_FTABLE_TIMEOUT;
2921 }
2922
2923 static int
2924 vxlan_set_user_config(struct vxlan_softc *sc, struct ifvxlanparam *vxlp)
2925 {
2926
2927 #ifndef INET
2928 if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR4 |
2929 VXLAN_PARAM_WITH_REMOTE_ADDR4))
2930 return (EAFNOSUPPORT);
2931 #endif
2932
2933 #ifndef INET6
2934 if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR6 |
2935 VXLAN_PARAM_WITH_REMOTE_ADDR6))
2936 return (EAFNOSUPPORT);
2937 #else
2938 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
2939 int error = vxlan_sockaddr_in6_embedscope(&vxlp->vxlp_local_sa);
2940 if (error)
2941 return (error);
2942 }
2943 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
2944 int error = vxlan_sockaddr_in6_embedscope(
2945 &vxlp->vxlp_remote_sa);
2946 if (error)
2947 return (error);
2948 }
2949 #endif
2950
2951 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_VNI) {
2952 if (vxlan_check_vni(vxlp->vxlp_vni) == 0)
2953 sc->vxl_vni = vxlp->vxlp_vni;
2954 }
2955
2956 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR4) {
2957 sc->vxl_src_addr.in4.sin_len = sizeof(struct sockaddr_in);
2958 sc->vxl_src_addr.in4.sin_family = AF_INET;
2959 sc->vxl_src_addr.in4.sin_addr =
2960 vxlp->vxlp_local_sa.in4.sin_addr;
2961 } else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
2962 sc->vxl_src_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
2963 sc->vxl_src_addr.in6.sin6_family = AF_INET6;
2964 sc->vxl_src_addr.in6.sin6_addr =
2965 vxlp->vxlp_local_sa.in6.sin6_addr;
2966 }
2967
2968 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR4) {
2969 sc->vxl_dst_addr.in4.sin_len = sizeof(struct sockaddr_in);
2970 sc->vxl_dst_addr.in4.sin_family = AF_INET;
2971 sc->vxl_dst_addr.in4.sin_addr =
2972 vxlp->vxlp_remote_sa.in4.sin_addr;
2973 } else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
2974 sc->vxl_dst_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
2975 sc->vxl_dst_addr.in6.sin6_family = AF_INET6;
2976 sc->vxl_dst_addr.in6.sin6_addr =
2977 vxlp->vxlp_remote_sa.in6.sin6_addr;
2978 }
2979
2980 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_PORT)
2981 sc->vxl_src_addr.in4.sin_port = htons(vxlp->vxlp_local_port);
2982 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_PORT)
2983 sc->vxl_dst_addr.in4.sin_port = htons(vxlp->vxlp_remote_port);
2984
2985 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_PORT_RANGE) {
2986 if (vxlp->vxlp_min_port <= vxlp->vxlp_max_port) {
2987 sc->vxl_min_port = vxlp->vxlp_min_port;
2988 sc->vxl_max_port = vxlp->vxlp_max_port;
2989 }
2990 }
2991
2992 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_MULTICAST_IF)
2993 strlcpy(sc->vxl_mc_ifname, vxlp->vxlp_mc_ifname, IFNAMSIZ);
2994
2995 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_TIMEOUT) {
2996 if (vxlan_check_ftable_timeout(vxlp->vxlp_ftable_timeout) == 0)
2997 sc->vxl_ftable_timeout = vxlp->vxlp_ftable_timeout;
2998 }
2999
3000 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_MAX) {
3001 if (vxlan_check_ftable_max(vxlp->vxlp_ftable_max) == 0)
3002 sc->vxl_ftable_max = vxlp->vxlp_ftable_max;
3003 }
3004
3005 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_TTL) {
3006 if (vxlan_check_ttl(vxlp->vxlp_ttl) == 0)
3007 sc->vxl_ttl = vxlp->vxlp_ttl;
3008 }
3009
3010 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LEARN) {
3011 if (vxlp->vxlp_learn == 0)
3012 sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
3013 }
3014
3015 return (0);
3016 }
3017
3018 static int
3019 vxlan_set_reqcap(struct vxlan_softc *sc, struct ifnet *ifp, int reqcap)
3020 {
3021 int mask = reqcap ^ ifp->if_capenable;
3022
3023 /* Disable TSO if tx checksums are disabled. */
3024 if (mask & IFCAP_TXCSUM && !(reqcap & IFCAP_TXCSUM) &&
3025 reqcap & IFCAP_TSO4) {
3026 reqcap &= ~IFCAP_TSO4;
3027 if_printf(ifp, "tso4 disabled due to -txcsum.\n");
3028 }
3029 if (mask & IFCAP_TXCSUM_IPV6 && !(reqcap & IFCAP_TXCSUM_IPV6) &&
3030 reqcap & IFCAP_TSO6) {
3031 reqcap &= ~IFCAP_TSO6;
3032 if_printf(ifp, "tso6 disabled due to -txcsum6.\n");
3033 }
3034
3035 /* Do not enable TSO if tx checksums are disabled. */
3036 if (mask & IFCAP_TSO4 && reqcap & IFCAP_TSO4 &&
3037 !(reqcap & IFCAP_TXCSUM)) {
3038 if_printf(ifp, "enable txcsum first.\n");
3039 return (EAGAIN);
3040 }
3041 if (mask & IFCAP_TSO6 && reqcap & IFCAP_TSO6 &&
3042 !(reqcap & IFCAP_TXCSUM_IPV6)) {
3043 if_printf(ifp, "enable txcsum6 first.\n");
3044 return (EAGAIN);
3045 }
3046
3047 sc->vxl_reqcap = reqcap;
3048 return (0);
3049 }
3050
3051 /*
3052 * A VXLAN interface inherits the capabilities of the vxlandev or the interface
3053 * hosting the vxlanlocal address.
3054 */
3055 static void
3056 vxlan_set_hwcaps(struct vxlan_softc *sc)
3057 {
3058 struct epoch_tracker et;
3059 struct ifnet *p;
3060 struct ifaddr *ifa;
3061 u_long hwa;
3062 int cap, ena;
3063 bool rel;
3064 struct ifnet *ifp = sc->vxl_ifp;
3065
3066 /* reset caps */
3067 ifp->if_capabilities &= VXLAN_BASIC_IFCAPS;
3068 ifp->if_capenable &= VXLAN_BASIC_IFCAPS;
3069 ifp->if_hwassist = 0;
3070
3071 NET_EPOCH_ENTER(et);
3072 CURVNET_SET(ifp->if_vnet);
3073
3074 rel = false;
3075 p = NULL;
3076 if (sc->vxl_mc_ifname[0] != '\0') {
3077 rel = true;
3078 p = ifunit_ref(sc->vxl_mc_ifname);
3079 } else if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
3080 if (sc->vxl_src_addr.sa.sa_family == AF_INET) {
3081 struct sockaddr_in in4 = sc->vxl_src_addr.in4;
3082
3083 in4.sin_port = 0;
3084 ifa = ifa_ifwithaddr((struct sockaddr *)&in4);
3085 if (ifa != NULL)
3086 p = ifa->ifa_ifp;
3087 } else if (sc->vxl_src_addr.sa.sa_family == AF_INET6) {
3088 struct sockaddr_in6 in6 = sc->vxl_src_addr.in6;
3089
3090 in6.sin6_port = 0;
3091 ifa = ifa_ifwithaddr((struct sockaddr *)&in6);
3092 if (ifa != NULL)
3093 p = ifa->ifa_ifp;
3094 }
3095 }
3096 if (p == NULL)
3097 goto done;
3098
3099 cap = ena = hwa = 0;
3100
3101 /* checksum offload */
3102 if (p->if_capabilities & IFCAP_VXLAN_HWCSUM)
3103 cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
3104 if (p->if_capenable & IFCAP_VXLAN_HWCSUM) {
3105 ena |= sc->vxl_reqcap & p->if_capenable &
3106 (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
3107 if (ena & IFCAP_TXCSUM) {
3108 if (p->if_hwassist & CSUM_INNER_IP)
3109 hwa |= CSUM_IP;
3110 if (p->if_hwassist & CSUM_INNER_IP_UDP)
3111 hwa |= CSUM_IP_UDP;
3112 if (p->if_hwassist & CSUM_INNER_IP_TCP)
3113 hwa |= CSUM_IP_TCP;
3114 }
3115 if (ena & IFCAP_TXCSUM_IPV6) {
3116 if (p->if_hwassist & CSUM_INNER_IP6_UDP)
3117 hwa |= CSUM_IP6_UDP;
3118 if (p->if_hwassist & CSUM_INNER_IP6_TCP)
3119 hwa |= CSUM_IP6_TCP;
3120 }
3121 }
3122
3123 /* hardware TSO */
3124 if (p->if_capabilities & IFCAP_VXLAN_HWTSO) {
3125 cap |= p->if_capabilities & IFCAP_TSO;
3126 if (p->if_hw_tsomax > IP_MAXPACKET - ifp->if_hdrlen)
3127 ifp->if_hw_tsomax = IP_MAXPACKET - ifp->if_hdrlen;
3128 else
3129 ifp->if_hw_tsomax = p->if_hw_tsomax;
3130 /* XXX: tsomaxsegcount decrement is cxgbe specific */
3131 ifp->if_hw_tsomaxsegcount = p->if_hw_tsomaxsegcount - 1;
3132 ifp->if_hw_tsomaxsegsize = p->if_hw_tsomaxsegsize;
3133 }
3134 if (p->if_capenable & IFCAP_VXLAN_HWTSO) {
3135 ena |= sc->vxl_reqcap & p->if_capenable & IFCAP_TSO;
3136 if (ena & IFCAP_TSO) {
3137 if (p->if_hwassist & CSUM_INNER_IP_TSO)
3138 hwa |= CSUM_IP_TSO;
3139 if (p->if_hwassist & CSUM_INNER_IP6_TSO)
3140 hwa |= CSUM_IP6_TSO;
3141 }
3142 }
3143
3144 ifp->if_capabilities |= cap;
3145 ifp->if_capenable |= ena;
3146 ifp->if_hwassist |= hwa;
3147 if (rel)
3148 if_rele(p);
3149 done:
3150 CURVNET_RESTORE();
3151 NET_EPOCH_EXIT(et);
3152 }
3153
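/*
 * Clone creation: allocate the softc, apply the defaults and any
 * user-supplied ifvxlanparam, then attach the ethernet interface. The
 * interface is created administratively down; vxlan_init() does the
 * heavy lifting when it is brought up (typically via ifconfig(8), e.g.
 * creating a vxlan interface with vxlanid/vxlanlocal/vxlanremote
 * parameters and marking it up).
 */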
3154 static int
3155 vxlan_clone_create(struct if_clone *ifc, int unit, caddr_t params)
3156 {
3157 struct vxlan_softc *sc;
3158 struct ifnet *ifp;
3159 struct ifvxlanparam vxlp;
3160 int error;
3161
3162 sc = malloc(sizeof(struct vxlan_softc), M_VXLAN, M_WAITOK | M_ZERO);
3163 sc->vxl_unit = unit;
3164 vxlan_set_default_config(sc);
3165 error = vxlan_stats_alloc(sc);
3166 if (error != 0)
3167 goto fail;
3168
3169 if (params != 0) {
3170 error = copyin(params, &vxlp, sizeof(vxlp));
3171 if (error)
3172 goto fail;
3173
3174 error = vxlan_set_user_config(sc, &vxlp);
3175 if (error)
3176 goto fail;
3177 }
3178
3179 ifp = if_alloc(IFT_ETHER);
3180 if (ifp == NULL) {
3181 error = ENOSPC;
3182 goto fail;
3183 }
3184
3185 sc->vxl_ifp = ifp;
3186 rm_init(&sc->vxl_lock, "vxlanrm");
3187 callout_init_rw(&sc->vxl_callout, &sc->vxl_lock, 0);
3188 sc->vxl_port_hash_key = arc4random();
3189 vxlan_ftable_init(sc);
3190
3191 vxlan_sysctl_setup(sc);
3192
3193 ifp->if_softc = sc;
3194 if_initname(ifp, vxlan_name, unit);
3195 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3196 ifp->if_init = vxlan_init;
3197 ifp->if_ioctl = vxlan_ioctl;
3198 ifp->if_transmit = vxlan_transmit;
3199 ifp->if_qflush = vxlan_qflush;
3200 ifp->if_capabilities = VXLAN_BASIC_IFCAPS;
3201 ifp->if_capenable = VXLAN_BASIC_IFCAPS;
3202 sc->vxl_reqcap = -1;
3203 vxlan_set_hwcaps(sc);
3204
3205 ifmedia_init(&sc->vxl_media, 0, vxlan_media_change, vxlan_media_status);
3206 ifmedia_add(&sc->vxl_media, IFM_ETHER | IFM_AUTO, 0, NULL);
3207 ifmedia_set(&sc->vxl_media, IFM_ETHER | IFM_AUTO);
3208
3209 ether_gen_addr(ifp, &sc->vxl_hwaddr);
3210 ether_ifattach(ifp, sc->vxl_hwaddr.octet);
3211
3212 ifp->if_baudrate = 0;
3213 vxlan_setup_interface_hdrlen(sc);
3214
3215 return (0);
3216
3217 fail:
3218 free(sc, M_VXLAN);
3219 return (error);
3220 }
3221
3222 static void
3223 vxlan_clone_destroy(struct ifnet *ifp)
3224 {
3225 struct vxlan_softc *sc;
3226
3227 sc = ifp->if_softc;
3228
3229 vxlan_teardown(sc);
3230
3231 vxlan_ftable_flush(sc, 1);
3232
3233 ether_ifdetach(ifp);
3234 if_free(ifp);
3235 ifmedia_removeall(&sc->vxl_media);
3236
3237 vxlan_ftable_fini(sc);
3238
3239 vxlan_sysctl_destroy(sc);
3240 rm_destroy(&sc->vxl_lock);
3241 vxlan_stats_free(sc);
3242 free(sc, M_VXLAN);
3243 }
3244
3245 /* BMV: Taken from if_bridge. */
3246 static uint32_t
3247 vxlan_mac_hash(struct vxlan_softc *sc, const uint8_t *addr)
3248 {
3249 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->vxl_ftable_hash_key;
3250
3251 b += addr[5] << 8;
3252 b += addr[4];
3253 a += addr[3] << 24;
3254 a += addr[2] << 16;
3255 a += addr[1] << 8;
3256 a += addr[0];
3257
3258 /*
3259 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3260 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3261 */
3262 #define mix(a, b, c) \
3263 do { \
3264 a -= b; a -= c; a ^= (c >> 13); \
3265 b -= c; b -= a; b ^= (a << 8); \
3266 c -= a; c -= b; c ^= (b >> 13); \
3267 a -= b; a -= c; a ^= (c >> 12); \
3268 b -= c; b -= a; b ^= (a << 16); \
3269 c -= a; c -= b; c ^= (b >> 5); \
3270 a -= b; a -= c; a ^= (c >> 3); \
3271 b -= c; b -= a; b ^= (a << 10); \
3272 c -= a; c -= b; c ^= (b >> 15); \
3273 } while (0)
3274
3275 mix(a, b, c);
3276
3277 #undef mix
3278
3279 return (c);
3280 }
3281
3282 static int
3283 vxlan_media_change(struct ifnet *ifp)
3284 {
3285
3286 /* Ignore. */
3287 return (0);
3288 }
3289
3290 static void
3291 vxlan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3292 {
3293
3294 ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
3295 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
3296 }
3297
3298 static int
3299 vxlan_sockaddr_cmp(const union vxlan_sockaddr *vxladdr,
3300 const struct sockaddr *sa)
3301 {
3302
3303 return (bcmp(&vxladdr->sa, sa, vxladdr->sa.sa_len));
3304 }
3305
3306 static void
3307 vxlan_sockaddr_copy(union vxlan_sockaddr *vxladdr,
3308 const struct sockaddr *sa)
3309 {
3310
3311 MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
3312 bzero(vxladdr, sizeof(*vxladdr));
3313
3314 if (sa->sa_family == AF_INET) {
3315 vxladdr->in4 = *satoconstsin(sa);
3316 vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
3317 } else if (sa->sa_family == AF_INET6) {
3318 vxladdr->in6 = *satoconstsin6(sa);
3319 vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
3320 }
3321 }
3322
3323 static int
3324 vxlan_sockaddr_in_equal(const union vxlan_sockaddr *vxladdr,
3325 const struct sockaddr *sa)
3326 {
3327 int equal;
3328
3329 if (sa->sa_family == AF_INET) {
3330 const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3331 equal = in4->s_addr == vxladdr->in4.sin_addr.s_addr;
3332 } else if (sa->sa_family == AF_INET6) {
3333 const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3334 equal = IN6_ARE_ADDR_EQUAL(in6, &vxladdr->in6.sin6_addr);
3335 } else
3336 equal = 0;
3337
3338 return (equal);
3339 }
3340
3341 static void
3342 vxlan_sockaddr_in_copy(union vxlan_sockaddr *vxladdr,
3343 const struct sockaddr *sa)
3344 {
3345
3346 MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
3347
3348 if (sa->sa_family == AF_INET) {
3349 const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3350 vxladdr->in4.sin_family = AF_INET;
3351 vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
3352 vxladdr->in4.sin_addr = *in4;
3353 } else if (sa->sa_family == AF_INET6) {
3354 const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3355 vxladdr->in6.sin6_family = AF_INET6;
3356 vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
3357 vxladdr->in6.sin6_addr = *in6;
3358 }
3359 }
3360
3361 static int
3362 vxlan_sockaddr_supported(const union vxlan_sockaddr *vxladdr, int unspec)
3363 {
3364 const struct sockaddr *sa;
3365 int supported;
3366
3367 sa = &vxladdr->sa;
3368 supported = 0;
3369
3370 if (sa->sa_family == AF_UNSPEC && unspec != 0) {
3371 supported = 1;
3372 } else if (sa->sa_family == AF_INET) {
3373 #ifdef INET
3374 supported = 1;
3375 #endif
3376 } else if (sa->sa_family == AF_INET6) {
3377 #ifdef INET6
3378 supported = 1;
3379 #endif
3380 }
3381
3382 return (supported);
3383 }
3384
3385 static int
3386 vxlan_sockaddr_in_any(const union vxlan_sockaddr *vxladdr)
3387 {
3388 const struct sockaddr *sa;
3389 int any;
3390
3391 sa = &vxladdr->sa;
3392
3393 if (sa->sa_family == AF_INET) {
3394 const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3395 any = in4->s_addr == INADDR_ANY;
3396 } else if (sa->sa_family == AF_INET6) {
3397 const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3398 any = IN6_IS_ADDR_UNSPECIFIED(in6);
3399 } else
3400 any = -1;
3401
3402 return (any);
3403 }
3404
3405 static int
3406 vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *vxladdr)
3407 {
3408 const struct sockaddr *sa;
3409 int mc;
3410
3411 sa = &vxladdr->sa;
3412
3413 if (sa->sa_family == AF_INET) {
3414 const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3415 mc = IN_MULTICAST(ntohl(in4->s_addr));
3416 } else if (sa->sa_family == AF_INET6) {
3417 const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3418 mc = IN6_IS_ADDR_MULTICAST(in6);
3419 } else
3420 mc = -1;
3421
3422 return (mc);
3423 }
3424
3425 static int
3426 vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *vxladdr)
3427 {
3428 int error;
3429
3430 MPASS(VXLAN_SOCKADDR_IS_IPV6(vxladdr));
3431 #ifdef INET6
3432 error = sa6_embedscope(&vxladdr->in6, V_ip6_use_defzone);
3433 #else
3434 error = EAFNOSUPPORT;
3435 #endif
3436
3437 return (error);
3438 }
3439
3440 static int
3441 vxlan_can_change_config(struct vxlan_softc *sc)
3442 {
3443 struct ifnet *ifp;
3444
3445 ifp = sc->vxl_ifp;
3446 VXLAN_LOCK_ASSERT(sc);
3447
3448 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3449 return (0);
3450 if (sc->vxl_flags & (VXLAN_FLAG_INIT | VXLAN_FLAG_TEARDOWN))
3451 return (0);
3452
3453 return (1);
3454 }
3455
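/*
 * Range-check helpers below return nonzero when the value is invalid.
 */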
3456 static int
3457 vxlan_check_vni(uint32_t vni)
3458 {
3459
3460 return (vni >= VXLAN_VNI_MAX);
3461 }
3462
3463 static int
3464 vxlan_check_ttl(int ttl)
3465 {
3466
3467 return (ttl > MAXTTL);
3468 }
3469
3470 static int
3471 vxlan_check_ftable_timeout(uint32_t timeout)
3472 {
3473
3474 return (timeout > VXLAN_FTABLE_MAX_TIMEOUT);
3475 }
3476
3477 static int
3478 vxlan_check_ftable_max(uint32_t max)
3479 {
3480
3481 return (max > VXLAN_FTABLE_MAX);
3482 }
3483
3484 static void
3485 vxlan_sysctl_setup(struct vxlan_softc *sc)
3486 {
3487 struct sysctl_ctx_list *ctx;
3488 struct sysctl_oid *node;
3489 struct vxlan_statistics *stats;
3490 char namebuf[8];
3491
3492 ctx = &sc->vxl_sysctl_ctx;
3493 stats = &sc->vxl_stats;
3494 snprintf(namebuf, sizeof(namebuf), "%d", sc->vxl_unit);
3495
3496 sysctl_ctx_init(ctx);
3497 sc->vxl_sysctl_node = SYSCTL_ADD_NODE(ctx,
3498 SYSCTL_STATIC_CHILDREN(_net_link_vxlan), OID_AUTO, namebuf,
3499 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3500
3501 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
3502 OID_AUTO, "ftable", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3503 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "count",
3504 CTLFLAG_RD, &sc->vxl_ftable_cnt, 0,
3505 "Number of entries in fowarding table");
3506 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "max",
3507 CTLFLAG_RD, &sc->vxl_ftable_max, 0,
3508 "Maximum number of entries allowed in fowarding table");
3509 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "timeout",
3510 CTLFLAG_RD, &sc->vxl_ftable_timeout, 0,
3511 "Number of seconds between prunes of the forwarding table");
3512 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "dump",
3513 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
3514 sc, 0, vxlan_ftable_sysctl_dump, "A",
3515 "Dump the forwarding table entries");
3516
3517 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
3518 OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3519 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
3520 "ftable_nospace", CTLFLAG_RD, &stats->ftable_nospace, 0,
3521 "Fowarding table reached maximum entries");
3522 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
3523 "ftable_lock_upgrade_failed", CTLFLAG_RD,
3524 &stats->ftable_lock_upgrade_failed, 0,
3525 "Forwarding table update required lock upgrade");
3526
3527 SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "txcsum",
3528 CTLFLAG_RD, &stats->txcsum,
3529 "# of times hardware assisted with tx checksum");
3530 SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tso",
3531 CTLFLAG_RD, &stats->tso, "# of times hardware assisted with TSO");
3532 SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "rxcsum",
3533 CTLFLAG_RD, &stats->rxcsum,
3534 "# of times hardware assisted with rx checksum");
3535 }
3536
3537 static void
3538 vxlan_sysctl_destroy(struct vxlan_softc *sc)
3539 {
3540
3541 sysctl_ctx_free(&sc->vxl_sysctl_ctx);
3542 sc->vxl_sysctl_node = NULL;
3543 }
3544
3545 static int
3546 vxlan_tunable_int(struct vxlan_softc *sc, const char *knob, int def)
3547 {
3548 char path[64];
3549
3550 snprintf(path, sizeof(path), "net.link.vxlan.%d.%s",
3551 sc->vxl_unit, knob);
3552 TUNABLE_INT_FETCH(path, &def);
3553
3554 return (def);
3555 }
3556
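/*
 * ifnet departure handler: when a multicast-capable interface goes
 * away, collect every vxlan softc that was using it as its multicast
 * interface and tear those interfaces down.
 */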
3557 static void
3558 vxlan_ifdetach_event(void *arg __unused, struct ifnet *ifp)
3559 {
3560 struct vxlan_softc_head list;
3561 struct vxlan_socket *vso;
3562 struct vxlan_softc *sc, *tsc;
3563
3564 LIST_INIT(&list);
3565
3566 if (ifp->if_flags & IFF_RENAMING)
3567 return;
3568 if ((ifp->if_flags & IFF_MULTICAST) == 0)
3569 return;
3570
3571 VXLAN_LIST_LOCK();
3572 LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry)
3573 vxlan_socket_ifdetach(vso, ifp, &list);
3574 VXLAN_LIST_UNLOCK();
3575
3576 LIST_FOREACH_SAFE(sc, &list, vxl_ifdetach_list, tsc) {
3577 LIST_REMOVE(sc, vxl_ifdetach_list);
3578
3579 sx_xlock(&vxlan_sx);
3580 VXLAN_WLOCK(sc);
3581 if (sc->vxl_flags & VXLAN_FLAG_INIT)
3582 vxlan_init_wait(sc);
3583 vxlan_teardown_locked(sc);
3584 sx_xunlock(&vxlan_sx);
3585 }
3586 }
3587
3588 static void
3589 vxlan_load(void)
3590 {
3591
3592 mtx_init(&vxlan_list_mtx, "vxlan list", NULL, MTX_DEF);
3593 LIST_INIT(&vxlan_socket_list);
3594 vxlan_ifdetach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
3595 vxlan_ifdetach_event, NULL, EVENTHANDLER_PRI_ANY);
3596 vxlan_cloner = if_clone_simple(vxlan_name, vxlan_clone_create,
3597 vxlan_clone_destroy, 0);
3598 }
3599
3600 static void
3601 vxlan_unload(void)
3602 {
3603
3604 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
3605 vxlan_ifdetach_event_tag);
3606 if_clone_detach(vxlan_cloner);
3607 mtx_destroy(&vxlan_list_mtx);
3608 MPASS(LIST_EMPTY(&vxlan_socket_list));
3609 }
3610
3611 static int
3612 vxlan_modevent(module_t mod, int type, void *unused)
3613 {
3614 int error;
3615
3616 error = 0;
3617
3618 switch (type) {
3619 case MOD_LOAD:
3620 vxlan_load();
3621 break;
3622 case MOD_UNLOAD:
3623 vxlan_unload();
3624 break;
3625 default:
3626 error = ENOTSUP;
3627 break;
3628 }
3629
3630 return (error);
3631 }
3632
3633 static moduledata_t vxlan_mod = {
3634 "if_vxlan",
3635 vxlan_modevent,
3636 0
3637 };
3638
3639 DECLARE_MODULE(if_vxlan, vxlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
3640 MODULE_VERSION(if_vxlan, 1);
3641