1 /*-
2 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND ISC)
3 *
4 * Copyright (c) 2002 Michael Shalayeff
5 * Copyright (c) 2012 Gleb Smirnoff <[email protected]>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*-
31 * Copyright (c) 2009 David Gwynne <[email protected]>
32 *
33 * Permission to use, copy, modify, and distribute this software for any
34 * purpose with or without fee is hereby granted, provided that the above
35 * copyright notice and this permission notice appear in all copies.
36 *
37 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44 */
45
46 /*
47 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
48 *
49 * Revisions picked from OpenBSD after revision 1.110 import:
50 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
51 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
52 * 1.120, 1.175 - use monotonic time_uptime
53 * 1.122 - reduce number of updates for non-TCP sessions
54 * 1.125, 1.127 - rewrite merge or stale processing
55 * 1.128 - cleanups
56 * 1.146 - bzero() mbuf before sparsely filling it with data
57 * 1.170 - SIOCSIFMTU checks
58 * 1.126, 1.142 - deferred packets processing
59 * 1.173 - correct expire time processing
60 */
61
62 #include <sys/cdefs.h>
63 __FBSDID("$FreeBSD$");
64
65 #include "opt_inet.h"
66 #include "opt_inet6.h"
67 #include "opt_pf.h"
68
69 #include <sys/param.h>
70 #include <sys/bus.h>
71 #include <sys/endian.h>
72 #include <sys/interrupt.h>
73 #include <sys/kernel.h>
74 #include <sys/lock.h>
75 #include <sys/mbuf.h>
76 #include <sys/module.h>
77 #include <sys/mutex.h>
78 #include <sys/priv.h>
79 #include <sys/protosw.h>
80 #include <sys/smp.h>
81 #include <sys/socket.h>
82 #include <sys/sockio.h>
83 #include <sys/sysctl.h>
84 #include <sys/syslog.h>
85
86 #include <net/bpf.h>
87 #include <net/if.h>
88 #include <net/if_var.h>
89 #include <net/if_clone.h>
90 #include <net/if_types.h>
91 #include <net/vnet.h>
92 #include <net/pfvar.h>
93 #include <net/if_pfsync.h>
94
95 #include <netinet/if_ether.h>
96 #include <netinet/in.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip.h>
99 #include <netinet/ip_carp.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/tcp.h>
102 #include <netinet/tcp_fsm.h>
103 #include <netinet/tcp_seq.h>
104
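/*
 * Smallest possible pfsync datagram: an IP header, the pfsync header and a
 * single subheader.  Bucket lengths (b_len) start at, and are reset to,
 * this value.
 */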
105 #define PFSYNC_MINPKT ( \
106 sizeof(struct ip) + \
107 sizeof(struct pfsync_header) + \
108 sizeof(struct pfsync_subheader) )
109
110 struct pfsync_bucket;
111
112 struct pfsync_pkt {
113 struct ip *ip;
114 struct in_addr src;
115 u_int8_t flags;
116 };
117
118 static int pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
119 struct pfsync_state_peer *);
120 static int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
121 static int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
122 static int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
123 static int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
124 static int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
125 static int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
126 static int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
127 static int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
128 static int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
129 static int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
130 static int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
131 static int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);
132
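/*
 * Input dispatch table, indexed by the PFSYNC_ACT_* value carried in each
 * subheader.  Actions that are not supported on input are routed to
 * pfsync_in_error().
 */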
133 static int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
134 pfsync_in_clr, /* PFSYNC_ACT_CLR */
135 pfsync_in_ins, /* PFSYNC_ACT_INS */
136 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */
137 pfsync_in_upd, /* PFSYNC_ACT_UPD */
138 pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */
139 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */
140 pfsync_in_del, /* PFSYNC_ACT_DEL */
141 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */
142 pfsync_in_error, /* PFSYNC_ACT_INS_F */
143 pfsync_in_error, /* PFSYNC_ACT_DEL_F */
144 pfsync_in_bus, /* PFSYNC_ACT_BUS */
145 pfsync_in_tdb, /* PFSYNC_ACT_TDB */
146 pfsync_in_eof /* PFSYNC_ACT_EOF */
147 };
148
149 struct pfsync_q {
150 void (*write)(struct pf_kstate *, void *);
151 size_t len;
152 u_int8_t action;
153 };
154
155 /* we have one of these for every PFSYNC_S_ */
156 static void pfsync_out_state(struct pf_kstate *, void *);
157 static void pfsync_out_iack(struct pf_kstate *, void *);
158 static void pfsync_out_upd_c(struct pf_kstate *, void *);
159 static void pfsync_out_del(struct pf_kstate *, void *);
160
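/*
 * Output queue descriptors, indexed by PFSYNC_S_*: the serializer to run,
 * the on-wire record size and the action placed in the subheader.
 */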
161 static struct pfsync_q pfsync_qs[] = {
162 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS },
163 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
164 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD },
165 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C },
166 { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C }
167 };
168
169 static void pfsync_q_ins(struct pf_kstate *, int, bool);
170 static void pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *);
171
172 static void pfsync_update_state(struct pf_kstate *);
173
174 struct pfsync_upd_req_item {
175 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry;
176 struct pfsync_upd_req ur_msg;
177 };
178
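/*
 * A deferred packet: when deferral is enabled, the packet that created a
 * state is held here until the peer acknowledges the insert or
 * PFSYNC_DEFER_TIMEOUT expires, at which point it is transmitted.
 */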
179 struct pfsync_deferral {
180 struct pfsync_softc *pd_sc;
181 TAILQ_ENTRY(pfsync_deferral) pd_entry;
182 u_int pd_refs;
183 struct callout pd_tmo;
184
185 struct pf_kstate *pd_st;
186 struct mbuf *pd_m;
187 };
188
189 struct pfsync_softc;
190
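/*
 * Outgoing work is spread across pfsync_buckets buckets (states are hashed
 * with PF_IDHASH), each with its own mutex, per-action queues, update
 * request list, deferral list and send queue.
 */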
191 struct pfsync_bucket
192 {
193 int b_id;
194 struct pfsync_softc *b_sc;
195 struct mtx b_mtx;
196 struct callout b_tmo;
197 int b_flags;
198 #define PFSYNCF_BUCKET_PUSH 0x00000001
199
200 size_t b_len;
201 TAILQ_HEAD(, pf_kstate) b_qs[PFSYNC_S_COUNT];
202 TAILQ_HEAD(, pfsync_upd_req_item) b_upd_req_list;
203 TAILQ_HEAD(, pfsync_deferral) b_deferrals;
204 u_int b_deferred;
205 void *b_plus;
206 size_t b_pluslen;
207
208 struct ifaltq b_snd;
209 };
210
211 struct pfsync_softc {
212 /* Configuration */
213 struct ifnet *sc_ifp;
214 struct ifnet *sc_sync_if;
215 struct ip_moptions sc_imo;
216 struct in_addr sc_sync_peer;
217 uint32_t sc_flags;
218 uint8_t sc_maxupdates;
219 struct ip sc_template;
220 struct mtx sc_mtx;
221
222 /* Queued data */
223 struct pfsync_bucket *sc_buckets;
224
225 /* Bulk update info */
226 struct mtx sc_bulk_mtx;
227 uint32_t sc_ureq_sent;
228 int sc_bulk_tries;
229 uint32_t sc_ureq_received;
230 int sc_bulk_hashid;
231 uint64_t sc_bulk_stateid;
232 uint32_t sc_bulk_creatorid;
233 struct callout sc_bulk_tmo;
234 struct callout sc_bulkfail_tmo;
235 };
236
237 #define PFSYNC_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
238 #define PFSYNC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
239 #define PFSYNC_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
240
241 #define PFSYNC_BUCKET_LOCK(b) mtx_lock(&(b)->b_mtx)
242 #define PFSYNC_BUCKET_UNLOCK(b) mtx_unlock(&(b)->b_mtx)
243 #define PFSYNC_BUCKET_LOCK_ASSERT(b) mtx_assert(&(b)->b_mtx, MA_OWNED)
244
245 #define PFSYNC_BLOCK(sc) mtx_lock(&(sc)->sc_bulk_mtx)
246 #define PFSYNC_BUNLOCK(sc) mtx_unlock(&(sc)->sc_bulk_mtx)
247 #define PFSYNC_BLOCK_ASSERT(sc) mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)
248
249 static const char pfsyncname[] = "pfsync";
250 static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
251 VNET_DEFINE_STATIC(struct pfsync_softc *, pfsyncif) = NULL;
252 #define V_pfsyncif VNET(pfsyncif)
253 VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
254 #define V_pfsync_swi_cookie VNET(pfsync_swi_cookie)
255 VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie);
256 #define V_pfsync_swi_ie VNET(pfsync_swi_ie)
257 VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
258 #define V_pfsyncstats VNET(pfsyncstats)
259 VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
260 #define V_pfsync_carp_adj VNET(pfsync_carp_adj)
261
262 static void pfsync_timeout(void *);
263 static void pfsync_push(struct pfsync_bucket *);
264 static void pfsync_push_all(struct pfsync_softc *);
265 static void pfsyncintr(void *);
266 static int pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
267 struct in_mfilter *imf);
268 static void pfsync_multicast_cleanup(struct pfsync_softc *);
269 static void pfsync_pointers_init(void);
270 static void pfsync_pointers_uninit(void);
271 static int pfsync_init(void);
272 static void pfsync_uninit(void);
273
274 static unsigned long pfsync_buckets;
275
276 SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
277 "PFSYNC");
278 SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
279 &VNET_NAME(pfsyncstats), pfsyncstats,
280 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
281 SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
282 &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
283 SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
284 &pfsync_buckets, 0, "Number of pfsync hash buckets");
285
286 static int pfsync_clone_create(struct if_clone *, int, caddr_t);
287 static void pfsync_clone_destroy(struct ifnet *);
288 static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
289 struct pf_state_peer *);
290 static int pfsyncoutput(struct ifnet *, struct mbuf *,
291 const struct sockaddr *, struct route *);
292 static int pfsyncioctl(struct ifnet *, u_long, caddr_t);
293
294 static int pfsync_defer(struct pf_kstate *, struct mbuf *);
295 static void pfsync_undefer(struct pfsync_deferral *, int);
296 static void pfsync_undefer_state(struct pf_kstate *, int);
297 static void pfsync_defer_tmo(void *);
298
299 static void pfsync_request_update(u_int32_t, u_int64_t);
300 static bool pfsync_update_state_req(struct pf_kstate *);
301
302 static void pfsync_drop(struct pfsync_softc *);
303 static void pfsync_sendout(int, int);
304 static void pfsync_send_plus(void *, size_t);
305
306 static void pfsync_bulk_start(void);
307 static void pfsync_bulk_status(u_int8_t);
308 static void pfsync_bulk_update(void *);
309 static void pfsync_bulk_fail(void *);
310
311 static void pfsync_detach_ifnet(struct ifnet *);
312 #ifdef IPSEC
313 static void pfsync_update_net_tdb(struct pfsync_tdb *);
314 #endif
315 static struct pfsync_bucket *pfsync_get_bucket(struct pfsync_softc *,
316 struct pf_kstate *);
317
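/* Bulk update retry limit and deferral timeout (20 ms, expressed in ticks). */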
318 #define PFSYNC_MAX_BULKTRIES 12
319 #define PFSYNC_DEFER_TIMEOUT ((20 * hz) / 1000)
320
321 VNET_DEFINE(struct if_clone *, pfsync_cloner);
322 #define V_pfsync_cloner VNET(pfsync_cloner)
323
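/*
 * Create the pfsync interface: allocate the softc, the per-bucket state and
 * the bulk update callouts.  Only unit 0 is allowed.
 */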
324 static int
325 pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
326 {
327 struct pfsync_softc *sc;
328 struct ifnet *ifp;
329 struct pfsync_bucket *b;
330 int c, q;
331
332 if (unit != 0)
333 return (EINVAL);
334
335 if (! pfsync_buckets)
336 pfsync_buckets = mp_ncpus * 2;
337
338 sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
339 sc->sc_flags |= PFSYNCF_OK;
340 sc->sc_maxupdates = 128;
341
342 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
343 if (ifp == NULL) {
344 free(sc, M_PFSYNC);
345 return (ENOSPC);
346 }
347 if_initname(ifp, pfsyncname, unit);
348 ifp->if_softc = sc;
349 ifp->if_ioctl = pfsyncioctl;
350 ifp->if_output = pfsyncoutput;
351 ifp->if_type = IFT_PFSYNC;
352 ifp->if_hdrlen = sizeof(struct pfsync_header);
353 ifp->if_mtu = ETHERMTU;
354 mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
355 mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
356 callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
357 callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);
358
359 if_attach(ifp);
360
361 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
362
363 sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
364 M_PFSYNC, M_ZERO | M_WAITOK);
365 for (c = 0; c < pfsync_buckets; c++) {
366 b = &sc->sc_buckets[c];
367 mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF);
368
369 b->b_id = c;
370 b->b_sc = sc;
371 b->b_len = PFSYNC_MINPKT;
372
373 for (q = 0; q < PFSYNC_S_COUNT; q++)
374 TAILQ_INIT(&b->b_qs[q]);
375
376 TAILQ_INIT(&b->b_upd_req_list);
377 TAILQ_INIT(&b->b_deferrals);
378
379 callout_init(&b->b_tmo, 1);
380
381 b->b_snd.ifq_maxlen = ifqmaxlen;
382 }
383
384 V_pfsyncif = sc;
385
386 return (0);
387 }
388
389 static void
390 pfsync_clone_destroy(struct ifnet *ifp)
391 {
392 struct pfsync_softc *sc = ifp->if_softc;
393 struct pfsync_bucket *b;
394 int c;
395
396 for (c = 0; c < pfsync_buckets; c++) {
397 b = &sc->sc_buckets[c];
398 /*
399 * At this stage, everything should have already been
400 * cleared by pfsync_uninit(), and we have only to
401 * drain callouts.
402 */
403 while (b->b_deferred > 0) {
404 struct pfsync_deferral *pd =
405 TAILQ_FIRST(&b->b_deferrals);
406
407 TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
408 b->b_deferred--;
409 if (callout_stop(&pd->pd_tmo) > 0) {
410 pf_release_state(pd->pd_st);
411 m_freem(pd->pd_m);
412 free(pd, M_PFSYNC);
413 } else {
414 pd->pd_refs++;
415 callout_drain(&pd->pd_tmo);
416 free(pd, M_PFSYNC);
417 }
418 }
419
420 callout_drain(&b->b_tmo);
421 }
422
423 callout_drain(&sc->sc_bulkfail_tmo);
424 callout_drain(&sc->sc_bulk_tmo);
425
426 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
427 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
428 bpfdetach(ifp);
429 if_detach(ifp);
430
431 pfsync_drop(sc);
432
433 if_free(ifp);
434 pfsync_multicast_cleanup(sc);
435 mtx_destroy(&sc->sc_mtx);
436 mtx_destroy(&sc->sc_bulk_mtx);
437
438 free(sc->sc_buckets, M_PFSYNC);
439 free(sc, M_PFSYNC);
440
441 V_pfsyncif = NULL;
442 }
443
444 static int
445 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
446 struct pf_state_peer *d)
447 {
448 if (s->scrub.scrub_flag && d->scrub == NULL) {
449 d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
450 if (d->scrub == NULL)
451 return (ENOMEM);
452 }
453
454 return (0);
455 }
456
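/*
 * Build a local state from a wire-format pfsync_state, received either on
 * the sync interface or via ioctl (PFSYNC_SI_IOCTL), and insert it into the
 * state table.
 */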
457 static int
458 pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
459 {
460 struct pfsync_softc *sc = V_pfsyncif;
461 #ifndef __NO_STRICT_ALIGNMENT
462 struct pfsync_state_key key[2];
463 #endif
464 struct pfsync_state_key *kw, *ks;
465 struct pf_kstate *st = NULL;
466 struct pf_state_key *skw = NULL, *sks = NULL;
467 struct pf_krule *r = NULL;
468 struct pfi_kkif *kif;
469 int error;
470
471 PF_RULES_RASSERT();
472
473 if (sp->creatorid == 0) {
474 if (V_pf_status.debug >= PF_DEBUG_MISC)
475 printf("%s: invalid creator id: %08x\n", __func__,
476 ntohl(sp->creatorid));
477 return (EINVAL);
478 }
479
480 if ((kif = pfi_kkif_find(sp->ifname)) == NULL) {
481 if (V_pf_status.debug >= PF_DEBUG_MISC)
482 printf("%s: unknown interface: %s\n", __func__,
483 sp->ifname);
484 if (flags & PFSYNC_SI_IOCTL)
485 return (EINVAL);
486 return (0); /* skip this state */
487 }
488
489 /*
490 * If the ruleset checksums match or the state is coming from the ioctl,
491 * it's safe to associate the state with the rule of that number.
492 */
493 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
494 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
495 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
496 r = pf_main_ruleset.rules[
497 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
498 else
499 r = &V_pf_default_rule;
500
501 if ((r->max_states &&
502 counter_u64_fetch(r->states_cur) >= r->max_states))
503 goto cleanup;
504
505 /*
506 * XXXGL: consider M_WAITOK in ioctl path after.
507 */
508 st = pf_alloc_state(M_NOWAIT);
509 if (__predict_false(st == NULL))
510 goto cleanup;
511
512 if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
513 goto cleanup;
514
515 #ifndef __NO_STRICT_ALIGNMENT
516 bcopy(&sp->key, key, sizeof(struct pfsync_state_key) * 2);
517 kw = &key[PF_SK_WIRE];
518 ks = &key[PF_SK_STACK];
519 #else
520 kw = &sp->key[PF_SK_WIRE];
521 ks = &sp->key[PF_SK_STACK];
522 #endif
523
524 if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->af) ||
525 PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->af) ||
526 kw->port[0] != ks->port[0] ||
527 kw->port[1] != ks->port[1]) {
528 sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
529 if (sks == NULL)
530 goto cleanup;
531 } else
532 sks = skw;
533
534 /* allocate memory for scrub info */
535 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
536 pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
537 goto cleanup;
538
539 /* Copy to state key(s). */
540 skw->addr[0] = kw->addr[0];
541 skw->addr[1] = kw->addr[1];
542 skw->port[0] = kw->port[0];
543 skw->port[1] = kw->port[1];
544 skw->proto = sp->proto;
545 skw->af = sp->af;
546 if (sks != skw) {
547 sks->addr[0] = ks->addr[0];
548 sks->addr[1] = ks->addr[1];
549 sks->port[0] = ks->port[0];
550 sks->port[1] = ks->port[1];
551 sks->proto = sp->proto;
552 sks->af = sp->af;
553 }
554
555 /* copy to state */
556 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
557 st->creation = time_uptime - ntohl(sp->creation);
558 st->expire = time_uptime;
559 if (sp->expire) {
560 uint32_t timeout;
561
562 timeout = r->timeout[sp->timeout];
563 if (!timeout)
564 timeout = V_pf_default_rule.timeout[sp->timeout];
565
566 /* sp->expire may have been adaptively scaled by export. */
567 st->expire -= timeout - ntohl(sp->expire);
568 }
569
570 st->direction = sp->direction;
571 st->log = sp->log;
572 st->timeout = sp->timeout;
573 st->state_flags = sp->state_flags;
574
575 st->id = sp->id;
576 st->creatorid = sp->creatorid;
577 pf_state_peer_ntoh(&sp->src, &st->src);
578 pf_state_peer_ntoh(&sp->dst, &st->dst);
579
580 st->rule.ptr = r;
581 st->nat_rule.ptr = NULL;
582 st->anchor.ptr = NULL;
583 st->rt_kif = NULL;
584
585 st->pfsync_time = time_uptime;
586 st->sync_state = PFSYNC_S_NONE;
587
588 if (!(flags & PFSYNC_SI_IOCTL))
589 st->state_flags |= PFSTATE_NOSYNC;
590
591 if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0)
592 goto cleanup_state;
593
594 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
595 counter_u64_add(r->states_cur, 1);
596 counter_u64_add(r->states_tot, 1);
597
598 if (!(flags & PFSYNC_SI_IOCTL)) {
599 st->state_flags &= ~PFSTATE_NOSYNC;
600 if (st->state_flags & PFSTATE_ACK) {
601 pfsync_q_ins(st, PFSYNC_S_IACK, true);
602 pfsync_push_all(sc);
603 }
604 }
605 st->state_flags &= ~PFSTATE_ACK;
606 PF_STATE_UNLOCK(st);
607
608 return (0);
609
610 cleanup:
611 error = ENOMEM;
612 if (skw == sks)
613 sks = NULL;
614 if (skw != NULL)
615 uma_zfree(V_pf_state_key_z, skw);
616 if (sks != NULL)
617 uma_zfree(V_pf_state_key_z, sks);
618
619 cleanup_state: /* pf_state_insert() frees the state keys. */
620 if (st) {
621 pf_free_state(st);
622 }
623 return (error);
624 }
625
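/*
 * Input path: validate the IP and pfsync headers, then walk the subheaders
 * and hand each batch of records to the matching pfsync_acts[] handler.
 */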
626 static int
627 pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused)
628 {
629 struct pfsync_softc *sc = V_pfsyncif;
630 struct pfsync_pkt pkt;
631 struct mbuf *m = *mp;
632 struct ip *ip = mtod(m, struct ip *);
633 struct pfsync_header *ph;
634 struct pfsync_subheader subh;
635
636 int offset, len;
637 int rv;
638 uint16_t count;
639
640 PF_RULES_RLOCK_TRACKER;
641
642 *mp = NULL;
643 V_pfsyncstats.pfsyncs_ipackets++;
644
645 /* Verify that we have a sync interface configured. */
646 if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
647 (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
648 goto done;
649
650 /* verify that the packet came in on the right interface */
651 if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
652 V_pfsyncstats.pfsyncs_badif++;
653 goto done;
654 }
655
656 if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
657 if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
658 /* verify that the IP TTL is 255. */
659 if (ip->ip_ttl != PFSYNC_DFLTTL) {
660 V_pfsyncstats.pfsyncs_badttl++;
661 goto done;
662 }
663
664 offset = ip->ip_hl << 2;
665 if (m->m_pkthdr.len < offset + sizeof(*ph)) {
666 V_pfsyncstats.pfsyncs_hdrops++;
667 goto done;
668 }
669
670 if (offset + sizeof(*ph) > m->m_len) {
671 if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
672 V_pfsyncstats.pfsyncs_hdrops++;
673 return (IPPROTO_DONE);
674 }
675 ip = mtod(m, struct ip *);
676 }
677 ph = (struct pfsync_header *)((char *)ip + offset);
678
679 /* verify the version */
680 if (ph->version != PFSYNC_VERSION) {
681 V_pfsyncstats.pfsyncs_badver++;
682 goto done;
683 }
684
685 len = ntohs(ph->len) + offset;
686 if (m->m_pkthdr.len < len) {
687 V_pfsyncstats.pfsyncs_badlen++;
688 goto done;
689 }
690
691 /* Cheaper to grab this now than having to mess with mbufs later */
692 pkt.ip = ip;
693 pkt.src = ip->ip_src;
694 pkt.flags = 0;
695
696 /*
697 * Trusting pf_chksum during packet processing, as well as seeking
698 * in interface name tree, require holding PF_RULES_RLOCK().
699 */
700 PF_RULES_RLOCK();
701 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
702 pkt.flags |= PFSYNC_SI_CKSUM;
703
704 offset += sizeof(*ph);
705 while (offset <= len - sizeof(subh)) {
706 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
707 offset += sizeof(subh);
708
709 if (subh.action >= PFSYNC_ACT_MAX) {
710 V_pfsyncstats.pfsyncs_badact++;
711 PF_RULES_RUNLOCK();
712 goto done;
713 }
714
715 count = ntohs(subh.count);
716 V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
717 rv = (*pfsync_acts[subh.action])(&pkt, m, offset, count);
718 if (rv == -1) {
719 PF_RULES_RUNLOCK();
720 return (IPPROTO_DONE);
721 }
722
723 offset += rv;
724 }
725 PF_RULES_RUNLOCK();
726
727 done:
728 m_freem(m);
729 return (IPPROTO_DONE);
730 }
731
732 static int
733 pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
734 {
735 struct pfsync_clr *clr;
736 struct mbuf *mp;
737 int len = sizeof(*clr) * count;
738 int i, offp;
739 u_int32_t creatorid;
740
741 mp = m_pulldown(m, offset, len, &offp);
742 if (mp == NULL) {
743 V_pfsyncstats.pfsyncs_badlen++;
744 return (-1);
745 }
746 clr = (struct pfsync_clr *)(mp->m_data + offp);
747
748 for (i = 0; i < count; i++) {
749 creatorid = clr[i].creatorid;
750
751 if (clr[i].ifname[0] != '\0' &&
752 pfi_kkif_find(clr[i].ifname) == NULL)
753 continue;
754
755 for (int i = 0; i <= pf_hashmask; i++) {
756 struct pf_idhash *ih = &V_pf_idhash[i];
757 struct pf_kstate *s;
758 relock:
759 PF_HASHROW_LOCK(ih);
760 LIST_FOREACH(s, &ih->states, entry) {
761 if (s->creatorid == creatorid) {
762 s->state_flags |= PFSTATE_NOSYNC;
763 pf_unlink_state(s, PF_ENTER_LOCKED);
764 goto relock;
765 }
766 }
767 PF_HASHROW_UNLOCK(ih);
768 }
769 }
770
771 return (len);
772 }
773
774 static int
775 pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
776 {
777 struct mbuf *mp;
778 struct pfsync_state *sa, *sp;
779 int len = sizeof(*sp) * count;
780 int i, offp;
781
782 mp = m_pulldown(m, offset, len, &offp);
783 if (mp == NULL) {
784 V_pfsyncstats.pfsyncs_badlen++;
785 return (-1);
786 }
787 sa = (struct pfsync_state *)(mp->m_data + offp);
788
789 for (i = 0; i < count; i++) {
790 sp = &sa[i];
791
792 /* Check for invalid values. */
793 if (sp->timeout >= PFTM_MAX ||
794 sp->src.state > PF_TCPS_PROXY_DST ||
795 sp->dst.state > PF_TCPS_PROXY_DST ||
796 sp->direction > PF_OUT ||
797 (sp->af != AF_INET && sp->af != AF_INET6)) {
798 if (V_pf_status.debug >= PF_DEBUG_MISC)
799 printf("%s: invalid value\n", __func__);
800 V_pfsyncstats.pfsyncs_badval++;
801 continue;
802 }
803
804 if (pfsync_state_import(sp, pkt->flags) == ENOMEM)
805 /* Drop out, but process the rest of the actions. */
806 break;
807 }
808
809 return (len);
810 }
811
812 static int
813 pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
814 {
815 struct pfsync_ins_ack *ia, *iaa;
816 struct pf_kstate *st;
817
818 struct mbuf *mp;
819 int len = count * sizeof(*ia);
820 int offp, i;
821
822 mp = m_pulldown(m, offset, len, &offp);
823 if (mp == NULL) {
824 V_pfsyncstats.pfsyncs_badlen++;
825 return (-1);
826 }
827 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);
828
829 for (i = 0; i < count; i++) {
830 ia = &iaa[i];
831
832 st = pf_find_state_byid(ia->id, ia->creatorid);
833 if (st == NULL)
834 continue;
835
836 if (st->state_flags & PFSTATE_ACK) {
837 pfsync_undefer_state(st, 0);
838 }
839 PF_STATE_UNLOCK(st);
840 }
841 /*
842 * XXX this is not yet implemented, but we know the size of the
843 * message so we can skip it.
844 */
845
846 return (count * sizeof(struct pfsync_ins_ack));
847 }
848
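/*
 * Merge peer state from a TCP update.  The return value counts the
 * directions for which our local state is newer than the update; nonzero
 * means the update is stale and we should advertise our own state instead.
 */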
849 static int
850 pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src,
851 struct pfsync_state_peer *dst)
852 {
853 int sync = 0;
854
855 PF_STATE_LOCK_ASSERT(st);
856
857 /*
858 * The state should never go backwards except
859 * for syn-proxy states. Neither should the
860 * sequence window slide backwards.
861 */
862 if ((st->src.state > src->state &&
863 (st->src.state < PF_TCPS_PROXY_SRC ||
864 src->state >= PF_TCPS_PROXY_SRC)) ||
865
866 (st->src.state == src->state &&
867 SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
868 sync++;
869 else
870 pf_state_peer_ntoh(src, &st->src);
871
872 if ((st->dst.state > dst->state) ||
873
874 (st->dst.state >= TCPS_SYN_SENT &&
875 SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
876 sync++;
877 else
878 pf_state_peer_ntoh(dst, &st->dst);
879
880 return (sync);
881 }
882
883 static int
884 pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
885 {
886 struct pfsync_softc *sc = V_pfsyncif;
887 struct pfsync_state *sa, *sp;
888 struct pf_kstate *st;
889 int sync;
890
891 struct mbuf *mp;
892 int len = count * sizeof(*sp);
893 int offp, i;
894
895 mp = m_pulldown(m, offset, len, &offp);
896 if (mp == NULL) {
897 V_pfsyncstats.pfsyncs_badlen++;
898 return (-1);
899 }
900 sa = (struct pfsync_state *)(mp->m_data + offp);
901
902 for (i = 0; i < count; i++) {
903 sp = &sa[i];
904
905 /* check for invalid values */
906 if (sp->timeout >= PFTM_MAX ||
907 sp->src.state > PF_TCPS_PROXY_DST ||
908 sp->dst.state > PF_TCPS_PROXY_DST) {
909 if (V_pf_status.debug >= PF_DEBUG_MISC) {
910 printf("pfsync_input: PFSYNC_ACT_UPD: "
911 "invalid value\n");
912 }
913 V_pfsyncstats.pfsyncs_badval++;
914 continue;
915 }
916
917 st = pf_find_state_byid(sp->id, sp->creatorid);
918 if (st == NULL) {
919 /* insert the update */
920 if (pfsync_state_import(sp, pkt->flags))
921 V_pfsyncstats.pfsyncs_badstate++;
922 continue;
923 }
924
925 if (st->state_flags & PFSTATE_ACK) {
926 pfsync_undefer_state(st, 1);
927 }
928
929 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
930 sync = pfsync_upd_tcp(st, &sp->src, &sp->dst);
931 else {
932 sync = 0;
933
934 /*
935 * Non-TCP protocol state machines always go
936 * forwards.
937 */
938 if (st->src.state > sp->src.state)
939 sync++;
940 else
941 pf_state_peer_ntoh(&sp->src, &st->src);
942 if (st->dst.state > sp->dst.state)
943 sync++;
944 else
945 pf_state_peer_ntoh(&sp->dst, &st->dst);
946 }
947 if (sync < 2) {
948 pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
949 pf_state_peer_ntoh(&sp->dst, &st->dst);
950 st->expire = time_uptime;
951 st->timeout = sp->timeout;
952 }
953 st->pfsync_time = time_uptime;
954
955 if (sync) {
956 V_pfsyncstats.pfsyncs_stale++;
957
958 pfsync_update_state(st);
959 PF_STATE_UNLOCK(st);
960 pfsync_push_all(sc);
961 continue;
962 }
963 PF_STATE_UNLOCK(st);
964 }
965
966 return (len);
967 }
968
969 static int
970 pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
971 {
972 struct pfsync_softc *sc = V_pfsyncif;
973 struct pfsync_upd_c *ua, *up;
974 struct pf_kstate *st;
975 int len = count * sizeof(*up);
976 int sync;
977 struct mbuf *mp;
978 int offp, i;
979
980 mp = m_pulldown(m, offset, len, &offp);
981 if (mp == NULL) {
982 V_pfsyncstats.pfsyncs_badlen++;
983 return (-1);
984 }
985 ua = (struct pfsync_upd_c *)(mp->m_data + offp);
986
987 for (i = 0; i < count; i++) {
988 up = &ua[i];
989
990 /* check for invalid values */
991 if (up->timeout >= PFTM_MAX ||
992 up->src.state > PF_TCPS_PROXY_DST ||
993 up->dst.state > PF_TCPS_PROXY_DST) {
994 if (V_pf_status.debug >= PF_DEBUG_MISC) {
995 printf("pfsync_input: "
996 "PFSYNC_ACT_UPD_C: "
997 "invalid value\n");
998 }
999 V_pfsyncstats.pfsyncs_badval++;
1000 continue;
1001 }
1002
1003 st = pf_find_state_byid(up->id, up->creatorid);
1004 if (st == NULL) {
1005 /* We don't have this state. Ask for it. */
1006 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
1007 pfsync_request_update(up->creatorid, up->id);
1008 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
1009 continue;
1010 }
1011
1012 if (st->state_flags & PFSTATE_ACK) {
1013 pfsync_undefer_state(st, 1);
1014 }
1015
1016 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
1017 sync = pfsync_upd_tcp(st, &up->src, &up->dst);
1018 else {
1019 sync = 0;
1020
1021 /*
1022 * Non-TCP protocol state machines always go
1023 * forwards.
1024 */
1025 if (st->src.state > up->src.state)
1026 sync++;
1027 else
1028 pf_state_peer_ntoh(&up->src, &st->src);
1029 if (st->dst.state > up->dst.state)
1030 sync++;
1031 else
1032 pf_state_peer_ntoh(&up->dst, &st->dst);
1033 }
1034 if (sync < 2) {
1035 pfsync_alloc_scrub_memory(&up->dst, &st->dst);
1036 pf_state_peer_ntoh(&up->dst, &st->dst);
1037 st->expire = time_uptime;
1038 st->timeout = up->timeout;
1039 }
1040 st->pfsync_time = time_uptime;
1041
1042 if (sync) {
1043 V_pfsyncstats.pfsyncs_stale++;
1044
1045 pfsync_update_state(st);
1046 PF_STATE_UNLOCK(st);
1047 pfsync_push_all(sc);
1048 continue;
1049 }
1050 PF_STATE_UNLOCK(st);
1051 }
1052
1053 return (len);
1054 }
1055
1056 static int
1057 pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1058 {
1059 struct pfsync_upd_req *ur, *ura;
1060 struct mbuf *mp;
1061 int len = count * sizeof(*ur);
1062 int i, offp;
1063
1064 struct pf_kstate *st;
1065
1066 mp = m_pulldown(m, offset, len, &offp);
1067 if (mp == NULL) {
1068 V_pfsyncstats.pfsyncs_badlen++;
1069 return (-1);
1070 }
1071 ura = (struct pfsync_upd_req *)(mp->m_data + offp);
1072
1073 for (i = 0; i < count; i++) {
1074 ur = &ura[i];
1075
1076 if (ur->id == 0 && ur->creatorid == 0)
1077 pfsync_bulk_start();
1078 else {
1079 st = pf_find_state_byid(ur->id, ur->creatorid);
1080 if (st == NULL) {
1081 V_pfsyncstats.pfsyncs_badstate++;
1082 continue;
1083 }
1084 if (st->state_flags & PFSTATE_NOSYNC) {
1085 PF_STATE_UNLOCK(st);
1086 continue;
1087 }
1088
1089 pfsync_update_state_req(st);
1090 PF_STATE_UNLOCK(st);
1091 }
1092 }
1093
1094 return (len);
1095 }
1096
1097 static int
1098 pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1099 {
1100 struct mbuf *mp;
1101 struct pfsync_state *sa, *sp;
1102 struct pf_kstate *st;
1103 int len = count * sizeof(*sp);
1104 int offp, i;
1105
1106 mp = m_pulldown(m, offset, len, &offp);
1107 if (mp == NULL) {
1108 V_pfsyncstats.pfsyncs_badlen++;
1109 return (-1);
1110 }
1111 sa = (struct pfsync_state *)(mp->m_data + offp);
1112
1113 for (i = 0; i < count; i++) {
1114 sp = &sa[i];
1115
1116 st = pf_find_state_byid(sp->id, sp->creatorid);
1117 if (st == NULL) {
1118 V_pfsyncstats.pfsyncs_badstate++;
1119 continue;
1120 }
1121 st->state_flags |= PFSTATE_NOSYNC;
1122 pf_unlink_state(st, PF_ENTER_LOCKED);
1123 }
1124
1125 return (len);
1126 }
1127
1128 static int
1129 pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1130 {
1131 struct mbuf *mp;
1132 struct pfsync_del_c *sa, *sp;
1133 struct pf_kstate *st;
1134 int len = count * sizeof(*sp);
1135 int offp, i;
1136
1137 mp = m_pulldown(m, offset, len, &offp);
1138 if (mp == NULL) {
1139 V_pfsyncstats.pfsyncs_badlen++;
1140 return (-1);
1141 }
1142 sa = (struct pfsync_del_c *)(mp->m_data + offp);
1143
1144 for (i = 0; i < count; i++) {
1145 sp = &sa[i];
1146
1147 st = pf_find_state_byid(sp->id, sp->creatorid);
1148 if (st == NULL) {
1149 V_pfsyncstats.pfsyncs_badstate++;
1150 continue;
1151 }
1152
1153 st->state_flags |= PFSTATE_NOSYNC;
1154 pf_unlink_state(st, PF_ENTER_LOCKED);
1155 }
1156
1157 return (len);
1158 }
1159
1160 static int
1161 pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1162 {
1163 struct pfsync_softc *sc = V_pfsyncif;
1164 struct pfsync_bus *bus;
1165 struct mbuf *mp;
1166 int len = count * sizeof(*bus);
1167 int offp;
1168
1169 PFSYNC_BLOCK(sc);
1170
1171 /* If we're not waiting for a bulk update, who cares. */
1172 if (sc->sc_ureq_sent == 0) {
1173 PFSYNC_BUNLOCK(sc);
1174 return (len);
1175 }
1176
1177 mp = m_pulldown(m, offset, len, &offp);
1178 if (mp == NULL) {
1179 PFSYNC_BUNLOCK(sc);
1180 V_pfsyncstats.pfsyncs_badlen++;
1181 return (-1);
1182 }
1183 bus = (struct pfsync_bus *)(mp->m_data + offp);
1184
1185 switch (bus->status) {
1186 case PFSYNC_BUS_START:
1187 callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
1188 V_pf_limits[PF_LIMIT_STATES].limit /
1189 ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
1190 sizeof(struct pfsync_state)),
1191 pfsync_bulk_fail, sc);
1192 if (V_pf_status.debug >= PF_DEBUG_MISC)
1193 printf("pfsync: received bulk update start\n");
1194 break;
1195
1196 case PFSYNC_BUS_END:
1197 if (time_uptime - ntohl(bus->endtime) >=
1198 sc->sc_ureq_sent) {
1199 /* that's it, we're happy */
1200 sc->sc_ureq_sent = 0;
1201 sc->sc_bulk_tries = 0;
1202 callout_stop(&sc->sc_bulkfail_tmo);
1203 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
1204 (*carp_demote_adj_p)(-V_pfsync_carp_adj,
1205 "pfsync bulk done");
1206 sc->sc_flags |= PFSYNCF_OK;
1207 if (V_pf_status.debug >= PF_DEBUG_MISC)
1208 printf("pfsync: received valid "
1209 "bulk update end\n");
1210 } else {
1211 if (V_pf_status.debug >= PF_DEBUG_MISC)
1212 printf("pfsync: received invalid "
1213 "bulk update end: bad timestamp\n");
1214 }
1215 break;
1216 }
1217 PFSYNC_BUNLOCK(sc);
1218
1219 return (len);
1220 }
1221
1222 static int
1223 pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1224 {
1225 int len = count * sizeof(struct pfsync_tdb);
1226
1227 #if defined(IPSEC)
1228 struct pfsync_tdb *tp;
1229 struct mbuf *mp;
1230 int offp;
1231 int i;
1232 int s;
1233
1234 mp = m_pulldown(m, offset, len, &offp);
1235 if (mp == NULL) {
1236 V_pfsyncstats.pfsyncs_badlen++;
1237 return (-1);
1238 }
1239 tp = (struct pfsync_tdb *)(mp->m_data + offp);
1240
1241 for (i = 0; i < count; i++)
1242 pfsync_update_net_tdb(&tp[i]);
1243 #endif
1244
1245 return (len);
1246 }
1247
1248 #if defined(IPSEC)
1249 /* Update an in-kernel tdb. Silently fail if no tdb is found. */
1250 static void
1251 pfsync_update_net_tdb(struct pfsync_tdb *pt)
1252 {
1253 struct tdb *tdb;
1254 int s;
1255
1256 /* check for invalid values */
1257 if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
1258 (pt->dst.sa.sa_family != AF_INET &&
1259 pt->dst.sa.sa_family != AF_INET6))
1260 goto bad;
1261
1262 tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
1263 if (tdb) {
1264 pt->rpl = ntohl(pt->rpl);
1265 pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);
1266
1267 /* Neither replay nor byte counter should ever decrease. */
1268 if (pt->rpl < tdb->tdb_rpl ||
1269 pt->cur_bytes < tdb->tdb_cur_bytes) {
1270 goto bad;
1271 }
1272
1273 tdb->tdb_rpl = pt->rpl;
1274 tdb->tdb_cur_bytes = pt->cur_bytes;
1275 }
1276 return;
1277
1278 bad:
1279 if (V_pf_status.debug >= PF_DEBUG_MISC)
1280 printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
1281 "invalid value\n");
1282 V_pfsyncstats.pfsyncs_badstate++;
1283 return;
1284 }
1285 #endif
1286
1287 static int
1288 pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1289 {
1290 /* check if we are at the right place in the packet */
1291 if (offset != m->m_pkthdr.len)
1292 V_pfsyncstats.pfsyncs_badlen++;
1293
1294 /* we're done. free and let the caller return */
1295 m_freem(m);
1296 return (-1);
1297 }
1298
1299 static int
1300 pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1301 {
1302 V_pfsyncstats.pfsyncs_badact++;
1303
1304 m_freem(m);
1305 return (-1);
1306 }
1307
1308 static int
1309 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
1310 struct route *rt)
1311 {
1312 m_freem(m);
1313 return (0);
1314 }
1315
1316 /* ARGSUSED */
1317 static int
1318 pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1319 {
1320 struct pfsync_softc *sc = ifp->if_softc;
1321 struct ifreq *ifr = (struct ifreq *)data;
1322 struct pfsyncreq pfsyncr;
1323 int error;
1324 int c;
1325
1326 switch (cmd) {
1327 case SIOCSIFFLAGS:
1328 PFSYNC_LOCK(sc);
1329 if (ifp->if_flags & IFF_UP) {
1330 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1331 PFSYNC_UNLOCK(sc);
1332 pfsync_pointers_init();
1333 } else {
1334 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1335 PFSYNC_UNLOCK(sc);
1336 pfsync_pointers_uninit();
1337 }
1338 break;
1339 case SIOCSIFMTU:
1340 if (!sc->sc_sync_if ||
1341 ifr->ifr_mtu <= PFSYNC_MINPKT ||
1342 ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
1343 return (EINVAL);
1344 if (ifr->ifr_mtu < ifp->if_mtu) {
1345 for (c = 0; c < pfsync_buckets; c++) {
1346 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
1347 if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT)
1348 pfsync_sendout(1, c);
1349 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
1350 }
1351 }
1352 ifp->if_mtu = ifr->ifr_mtu;
1353 break;
1354 case SIOCGETPFSYNC:
1355 bzero(&pfsyncr, sizeof(pfsyncr));
1356 PFSYNC_LOCK(sc);
1357 if (sc->sc_sync_if) {
1358 strlcpy(pfsyncr.pfsyncr_syncdev,
1359 sc->sc_sync_if->if_xname, IFNAMSIZ);
1360 }
1361 pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
1362 pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
1363 pfsyncr.pfsyncr_defer = sc->sc_flags;
1364 PFSYNC_UNLOCK(sc);
1365 return (copyout(&pfsyncr, ifr_data_get_ptr(ifr),
1366 sizeof(pfsyncr)));
1367
1368 case SIOCSETPFSYNC:
1369 {
1370 struct in_mfilter *imf = NULL;
1371 struct ifnet *sifp;
1372 struct ip *ip;
1373
1374 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1375 return (error);
1376 if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr,
1377 sizeof(pfsyncr))))
1378 return (error);
1379
1380 if (pfsyncr.pfsyncr_maxupdates > 255)
1381 return (EINVAL);
1382
1383 if (pfsyncr.pfsyncr_syncdev[0] == 0)
1384 sifp = NULL;
1385 else if ((sifp = ifunit_ref(pfsyncr.pfsyncr_syncdev)) == NULL)
1386 return (EINVAL);
1387
1388 if (sifp != NULL && (
1389 pfsyncr.pfsyncr_syncpeer.s_addr == 0 ||
1390 pfsyncr.pfsyncr_syncpeer.s_addr ==
1391 htonl(INADDR_PFSYNC_GROUP)))
1392 imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
1393
1394 PFSYNC_LOCK(sc);
1395 if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
1396 sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
1397 else
1398 sc->sc_sync_peer.s_addr =
1399 pfsyncr.pfsyncr_syncpeer.s_addr;
1400
1401 sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
1402 if (pfsyncr.pfsyncr_defer & PFSYNCF_DEFER) {
1403 sc->sc_flags |= PFSYNCF_DEFER;
1404 V_pfsync_defer_ptr = pfsync_defer;
1405 } else {
1406 sc->sc_flags &= ~PFSYNCF_DEFER;
1407 V_pfsync_defer_ptr = NULL;
1408 }
1409
1410 if (sifp == NULL) {
1411 if (sc->sc_sync_if)
1412 if_rele(sc->sc_sync_if);
1413 sc->sc_sync_if = NULL;
1414 pfsync_multicast_cleanup(sc);
1415 PFSYNC_UNLOCK(sc);
1416 break;
1417 }
1418
1419 for (c = 0; c < pfsync_buckets; c++) {
1420 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
1421 if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT &&
1422 (sifp->if_mtu < sc->sc_ifp->if_mtu ||
1423 (sc->sc_sync_if != NULL &&
1424 sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
1425 sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
1426 pfsync_sendout(1, c);
1427 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
1428 }
1429
1430 pfsync_multicast_cleanup(sc);
1431
1432 if (sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
1433 error = pfsync_multicast_setup(sc, sifp, imf);
1434 if (error) {
1435 if_rele(sifp);
1436 ip_mfilter_free(imf);
1437 PFSYNC_UNLOCK(sc);
1438 return (error);
1439 }
1440 }
1441 if (sc->sc_sync_if)
1442 if_rele(sc->sc_sync_if);
1443 sc->sc_sync_if = sifp;
1444
1445 ip = &sc->sc_template;
1446 bzero(ip, sizeof(*ip));
1447 ip->ip_v = IPVERSION;
1448 ip->ip_hl = sizeof(sc->sc_template) >> 2;
1449 ip->ip_tos = IPTOS_LOWDELAY;
1450 /* len and id are set later. */
1451 ip->ip_off = htons(IP_DF);
1452 ip->ip_ttl = PFSYNC_DFLTTL;
1453 ip->ip_p = IPPROTO_PFSYNC;
1454 ip->ip_src.s_addr = INADDR_ANY;
1455 ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;
1456
1457 /* Request a full state table update. */
1458 if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
1459 (*carp_demote_adj_p)(V_pfsync_carp_adj,
1460 "pfsync bulk start");
1461 sc->sc_flags &= ~PFSYNCF_OK;
1462 if (V_pf_status.debug >= PF_DEBUG_MISC)
1463 printf("pfsync: requesting bulk update\n");
1464 PFSYNC_UNLOCK(sc);
1465 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
1466 pfsync_request_update(0, 0);
1467 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
1468 PFSYNC_BLOCK(sc);
1469 sc->sc_ureq_sent = time_uptime;
1470 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
1471 sc);
1472 PFSYNC_BUNLOCK(sc);
1473
1474 break;
1475 }
1476 default:
1477 return (ENOTTY);
1478 }
1479
1480 return (0);
1481 }
1482
1483 static void
1484 pfsync_out_state(struct pf_kstate *st, void *buf)
1485 {
1486 struct pfsync_state *sp = buf;
1487
1488 pfsync_state_export(sp, st);
1489 }
1490
1491 static void
1492 pfsync_out_iack(struct pf_kstate *st, void *buf)
1493 {
1494 struct pfsync_ins_ack *iack = buf;
1495
1496 iack->id = st->id;
1497 iack->creatorid = st->creatorid;
1498 }
1499
1500 static void
1501 pfsync_out_upd_c(struct pf_kstate *st, void *buf)
1502 {
1503 struct pfsync_upd_c *up = buf;
1504
1505 bzero(up, sizeof(*up));
1506 up->id = st->id;
1507 pf_state_peer_hton(&st->src, &up->src);
1508 pf_state_peer_hton(&st->dst, &up->dst);
1509 up->creatorid = st->creatorid;
1510 up->timeout = st->timeout;
1511 }
1512
1513 static void
1514 pfsync_out_del(struct pf_kstate *st, void *buf)
1515 {
1516 struct pfsync_del_c *dp = buf;
1517
1518 dp->id = st->id;
1519 dp->creatorid = st->creatorid;
1520 st->state_flags |= PFSTATE_NOSYNC;
1521 }
1522
1523 static void
1524 pfsync_drop(struct pfsync_softc *sc)
1525 {
1526 struct pf_kstate *st, *next;
1527 struct pfsync_upd_req_item *ur;
1528 struct pfsync_bucket *b;
1529 int c, q;
1530
1531 for (c = 0; c < pfsync_buckets; c++) {
1532 b = &sc->sc_buckets[c];
1533 for (q = 0; q < PFSYNC_S_COUNT; q++) {
1534 if (TAILQ_EMPTY(&b->b_qs[q]))
1535 continue;
1536
1537 TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
1538 KASSERT(st->sync_state == q,
1539 ("%s: st->sync_state == q",
1540 __func__));
1541 st->sync_state = PFSYNC_S_NONE;
1542 pf_release_state(st);
1543 }
1544 TAILQ_INIT(&b->b_qs[q]);
1545 }
1546
1547 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
1548 TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
1549 free(ur, M_PFSYNC);
1550 }
1551
1552 b->b_len = PFSYNC_MINPKT;
1553 b->b_plus = NULL;
1554 }
1555 }
1556
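/*
 * Serialize everything queued on bucket c into one pfsync datagram: IP
 * header, pfsync header, a subheader per non-empty queue, any pending
 * update requests, an optional "plus" region and a trailing EOF subheader.
 * The packet is handed to BPF and the bucket send queue, and the software
 * interrupt is scheduled if schedswi is set.
 */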
1557 static void
1558 pfsync_sendout(int schedswi, int c)
1559 {
1560 struct pfsync_softc *sc = V_pfsyncif;
1561 struct ifnet *ifp = sc->sc_ifp;
1562 struct mbuf *m;
1563 struct ip *ip;
1564 struct pfsync_header *ph;
1565 struct pfsync_subheader *subh;
1566 struct pf_kstate *st, *st_next;
1567 struct pfsync_upd_req_item *ur;
1568 struct pfsync_bucket *b = &sc->sc_buckets[c];
1569 int offset;
1570 int q, count = 0;
1571
1572 KASSERT(sc != NULL, ("%s: null sc", __func__));
1573 KASSERT(b->b_len > PFSYNC_MINPKT,
1574 ("%s: sc_len %zu", __func__, b->b_len));
1575 PFSYNC_BUCKET_LOCK_ASSERT(b);
1576
1577 if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
1578 pfsync_drop(sc);
1579 return;
1580 }
1581
1582 m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR);
1583 if (m == NULL) {
1584 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1585 V_pfsyncstats.pfsyncs_onomem++;
1586 return;
1587 }
1588 m->m_data += max_linkhdr;
1589 m->m_len = m->m_pkthdr.len = b->b_len;
1590
1591 /* build the ip header */
1592 ip = (struct ip *)m->m_data;
1593 bcopy(&sc->sc_template, ip, sizeof(*ip));
1594 offset = sizeof(*ip);
1595
1596 ip->ip_len = htons(m->m_pkthdr.len);
1597 ip_fillid(ip);
1598
1599 /* build the pfsync header */
1600 ph = (struct pfsync_header *)(m->m_data + offset);
1601 bzero(ph, sizeof(*ph));
1602 offset += sizeof(*ph);
1603
1604 ph->version = PFSYNC_VERSION;
1605 ph->len = htons(b->b_len - sizeof(*ip));
1606 bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
1607
1608 /* walk the queues */
1609 for (q = 0; q < PFSYNC_S_COUNT; q++) {
1610 if (TAILQ_EMPTY(&b->b_qs[q]))
1611 continue;
1612
1613 subh = (struct pfsync_subheader *)(m->m_data + offset);
1614 offset += sizeof(*subh);
1615
1616 count = 0;
1617 TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) {
1618 KASSERT(st->sync_state == q,
1619 ("%s: st->sync_state == q",
1620 __func__));
1621 /*
1622 * XXXGL: some of write methods do unlocked reads
1623 * of state data :(
1624 */
1625 pfsync_qs[q].write(st, m->m_data + offset);
1626 offset += pfsync_qs[q].len;
1627 st->sync_state = PFSYNC_S_NONE;
1628 pf_release_state(st);
1629 count++;
1630 }
1631 TAILQ_INIT(&b->b_qs[q]);
1632
1633 bzero(subh, sizeof(*subh));
1634 subh->action = pfsync_qs[q].action;
1635 subh->count = htons(count);
1636 V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
1637 }
1638
1639 if (!TAILQ_EMPTY(&b->b_upd_req_list)) {
1640 subh = (struct pfsync_subheader *)(m->m_data + offset);
1641 offset += sizeof(*subh);
1642
1643 count = 0;
1644 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
1645 TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
1646
1647 bcopy(&ur->ur_msg, m->m_data + offset,
1648 sizeof(ur->ur_msg));
1649 offset += sizeof(ur->ur_msg);
1650 free(ur, M_PFSYNC);
1651 count++;
1652 }
1653
1654 bzero(subh, sizeof(*subh));
1655 subh->action = PFSYNC_ACT_UPD_REQ;
1656 subh->count = htons(count);
1657 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
1658 }
1659
1660 /* has someone built a custom region for us to add? */
1661 if (b->b_plus != NULL) {
1662 bcopy(b->b_plus, m->m_data + offset, b->b_pluslen);
1663 offset += b->b_pluslen;
1664
1665 b->b_plus = NULL;
1666 }
1667
1668 subh = (struct pfsync_subheader *)(m->m_data + offset);
1669 offset += sizeof(*subh);
1670
1671 bzero(subh, sizeof(*subh));
1672 subh->action = PFSYNC_ACT_EOF;
1673 subh->count = htons(1);
1674 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;
1675
1676 /* we're done, let's put it on the wire */
1677 if (ifp->if_bpf) {
1678 m->m_data += sizeof(*ip);
1679 m->m_len = m->m_pkthdr.len = b->b_len - sizeof(*ip);
1680 BPF_MTAP(ifp, m);
1681 m->m_data -= sizeof(*ip);
1682 m->m_len = m->m_pkthdr.len = b->b_len;
1683 }
1684
1685 if (sc->sc_sync_if == NULL) {
1686 b->b_len = PFSYNC_MINPKT;
1687 m_freem(m);
1688 return;
1689 }
1690
1691 if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
1692 if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1693 b->b_len = PFSYNC_MINPKT;
1694
1695 if (!_IF_QFULL(&b->b_snd))
1696 _IF_ENQUEUE(&b->b_snd, m);
1697 else {
1698 m_freem(m);
1699 if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
1700 }
1701 if (schedswi)
1702 swi_sched(V_pfsync_swi_cookie, 0);
1703 }
1704
1705 static void
1706 pfsync_insert_state(struct pf_kstate *st)
1707 {
1708 struct pfsync_softc *sc = V_pfsyncif;
1709 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1710
1711 if (st->state_flags & PFSTATE_NOSYNC)
1712 return;
1713
1714 if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
1715 st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
1716 st->state_flags |= PFSTATE_NOSYNC;
1717 return;
1718 }
1719
1720 KASSERT(st->sync_state == PFSYNC_S_NONE,
1721 ("%s: st->sync_state %u", __func__, st->sync_state));
1722
1723 PFSYNC_BUCKET_LOCK(b);
1724 if (b->b_len == PFSYNC_MINPKT)
1725 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
1726
1727 pfsync_q_ins(st, PFSYNC_S_INS, true);
1728 PFSYNC_BUCKET_UNLOCK(b);
1729
1730 st->sync_updates = 0;
1731 }
1732
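/*
 * Defer transmission of the packet that created this state until the peer
 * has had a chance to acknowledge the insert.  Returns 1 if the packet was
 * taken over (the caller must not transmit it), 0 otherwise.
 */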
1733 static int
1734 pfsync_defer(struct pf_kstate *st, struct mbuf *m)
1735 {
1736 struct pfsync_softc *sc = V_pfsyncif;
1737 struct pfsync_deferral *pd;
1738 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1739
1740 if (m->m_flags & (M_BCAST|M_MCAST))
1741 return (0);
1742
1743 if (sc == NULL)
1744 return (0);
1745
1746 PFSYNC_LOCK(sc);
1747
1748 if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) ||
1749 !(sc->sc_flags & PFSYNCF_DEFER)) {
1750 PFSYNC_UNLOCK(sc);
1751 return (0);
1752 }
1753
1754 PFSYNC_BUCKET_LOCK(b);
1755 PFSYNC_UNLOCK(sc);
1756
1757 if (b->b_deferred >= 128)
1758 pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0);
1759
1760 pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
1761 if (pd == NULL) {
1762 PFSYNC_BUCKET_UNLOCK(b);
1763 return (0);
1764 }
1765 b->b_deferred++;
1766
1767 m->m_flags |= M_SKIP_FIREWALL;
1768 st->state_flags |= PFSTATE_ACK;
1769
1770 pd->pd_sc = sc;
1771 pd->pd_refs = 0;
1772 pd->pd_st = st;
1773 pf_ref_state(st);
1774 pd->pd_m = m;
1775
1776 TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry);
1777 callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED);
1778 callout_reset(&pd->pd_tmo, PFSYNC_DEFER_TIMEOUT, pfsync_defer_tmo, pd);
1779
1780 pfsync_push(b);
1781 PFSYNC_BUCKET_UNLOCK(b);
1782
1783 return (1);
1784 }
1785
1786 static void
1787 pfsync_undefer(struct pfsync_deferral *pd, int drop)
1788 {
1789 struct pfsync_softc *sc = pd->pd_sc;
1790 struct mbuf *m = pd->pd_m;
1791 struct pf_kstate *st = pd->pd_st;
1792 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1793
1794 PFSYNC_BUCKET_LOCK_ASSERT(b);
1795
1796 TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
1797 b->b_deferred--;
1798 pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */
1799 free(pd, M_PFSYNC);
1800 pf_release_state(st);
1801
1802 if (drop)
1803 m_freem(m);
1804 else {
1805 _IF_ENQUEUE(&b->b_snd, m);
1806 pfsync_push(b);
1807 }
1808 }
1809
1810 static void
1811 pfsync_defer_tmo(void *arg)
1812 {
1813 struct epoch_tracker et;
1814 struct pfsync_deferral *pd = arg;
1815 struct pfsync_softc *sc = pd->pd_sc;
1816 struct mbuf *m = pd->pd_m;
1817 struct pf_kstate *st = pd->pd_st;
1818 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1819
1820 PFSYNC_BUCKET_LOCK_ASSERT(b);
1821
1822 NET_EPOCH_ENTER(et);
1823 CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
1824
1825 TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
1826 b->b_deferred--;
1827 pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */
1828 if (pd->pd_refs == 0)
1829 free(pd, M_PFSYNC);
1830 PFSYNC_BUCKET_UNLOCK(b);
1831
1832 ip_output(m, NULL, NULL, 0, NULL, NULL);
1833
1834 pf_release_state(st);
1835
1836 CURVNET_RESTORE();
1837 NET_EPOCH_EXIT(et);
1838 }
1839
1840 static void
1841 pfsync_undefer_state(struct pf_kstate *st, int drop)
1842 {
1843 struct pfsync_softc *sc = V_pfsyncif;
1844 struct pfsync_deferral *pd;
1845 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1846
1847 PFSYNC_BUCKET_LOCK(b);
1848
1849 TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) {
1850 if (pd->pd_st == st) {
1851 if (callout_stop(&pd->pd_tmo) > 0)
1852 pfsync_undefer(pd, drop);
1853
1854 PFSYNC_BUCKET_UNLOCK(b);
1855 return;
1856 }
1857 }
1858 PFSYNC_BUCKET_UNLOCK(b);
1859
1860 panic("%s: unable to find deferred state", __func__);
1861 }
1862
1863 static struct pfsync_bucket*
1864 pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st)
1865 {
1866 int c = PF_IDHASH(st) % pfsync_buckets;
1867 return &sc->sc_buckets[c];
1868 }
1869
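/*
 * Note a state change for transmission.  Updates are coalesced into the
 * compressed UPD_C queue; the bucket is pushed early once sc_maxupdates
 * consecutive TCP updates have accumulated or the state's pfsync_time is
 * less than two seconds old.
 */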
1870 static void
1871 pfsync_update_state(struct pf_kstate *st)
1872 {
1873 struct pfsync_softc *sc = V_pfsyncif;
1874 bool sync = false, ref = true;
1875 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1876
1877 PF_STATE_LOCK_ASSERT(st);
1878 PFSYNC_BUCKET_LOCK(b);
1879
1880 if (st->state_flags & PFSTATE_ACK)
1881 pfsync_undefer_state(st, 0);
1882 if (st->state_flags & PFSTATE_NOSYNC) {
1883 if (st->sync_state != PFSYNC_S_NONE)
1884 pfsync_q_del(st, true, b);
1885 PFSYNC_BUCKET_UNLOCK(b);
1886 return;
1887 }
1888
1889 if (b->b_len == PFSYNC_MINPKT)
1890 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
1891
1892 switch (st->sync_state) {
1893 case PFSYNC_S_UPD_C:
1894 case PFSYNC_S_UPD:
1895 case PFSYNC_S_INS:
1896 /* we're already handling it */
1897
1898 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
1899 st->sync_updates++;
1900 if (st->sync_updates >= sc->sc_maxupdates)
1901 sync = true;
1902 }
1903 break;
1904
1905 case PFSYNC_S_IACK:
1906 pfsync_q_del(st, false, b);
1907 ref = false;
1908 /* FALLTHROUGH */
1909
1910 case PFSYNC_S_NONE:
1911 pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
1912 st->sync_updates = 0;
1913 break;
1914
1915 default:
1916 panic("%s: unexpected sync state %d", __func__, st->sync_state);
1917 }
1918
1919 if (sync || (time_uptime - st->pfsync_time) < 2)
1920 pfsync_push(b);
1921
1922 PFSYNC_BUCKET_UNLOCK(b);
1923 }
1924
1925 static void
1926 pfsync_request_update(u_int32_t creatorid, u_int64_t id)
1927 {
1928 struct pfsync_softc *sc = V_pfsyncif;
1929 struct pfsync_bucket *b = &sc->sc_buckets[0];
1930 struct pfsync_upd_req_item *item;
1931 size_t nlen = sizeof(struct pfsync_upd_req);
1932
1933 PFSYNC_BUCKET_LOCK_ASSERT(b);
1934
1935 /*
1936 * This code does a bit to prevent multiple update requests for the
1937 * same state being generated. It searches current subheader queue,
1938 * but it doesn't lookup into queue of already packed datagrams.
1939 */
1940 TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry)
1941 if (item->ur_msg.id == id &&
1942 item->ur_msg.creatorid == creatorid)
1943 return;
1944
1945 item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
1946 if (item == NULL)
1947 return; /* XXX stats */
1948
1949 item->ur_msg.id = id;
1950 item->ur_msg.creatorid = creatorid;
1951
1952 if (TAILQ_EMPTY(&b->b_upd_req_list))
1953 nlen += sizeof(struct pfsync_subheader);
1954
1955 if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
1956 pfsync_sendout(0, 0);
1957
1958 nlen = sizeof(struct pfsync_subheader) +
1959 sizeof(struct pfsync_upd_req);
1960 }
1961
1962 TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry);
1963 b->b_len += nlen;
1964
1965 pfsync_push(b);
1966 }
1967
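/*
 * Queue a full state update (PFSYNC_S_UPD), as used for update
 * requests and bulk transfers.  Returns true once the pending packet
 * has no room left for another state, so the caller can pause.
 */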
1968 static bool
1969 pfsync_update_state_req(struct pf_kstate *st)
1970 {
1971 struct pfsync_softc *sc = V_pfsyncif;
1972 bool ref = true, full = false;
1973 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1974
1975 PF_STATE_LOCK_ASSERT(st);
1976 PFSYNC_BUCKET_LOCK(b);
1977
1978 if (st->state_flags & PFSTATE_NOSYNC) {
1979 if (st->sync_state != PFSYNC_S_NONE)
1980 pfsync_q_del(st, true, b);
1981 PFSYNC_BUCKET_UNLOCK(b);
1982 return (full);
1983 }
1984
1985 switch (st->sync_state) {
1986 case PFSYNC_S_UPD_C:
1987 case PFSYNC_S_IACK:
1988 pfsync_q_del(st, false, b);
1989 ref = false;
1990 /* FALLTHROUGH */
1991
1992 case PFSYNC_S_NONE:
1993 pfsync_q_ins(st, PFSYNC_S_UPD, ref);
1994 pfsync_push(b);
1995 break;
1996
1997 case PFSYNC_S_INS:
1998 case PFSYNC_S_UPD:
1999 case PFSYNC_S_DEL:
2000 /* we're already handling it */
2001 break;
2002
2003 default:
2004 panic("%s: unexpected sync state %d", __func__, st->sync_state);
2005 }
2006
2007 if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(struct pfsync_state))
2008 full = true;
2009
2010 PFSYNC_BUCKET_UNLOCK(b);
2011
2012 return (full);
2013 }
2014
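/*
 * Announce deletion of a state.  If the insert was never sent the
 * state is simply dropped from its queue; otherwise it is moved to
 * the delete queue.
 */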
2015 static void
2016 pfsync_delete_state(struct pf_kstate *st)
2017 {
2018 struct pfsync_softc *sc = V_pfsyncif;
2019 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2020 bool ref = true;
2021
2022 PFSYNC_BUCKET_LOCK(b);
2023 if (st->state_flags & PFSTATE_ACK)
2024 pfsync_undefer_state(st, 1);
2025 if (st->state_flags & PFSTATE_NOSYNC) {
2026 if (st->sync_state != PFSYNC_S_NONE)
2027 pfsync_q_del(st, true, b);
2028 PFSYNC_BUCKET_UNLOCK(b);
2029 return;
2030 }
2031
2032 if (b->b_len == PFSYNC_MINPKT)
2033 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
2034
2035 switch (st->sync_state) {
2036 case PFSYNC_S_INS:
2037 		/* We never got to tell the world, so just forget about it. */
2038 pfsync_q_del(st, true, b);
2039 break;
2040
2041 case PFSYNC_S_UPD_C:
2042 case PFSYNC_S_UPD:
2043 case PFSYNC_S_IACK:
2044 pfsync_q_del(st, false, b);
2045 ref = false;
2046 /* FALLTHROUGH */
2047
2048 case PFSYNC_S_NONE:
2049 pfsync_q_ins(st, PFSYNC_S_DEL, ref);
2050 break;
2051
2052 default:
2053 panic("%s: unexpected sync state %d", __func__, st->sync_state);
2054 }
2055
2056 PFSYNC_BUCKET_UNLOCK(b);
2057 }
2058
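/* Tell peers to clear all states matching creatorid/ifname. */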
2059 static void
2060 pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2061 {
2062 struct {
2063 struct pfsync_subheader subh;
2064 struct pfsync_clr clr;
2065 } __packed r;
2066
2067 bzero(&r, sizeof(r));
2068
2069 r.subh.action = PFSYNC_ACT_CLR;
2070 r.subh.count = htons(1);
2071 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;
2072
2073 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2074 r.clr.creatorid = creatorid;
2075
2076 pfsync_send_plus(&r, sizeof(r));
2077 }
2078
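/*
 * Insert a state into queue 'q' of its bucket, accounting for the
 * subheader when the queue was empty and flushing first if the packet
 * would exceed the interface MTU.
 */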
2079 static void
2080 pfsync_q_ins(struct pf_kstate *st, int q, bool ref)
2081 {
2082 struct pfsync_softc *sc = V_pfsyncif;
2083 size_t nlen = pfsync_qs[q].len;
2084 struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2085
2086 PFSYNC_BUCKET_LOCK_ASSERT(b);
2087
2088 KASSERT(st->sync_state == PFSYNC_S_NONE,
2089 ("%s: st->sync_state %u", __func__, st->sync_state));
2090 KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
2091 b->b_len));
2092
2093 if (TAILQ_EMPTY(&b->b_qs[q]))
2094 nlen += sizeof(struct pfsync_subheader);
2095
2096 if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
2097 pfsync_sendout(1, b->b_id);
2098
2099 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
2100 }
2101
2102 b->b_len += nlen;
2103 TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
2104 st->sync_state = q;
2105 if (ref)
2106 pf_ref_state(st);
2107 }
2108
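/*
 * Remove a state from its queue and subtract its length (and the
 * subheader, if the queue is now empty) from the pending packet.
 */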
2109 static void
2110 pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b)
2111 {
2112 int q = st->sync_state;
2113
2114 PFSYNC_BUCKET_LOCK_ASSERT(b);
2115 KASSERT(st->sync_state != PFSYNC_S_NONE,
2116 ("%s: st->sync_state != PFSYNC_S_NONE", __func__));
2117
2118 b->b_len -= pfsync_qs[q].len;
2119 TAILQ_REMOVE(&b->b_qs[q], st, sync_list);
2120 st->sync_state = PFSYNC_S_NONE;
2121 if (unref)
2122 pf_release_state(st);
2123
2124 if (TAILQ_EMPTY(&b->b_qs[q]))
2125 b->b_len -= sizeof(struct pfsync_subheader);
2126 }
2127
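/* A peer requested a bulk update: reset the cursor and start sending. */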
2128 static void
2129 pfsync_bulk_start(void)
2130 {
2131 struct pfsync_softc *sc = V_pfsyncif;
2132
2133 if (V_pf_status.debug >= PF_DEBUG_MISC)
2134 printf("pfsync: received bulk update request\n");
2135
2136 PFSYNC_BLOCK(sc);
2137
2138 sc->sc_ureq_received = time_uptime;
2139 sc->sc_bulk_hashid = 0;
2140 sc->sc_bulk_stateid = 0;
2141 pfsync_bulk_status(PFSYNC_BUS_START);
2142 callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
2143 PFSYNC_BUNLOCK(sc);
2144 }
2145
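/*
 * Callout walking the state table for a bulk update.  Full updates are
 * sent for states that are not already queued and have not changed
 * since the request was received; when a packet fills up the position
 * is saved and the walk resumes on the next tick.
 */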
2146 static void
2147 pfsync_bulk_update(void *arg)
2148 {
2149 struct pfsync_softc *sc = arg;
2150 struct pf_kstate *s;
2151 int i, sent = 0;
2152
2153 PFSYNC_BLOCK_ASSERT(sc);
2154 CURVNET_SET(sc->sc_ifp->if_vnet);
2155
2156 	/*
2157 	 * Start with the last state from the previous invocation.
2158 	 * It may have gone away, in which case start from the
2159 	 * saved hash slot.
2160 	 */
2161 s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);
2162
2163 if (s != NULL)
2164 i = PF_IDHASH(s);
2165 else
2166 i = sc->sc_bulk_hashid;
2167
2168 for (; i <= pf_hashmask; i++) {
2169 struct pf_idhash *ih = &V_pf_idhash[i];
2170
2171 if (s != NULL)
2172 PF_HASHROW_ASSERT(ih);
2173 else {
2174 PF_HASHROW_LOCK(ih);
2175 s = LIST_FIRST(&ih->states);
2176 }
2177
2178 for (; s; s = LIST_NEXT(s, entry)) {
2179 if (s->sync_state == PFSYNC_S_NONE &&
2180 s->timeout < PFTM_MAX &&
2181 s->pfsync_time <= sc->sc_ureq_received) {
2182 if (pfsync_update_state_req(s)) {
2183 /* We've filled a packet. */
2184 sc->sc_bulk_hashid = i;
2185 sc->sc_bulk_stateid = s->id;
2186 sc->sc_bulk_creatorid = s->creatorid;
2187 PF_HASHROW_UNLOCK(ih);
2188 callout_reset(&sc->sc_bulk_tmo, 1,
2189 pfsync_bulk_update, sc);
2190 goto full;
2191 }
2192 sent++;
2193 }
2194 }
2195 PF_HASHROW_UNLOCK(ih);
2196 }
2197
2198 /* We're done. */
2199 pfsync_bulk_status(PFSYNC_BUS_END);
2200 full:
2201 CURVNET_RESTORE();
2202 }
2203
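/* Send a bulk update status (start/end) message. */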
2204 static void
2205 pfsync_bulk_status(u_int8_t status)
2206 {
2207 struct {
2208 struct pfsync_subheader subh;
2209 struct pfsync_bus bus;
2210 } __packed r;
2211
2212 struct pfsync_softc *sc = V_pfsyncif;
2213
2214 bzero(&r, sizeof(r));
2215
2216 r.subh.action = PFSYNC_ACT_BUS;
2217 r.subh.count = htons(1);
2218 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;
2219
2220 r.bus.creatorid = V_pf_status.hostid;
2221 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
2222 r.bus.status = status;
2223
2224 pfsync_send_plus(&r, sizeof(r));
2225 }
2226
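/*
 * Our bulk update request was not answered in time.  Retry up to
 * PFSYNC_MAX_BULKTRIES times, then give up: undo the carp demotion
 * and carry on as if the transfer had completed.
 */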
2227 static void
2228 pfsync_bulk_fail(void *arg)
2229 {
2230 struct pfsync_softc *sc = arg;
2231 struct pfsync_bucket *b = &sc->sc_buckets[0];
2232
2233 CURVNET_SET(sc->sc_ifp->if_vnet);
2234
2235 PFSYNC_BLOCK_ASSERT(sc);
2236
2237 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
2238 /* Try again */
2239 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
2240 pfsync_bulk_fail, V_pfsyncif);
2241 PFSYNC_BUCKET_LOCK(b);
2242 pfsync_request_update(0, 0);
2243 PFSYNC_BUCKET_UNLOCK(b);
2244 } else {
2245 		/* Pretend the transfer was ok. */
2246 sc->sc_ureq_sent = 0;
2247 sc->sc_bulk_tries = 0;
2248 PFSYNC_LOCK(sc);
2249 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
2250 (*carp_demote_adj_p)(-V_pfsync_carp_adj,
2251 "pfsync bulk fail");
2252 sc->sc_flags |= PFSYNCF_OK;
2253 PFSYNC_UNLOCK(sc);
2254 if (V_pf_status.debug >= PF_DEBUG_MISC)
2255 printf("pfsync: failed to receive bulk update\n");
2256 }
2257
2258 CURVNET_RESTORE();
2259 }
2260
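/*
 * Append an out-of-band chunk (e.g. a CLR or BUS message) to bucket 0
 * and transmit it immediately.
 */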
2261 static void
2262 pfsync_send_plus(void *plus, size_t pluslen)
2263 {
2264 struct pfsync_softc *sc = V_pfsyncif;
2265 struct pfsync_bucket *b = &sc->sc_buckets[0];
2266
2267 PFSYNC_BUCKET_LOCK(b);
2268
2269 if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
2270 pfsync_sendout(1, b->b_id);
2271
2272 b->b_plus = plus;
2273 b->b_len += (b->b_pluslen = pluslen);
2274
2275 pfsync_sendout(1, b->b_id);
2276 PFSYNC_BUCKET_UNLOCK(b);
2277 }
2278
2279 static void
2280 pfsync_timeout(void *arg)
2281 {
2282 struct pfsync_bucket *b = arg;
2283
2284 CURVNET_SET(b->b_sc->sc_ifp->if_vnet);
2285 PFSYNC_BUCKET_LOCK(b);
2286 pfsync_push(b);
2287 PFSYNC_BUCKET_UNLOCK(b);
2288 CURVNET_RESTORE();
2289 }
2290
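/* Mark a bucket for transmission and schedule the software interrupt. */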
2291 static void
2292 pfsync_push(struct pfsync_bucket *b)
2293 {
2294
2295 PFSYNC_BUCKET_LOCK_ASSERT(b);
2296
2297 b->b_flags |= PFSYNCF_BUCKET_PUSH;
2298 swi_sched(V_pfsync_swi_cookie, 0);
2299 }
2300
2301 static void
2302 pfsync_push_all(struct pfsync_softc *sc)
2303 {
2304 int c;
2305 struct pfsync_bucket *b;
2306
2307 for (c = 0; c < pfsync_buckets; c++) {
2308 b = &sc->sc_buckets[c];
2309
2310 PFSYNC_BUCKET_LOCK(b);
2311 pfsync_push(b);
2312 PFSYNC_BUCKET_UNLOCK(b);
2313 }
2314 }
2315
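/*
 * Software interrupt handler: for every bucket, build the pending
 * packet if a push was requested, then transmit everything on the send
 * queue.  Deferred packets (M_SKIP_FIREWALL) are sent as ordinary IP;
 * pfsync packets go to the configured peer.
 */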
2316 static void
2317 pfsyncintr(void *arg)
2318 {
2319 struct epoch_tracker et;
2320 struct pfsync_softc *sc = arg;
2321 struct pfsync_bucket *b;
2322 struct mbuf *m, *n;
2323 int c;
2324
2325 NET_EPOCH_ENTER(et);
2326 CURVNET_SET(sc->sc_ifp->if_vnet);
2327
2328 for (c = 0; c < pfsync_buckets; c++) {
2329 b = &sc->sc_buckets[c];
2330
2331 PFSYNC_BUCKET_LOCK(b);
2332 if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) {
2333 pfsync_sendout(0, b->b_id);
2334 b->b_flags &= ~PFSYNCF_BUCKET_PUSH;
2335 }
2336 _IF_DEQUEUE_ALL(&b->b_snd, m);
2337 PFSYNC_BUCKET_UNLOCK(b);
2338
2339 for (; m != NULL; m = n) {
2340 n = m->m_nextpkt;
2341 m->m_nextpkt = NULL;
2342
2343 			/*
2344 			 * We distinguish between a deferral packet and our
2345 			 * own pfsync packet based on the M_SKIP_FIREWALL
2346 			 * flag.  This is XXX.
2347 			 */
2348 if (m->m_flags & M_SKIP_FIREWALL)
2349 ip_output(m, NULL, NULL, 0, NULL, NULL);
2350 else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo,
2351 NULL) == 0)
2352 V_pfsyncstats.pfsyncs_opackets++;
2353 else
2354 V_pfsyncstats.pfsyncs_oerrors++;
2355 }
2356 }
2357 CURVNET_RESTORE();
2358 NET_EPOCH_EXIT(et);
2359 }
2360
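/*
 * Join the pfsync multicast group on the sync interface and set up
 * the multicast options used for outgoing packets.
 */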
2361 static int
2362 pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
2363 struct in_mfilter *imf)
2364 {
2365 struct ip_moptions *imo = &sc->sc_imo;
2366 int error;
2367
2368 if (!(ifp->if_flags & IFF_MULTICAST))
2369 return (EADDRNOTAVAIL);
2370
2371 imo->imo_multicast_vif = -1;
2372
2373 if ((error = in_joingroup(ifp, &sc->sc_sync_peer, NULL,
2374 &imf->imf_inm)) != 0)
2375 return (error);
2376
2377 ip_mfilter_init(&imo->imo_head);
2378 ip_mfilter_insert(&imo->imo_head, imf);
2379 imo->imo_multicast_ifp = ifp;
2380 imo->imo_multicast_ttl = PFSYNC_DFLTTL;
2381 imo->imo_multicast_loop = 0;
2382
2383 return (0);
2384 }
2385
2386 static void
2387 pfsync_multicast_cleanup(struct pfsync_softc *sc)
2388 {
2389 struct ip_moptions *imo = &sc->sc_imo;
2390 struct in_mfilter *imf;
2391
2392 while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
2393 ip_mfilter_remove(&imo->imo_head, imf);
2394 in_leavegroup(imf->imf_inm, NULL);
2395 ip_mfilter_free(imf);
2396 }
2397 imo->imo_multicast_ifp = NULL;
2398 }
2399
2400 void
2401 pfsync_detach_ifnet(struct ifnet *ifp)
2402 {
2403 struct pfsync_softc *sc = V_pfsyncif;
2404
2405 if (sc == NULL)
2406 return;
2407
2408 PFSYNC_LOCK(sc);
2409
2410 if (sc->sc_sync_if == ifp) {
2411 		/* We don't need multicast cleanup here, because the interface
2412 		 * is going away. We do need to ensure we don't try to do
2413 		 * cleanup later.
2414 		 */
2415 ip_mfilter_init(&sc->sc_imo.imo_head);
2416 sc->sc_imo.imo_multicast_ifp = NULL;
2417 sc->sc_sync_if = NULL;
2418 }
2419
2420 PFSYNC_UNLOCK(sc);
2421 }
2422
2423 #ifdef INET
2424 extern struct domain inetdomain;
2425 static struct protosw in_pfsync_protosw = {
2426 .pr_type = SOCK_RAW,
2427 .pr_domain = &inetdomain,
2428 .pr_protocol = IPPROTO_PFSYNC,
2429 .pr_flags = PR_ATOMIC|PR_ADDR,
2430 .pr_input = pfsync_input,
2431 .pr_output = rip_output,
2432 .pr_ctloutput = rip_ctloutput,
2433 .pr_usrreqs = &rip_usrreqs
2434 };
2435 #endif
2436
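/* Hook the pfsync callbacks into pf. */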
2437 static void
2438 pfsync_pointers_init()
2439 {
2440
2441 PF_RULES_WLOCK();
2442 V_pfsync_state_import_ptr = pfsync_state_import;
2443 V_pfsync_insert_state_ptr = pfsync_insert_state;
2444 V_pfsync_update_state_ptr = pfsync_update_state;
2445 V_pfsync_delete_state_ptr = pfsync_delete_state;
2446 V_pfsync_clear_states_ptr = pfsync_clear_states;
2447 V_pfsync_defer_ptr = pfsync_defer;
2448 PF_RULES_WUNLOCK();
2449 }
2450
2451 static void
2452 pfsync_pointers_uninit()
2453 {
2454
2455 PF_RULES_WLOCK();
2456 V_pfsync_state_import_ptr = NULL;
2457 V_pfsync_insert_state_ptr = NULL;
2458 V_pfsync_update_state_ptr = NULL;
2459 V_pfsync_delete_state_ptr = NULL;
2460 V_pfsync_clear_states_ptr = NULL;
2461 V_pfsync_defer_ptr = NULL;
2462 PF_RULES_WUNLOCK();
2463 }
2464
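/*
 * Per-VNET initialization: attach the interface cloner, register the
 * software interrupt handler and install the pf hooks.
 */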
2465 static void
2466 vnet_pfsync_init(const void *unused __unused)
2467 {
2468 int error;
2469
2470 V_pfsync_cloner = if_clone_simple(pfsyncname,
2471 pfsync_clone_create, pfsync_clone_destroy, 1);
2472 error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif,
2473 SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
2474 if (error) {
2475 if_clone_detach(V_pfsync_cloner);
2476 log(LOG_INFO, "swi_add() failed in %s\n", __func__);
2477 }
2478
2479 pfsync_pointers_init();
2480 }
2481 VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
2482 vnet_pfsync_init, NULL);
2483
2484 static void
2485 vnet_pfsync_uninit(const void *unused __unused)
2486 {
2487 int ret;
2488
2489 pfsync_pointers_uninit();
2490
2491 if_clone_detach(V_pfsync_cloner);
2492 ret = swi_remove(V_pfsync_swi_cookie);
2493 MPASS(ret == 0);
2494 ret = intr_event_destroy(V_pfsync_swi_ie);
2495 MPASS(ret == 0);
2496 }
2497
2498 VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
2499 vnet_pfsync_uninit, NULL);
2500
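/*
 * Module load: register the pfsync protocol with the inet stack
 * (INET only).
 */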
2501 static int
2502 pfsync_init()
2503 {
2504 #ifdef INET
2505 int error;
2506
2507 pfsync_detach_ifnet_ptr = pfsync_detach_ifnet;
2508
2509 error = pf_proto_register(PF_INET, &in_pfsync_protosw);
2510 if (error)
2511 return (error);
2512 error = ipproto_register(IPPROTO_PFSYNC);
2513 if (error) {
2514 pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
2515 return (error);
2516 }
2517 #endif
2518
2519 return (0);
2520 }
2521
2522 static void
2523 pfsync_uninit()
2524 {
2525 pfsync_detach_ifnet_ptr = NULL;
2526
2527 #ifdef INET
2528 ipproto_unregister(IPPROTO_PFSYNC);
2529 pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
2530 #endif
2531 }
2532
2533 static int
2534 pfsync_modevent(module_t mod, int type, void *data)
2535 {
2536 int error = 0;
2537
2538 switch (type) {
2539 case MOD_LOAD:
2540 error = pfsync_init();
2541 break;
2542 case MOD_UNLOAD:
2543 pfsync_uninit();
2544 break;
2545 default:
2546 error = EINVAL;
2547 break;
2548 }
2549
2550 return (error);
2551 }
2552
2553 static moduledata_t pfsync_mod = {
2554 pfsyncname,
2555 pfsync_modevent,
2556 0
2557 };
2558
2559 #define PFSYNC_MODVER 1
2560
2561 /* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
2562 DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
2563 MODULE_VERSION(pfsync, PFSYNC_MODVER);
2564 MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
2565