/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
static struct pf_kpool  *pf_get_kpool(const char *, u_int32_t, u_int8_t,
                            u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void             pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void             pf_empty_kpool(struct pf_kpalist *);
static int              pfioctl(struct cdev *, u_long, caddr_t, int,
                            struct thread *);
static int              pf_begin_eth(uint32_t *, const char *);
static void             pf_rollback_eth_cb(struct epoch_context *);
static int              pf_rollback_eth(uint32_t, const char *);
static int              pf_commit_eth(uint32_t, const char *);
static void             pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int              pf_begin_altq(u_int32_t *);
static int              pf_rollback_altq(u_int32_t);
static int              pf_commit_altq(u_int32_t);
static int              pf_enable_altq(struct pf_altq *);
static int              pf_disable_altq(struct pf_altq *);
static uint16_t         pf_qname2qid(const char *);
static void             pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int              pf_begin_rules(u_int32_t *, int, const char *);
static int              pf_rollback_rules(u_int32_t, int, char *);
static int              pf_setup_pfsync_matching(struct pf_kruleset *);
static void             pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void             pf_hash_rule(struct pf_krule *);
static void             pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int              pf_commit_rules(u_int32_t, int, char *);
static int              pf_addr_setup(struct pf_kruleset *,
                            struct pf_addr_wrap *, sa_family_t);
static void             pf_addr_copyout(struct pf_addr_wrap *);
static void             pf_src_node_copy(const struct pf_ksrc_node *,
                            struct pf_src_node *);
#ifdef ALTQ
static int              pf_export_kaltq(struct pf_altq *,
                            struct pfioc_altq_v1 *, size_t);
static int              pf_import_kaltq(struct pfioc_altq_v1 *,
                            struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule, pf_default_rule);

static __inline int     pf_krule_compare(struct pf_krule *,
                            struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int, pf_altq_running);
#define V_pf_altq_running       VNET(pf_altq_running)
#endif

#define TAGID_MAX       50000
struct pf_tagname {
        TAILQ_ENTRY(pf_tagname) namehash_entries;
        TAILQ_ENTRY(pf_tagname) taghash_entries;
        char                    name[PF_TAG_NAME_SIZE];
        uint16_t                tag;
        int                     ref;
};

struct pf_tagset {
        TAILQ_HEAD(, pf_tagname)        *namehash;
        TAILQ_HEAD(, pf_tagname)        *taghash;
        unsigned int                     mask;
        uint32_t                         seed;
        BITSET_DEFINE(, TAGID_MAX)       avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define V_pf_tags       VNET(pf_tags)
static unsigned int     pf_rule_tag_hashsize;
#define PF_RULE_TAG_HASH_SIZE_DEFAULT   128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define V_pf_qids       VNET(pf_qids)
static unsigned int     pf_queue_tag_hashsize;
#define PF_QUEUE_TAG_HASH_SIZE_DEFAULT  128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t, pf_tag_z);
#define V_pf_tag_z      VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
#define V_pf_filter_local       VNET(pf_filter_local)
SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pf_filter_local), false,
    "Enable filtering for packets delivered to local network stack");

static void             pf_init_tagset(struct pf_tagset *, unsigned int *,
                            unsigned int);
static void             pf_cleanup_tagset(struct pf_tagset *);
static uint16_t         tagname2hashindex(const struct pf_tagset *,
                            const char *);
static uint16_t         tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t        tagname2tag(struct pf_tagset *, const char *);
static u_int16_t        pf_tagname2tag(const char *);
static void             tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void             pf_clear_all_states(void);
static unsigned int     pf_clear_states(const struct pf_kstate_kill *);
static void             pf_killstates(struct pf_kstate_kill *,
                            unsigned int *);
static int              pf_killstates_row(struct pf_kstate_kill *,
                            struct pf_idhash *);
static int              pf_killstates_nv(struct pfioc_nv *);
static int              pf_clearstates_nv(struct pfioc_nv *);
static int              pf_getstate(struct pfioc_nv *);
static int              pf_getstatus(struct pfioc_nv *);
static int              pf_clear_tables(void);
static void             pf_clear_srcnodes(struct pf_ksrc_node *);
static void             pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int              pf_keepcounters(struct pfioc_nv *);
static void             pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void             hook_pf_eth(void);
static void             hook_pf(void);
static void             dehook_pf_eth(void);
static void             dehook_pf(void);
static int              shutdown_pf(void);
static int              pf_load(void);
static void             pf_unload(void);

static struct cdevsw pf_cdevsw = {
        .d_ioctl =      pfioctl,
        .d_name =       PF_NAME,
        .d_version =    D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define V_pf_pfil_hooked        VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked    VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid". We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active        VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define V_pf_ioctl_lock         VNET(pf_ioctl_lock)
struct sx pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
        if (strnlen(src, sz) == sz)
                return (EINVAL);
        (void)strlcpy(dst, src, sz);
        return (0);
}
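
/*
 * Usage sketch (illustrative only; "buf" and "user_name" below are
 * hypothetical, not part of the original source).  Unlike a plain
 * strlcpy(), pf_user_strcpy() fails instead of silently truncating:
 */
#if 0
        char buf[PF_TAG_NAME_SIZE];

        /* EINVAL if no NUL appears within the first sizeof(buf) bytes. */
        if (pf_user_strcpy(buf, user_name, sizeof(buf)) != 0)
                return (EINVAL);
#endif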

static void
pfattach_vnet(void)
{
        u_int32_t *my_timeout = V_pf_default_rule.timeout;

        bzero(&V_pf_status, sizeof(V_pf_status));

        pf_initialize();
        pfr_initialize();
        pfi_initialize_vnet();
        pf_normalize_init();
        pf_syncookies_init();

        V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
        V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

        RB_INIT(&V_pf_anchors);
        pf_init_kruleset(&pf_main_ruleset);

        pf_init_keth(V_pf_keth);

        /* default rule should never be garbage collected */
        V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
        V_pf_default_rule.action = PF_DROP;
#else
        V_pf_default_rule.action = PF_PASS;
#endif
        V_pf_default_rule.nr = -1;
        V_pf_default_rule.rtableid = -1;

        pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
        for (int i = 0; i < 2; i++) {
                pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
                pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
        }
        V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
        V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
        V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

        V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
            M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
        V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
        V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
        PF_RULES_WLOCK();
        LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
        LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
        V_pf_allrulecount++;
        LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
        PF_RULES_WUNLOCK();
#endif

        /* initialize default timeouts */
        my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
        my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
        my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
        my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
        my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
        my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
        my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
        my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
        my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
        my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
        my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
        my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
        my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
        my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
        my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
        my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
        my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
        my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
        my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
        my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
        my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
        my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
        my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
        my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
        my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

        V_pf_status.debug = PF_DEBUG_URGENT;
        /*
         * XXX This is different from OpenBSD, where reassembly is enabled
         * by default. In FreeBSD we expect people to still use scrub rules
         * and switch to the new syntax later. Only when they switch must
         * they explicitly enable reassembly. We could change the default
         * once the scrub rule functionality is hopefully removed some day
         * in the future.
         */
        V_pf_status.reass = 0;

        V_pf_pfil_hooked = false;
        V_pf_pfil_eth_hooked = false;

        /* XXX do our best to avoid a conflict */
        V_pf_status.hostid = arc4random();

        for (int i = 0; i < PFRES_MAX; i++)
                V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < KLCNT_MAX; i++)
                V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
        for (int i = 0; i < FCNT_MAX; i++)
                pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
        for (int i = 0; i < SCNT_MAX; i++)
                V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

        if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
            INTR_MPSAFE, &V_pf_swi_cookie) != 0)
                /* XXXGL: leaked all above. */
                return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
        struct pf_kruleset *ruleset;
        struct pf_krule *rule;
        int rs_num;

        ruleset = pf_find_kruleset(anchor);
        if (ruleset == NULL)
                return (NULL);
        rs_num = pf_get_ruleset_number(rule_action);
        if (rs_num >= PF_RULESET_MAX)
                return (NULL);
        if (active) {
                if (check_ticket && ticket !=
                    ruleset->rules[rs_num].active.ticket)
                        return (NULL);
                if (r_last)
                        rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
                            pf_krulequeue);
                else
                        rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
        } else {
                if (check_ticket && ticket !=
                    ruleset->rules[rs_num].inactive.ticket)
                        return (NULL);
                if (r_last)
                        rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
                            pf_krulequeue);
                else
                        rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
        }
        if (!r_last) {
                while ((rule != NULL) && (rule->nr != rule_number))
                        rule = TAILQ_NEXT(rule, entries);
        }
        if (rule == NULL)
                return (NULL);

        return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
        struct pf_kpooladdr *mv_pool_pa;

        while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
                TAILQ_REMOVE(poola, mv_pool_pa, entries);
                TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
        }
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
        struct pf_kpooladdr *pa;

        while ((pa = TAILQ_FIRST(poola)) != NULL) {
                switch (pa->addr.type) {
                case PF_ADDR_DYNIFTL:
                        pfi_dynaddr_remove(pa->addr.p.dyn);
                        break;
                case PF_ADDR_TABLE:
                        /* XXX: this could be unfinished pooladdr on pabuf */
                        if (pa->addr.p.tbl != NULL)
                                pfr_detach_table(pa->addr.p.tbl);
                        break;
                }
                if (pa->kif)
                        pfi_kkif_unref(pa->kif);
                TAILQ_REMOVE(poola, pa, entries);
                free(pa, M_PFRULE);
        }
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

        PF_RULES_WASSERT();
        PF_UNLNKDRULES_ASSERT();

        TAILQ_REMOVE(rulequeue, rule, entries);

        rule->rule_ref |= PFRULE_REFS;
        TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

        PF_RULES_WASSERT();

        PF_UNLNKDRULES_LOCK();
        pf_unlink_rule_locked(rulequeue, rule);
        PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
        PF_RULES_WASSERT();

        if (rule == NULL)
                return;

        if (rule->tag)
                tag_unref(&V_pf_tags, rule->tag);
        if (rule->match_tag)
                tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
        pf_qid_unref(rule->qid);
#endif

        if (rule->bridge_to)
                pfi_kkif_unref(rule->bridge_to);
        if (rule->kif)
                pfi_kkif_unref(rule->kif);

        if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
                pfr_detach_table(rule->ipsrc.addr.p.tbl);
        if (rule->ipdst.addr.type == PF_ADDR_TABLE)
                pfr_detach_table(rule->ipdst.addr.p.tbl);

        counter_u64_free(rule->evaluations);
        for (int i = 0; i < 2; i++) {
                counter_u64_free(rule->packets[i]);
                counter_u64_free(rule->bytes[i]);
        }
        uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
        pf_keth_anchor_remove(rule);

        free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

        PF_RULES_WASSERT();
        PF_CONFIG_ASSERT();

        if (rule->tag)
                tag_unref(&V_pf_tags, rule->tag);
        if (rule->match_tag)
                tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
        if (rule->pqid != rule->qid)
                pf_qid_unref(rule->pqid);
        pf_qid_unref(rule->qid);
#endif
        switch (rule->src.addr.type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(rule->src.addr.p.dyn);
                break;
        case PF_ADDR_TABLE:
                pfr_detach_table(rule->src.addr.p.tbl);
                break;
        }
        switch (rule->dst.addr.type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_remove(rule->dst.addr.p.dyn);
                break;
        case PF_ADDR_TABLE:
                pfr_detach_table(rule->dst.addr.p.tbl);
                break;
        }
        if (rule->overload_tbl)
                pfr_detach_table(rule->overload_tbl);
        if (rule->kif)
                pfi_kkif_unref(rule->kif);
        pf_kanchor_remove(rule);
        pf_empty_kpool(&rule->rpool.list);

        pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
        unsigned int i;
        unsigned int hashsize;

        if (*tunable_size == 0 || !powerof2(*tunable_size))
                *tunable_size = default_size;

        hashsize = *tunable_size;
        ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
            M_WAITOK);
        ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
            M_WAITOK);
        ts->mask = hashsize - 1;
        ts->seed = arc4random();
        for (i = 0; i < hashsize; i++) {
                TAILQ_INIT(&ts->namehash[i]);
                TAILQ_INIT(&ts->taghash[i]);
        }
        BIT_FILL(TAGID_MAX, &ts->avail);
}
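
/*
 * Illustrative note: because the size is forced to a power of two above,
 * the "& ts->mask" in the hash-index functions below is equivalent to a
 * modulo by the table size.  A hypothetical sketch with the rule tag
 * table (call site and tunable value are assumptions, not original code):
 */
#if 0
        /* A tunable of 100 is rejected (not a power of two): */
        pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
            PF_RULE_TAG_HASH_SIZE_DEFAULT);     /* hashsize 128, mask 0x7f */
#endif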

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
        unsigned int i;
        unsigned int hashsize;
        struct pf_tagname *t, *tmp;

        /*
         * Only need to clean up one of the hashes as each tag is hashed
         * into each table.
         */
        hashsize = ts->mask + 1;
        for (i = 0; i < hashsize; i++)
                TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
                        uma_zfree(V_pf_tag_z, t);

        free(ts->namehash, M_PFHASH);
        free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
        size_t len;

        len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
        return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

        return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
        struct pf_tagname *tag;
        u_int32_t index;
        u_int16_t new_tagid;

        PF_RULES_WASSERT();

        index = tagname2hashindex(ts, tagname);
        TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
                if (strcmp(tagname, tag->name) == 0) {
                        tag->ref++;
                        return (tag->tag);
                }

        /*
         * new entry
         *
         * to avoid fragmentation, we do a linear search from the beginning
         * and take the first free slot we find.
         */
        new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
        /*
         * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
         * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
         * set.  It may also return a bit number greater than TAGID_MAX due
         * to rounding of the number of bits in the vector up to a multiple
         * of the vector word size at declaration/allocation time.
         */
        if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
                return (0);

        /* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
        BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

        /* allocate and fill new struct pf_tagname */
        tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
        if (tag == NULL)
                return (0);
        strlcpy(tag->name, tagname, sizeof(tag->name));
        tag->tag = new_tagid;
        tag->ref = 1;

        /* Insert into namehash */
        TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

        /* Insert into taghash */
        index = tag2hashindex(ts, new_tagid);
        TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

        return (tag->tag);
}
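
/*
 * Illustrative walk-through of the allocation above (added note): tags are
 * 1-based while the "avail" bitset is 0-based.  On a fresh tagset,
 * BIT_FFS() returns 1 (bit 0 is set), so the first tag handed out is 1 and
 * bit 0 is then cleared; tag_unref() below re-sets bit (tag - 1) once the
 * last reference is dropped, making the number reusable.
 */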

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
        struct pf_tagname *t;
        uint16_t index;

        PF_RULES_WASSERT();

        index = tag2hashindex(ts, tag);
        TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
                if (tag == t->tag) {
                        if (--t->ref == 0) {
                                TAILQ_REMOVE(&ts->taghash[index], t,
                                    taghash_entries);
                                index = tagname2hashindex(ts, t->name);
                                TAILQ_REMOVE(&ts->namehash[index], t,
                                    namehash_entries);
                                /* Bits are 0-based for BIT_SET() */
                                BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
                                uma_zfree(V_pf_tag_z, t);
                        }
                        break;
                }
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
        return (tagname2tag(&V_pf_tags, tagname));
}

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
        struct pf_keth_rule *rule, *tmp;
        struct pf_keth_ruleset *rs;

        PF_RULES_WASSERT();

        rs = pf_find_or_create_keth_ruleset(anchor);
        if (rs == NULL)
                return (EINVAL);

        /* Purge old inactive rules. */
        TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
                TAILQ_REMOVE(rs->inactive.rules, rule, entries);
                pf_free_eth_rule(rule);
        }

        *ticket = ++rs->inactive.ticket;
        rs->inactive.open = 1;

        return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
        struct pf_keth_ruleset *rs;

        rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

        CURVNET_SET(rs->vnet);

        PF_RULES_WLOCK();
        pf_rollback_eth(rs->inactive.ticket,
            rs->anchor ? rs->anchor->path : "");
        PF_RULES_WUNLOCK();

        CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
        struct pf_keth_rule *rule, *tmp;
        struct pf_keth_ruleset *rs;

        PF_RULES_WASSERT();

        rs = pf_find_keth_ruleset(anchor);
        if (rs == NULL)
                return (EINVAL);

        if (!rs->inactive.open ||
            ticket != rs->inactive.ticket)
                return (0);

        /* Purge old inactive rules. */
        TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
                TAILQ_REMOVE(rs->inactive.rules, rule, entries);
                pf_free_eth_rule(rule);
        }

        rs->inactive.open = 0;

        pf_remove_if_empty_keth_ruleset(rs);

        return (0);
}

#define PF_SET_SKIP_STEPS(i)                                    \
        do {                                                    \
                while (head[i] != cur) {                        \
                        head[i]->skip[i].ptr = cur;             \
                        head[i] = TAILQ_NEXT(head[i], entries); \
                }                                               \
        } while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
        struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
        int i;

        cur = TAILQ_FIRST(rules);
        prev = cur;
        for (i = 0; i < PFE_SKIP_COUNT; ++i)
                head[i] = cur;
        while (cur != NULL) {
                if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
                        PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
                if (cur->direction != prev->direction)
                        PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
                if (cur->proto != prev->proto)
                        PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
                if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
                        PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
                if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
                        PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
                if (cur->ipsrc.neg != prev->ipsrc.neg ||
                    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
                        PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
                if (cur->ipdst.neg != prev->ipdst.neg ||
                    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
                        PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);

                prev = cur;
                cur = TAILQ_NEXT(cur, entries);
        }
        for (i = 0; i < PFE_SKIP_COUNT; ++i)
                PF_SET_SKIP_STEPS(i);
}
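
/*
 * Illustrative note on the computation above: for each field tracked by
 * PFE_SKIP_*, head[i] trails behind cur until that field changes; at that
 * point every rule from head[i] up to (but excluding) cur gets cur as its
 * skip target for that field, so the evaluator can jump over a run of
 * rules that all share, e.g., the same interface or protocol instead of
 * testing each one individually.
 */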

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
        struct pf_keth_ruleq *rules;
        struct pf_keth_ruleset *rs;

        rs = pf_find_keth_ruleset(anchor);
        if (rs == NULL) {
                return (EINVAL);
        }

        if (!rs->inactive.open ||
            ticket != rs->inactive.ticket)
                return (EBUSY);

        PF_RULES_WASSERT();

        pf_eth_calc_skip_steps(rs->inactive.rules);

        rules = rs->active.rules;
        ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
        rs->inactive.rules = rules;
        rs->inactive.ticket = rs->active.ticket;

        /*
         * Clean up inactive rules (i.e. previously active rules), only
         * when we're sure they're no longer used.
         */
        NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

        return (0);
}
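
/*
 * Sketch of the staged-commit protocol implemented by pf_begin_eth(),
 * pf_rollback_eth() and pf_commit_eth() above (hypothetical caller and
 * "anchor" variable, shown for illustration only): rules accumulate on
 * the inactive list under a ticket and become visible atomically via the
 * pointer swap in pf_commit_eth().
 */
#if 0
        uint32_t ticket;
        int error;

        PF_RULES_WLOCK();
        error = pf_begin_eth(&ticket, anchor);
        /* ... stage new rules on rs->inactive.rules ... */
        if (error != 0)
                pf_rollback_eth(ticket, anchor);
        else
                error = pf_commit_eth(ticket, anchor); /* swaps rule lists */
        PF_RULES_WUNLOCK();
#endif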

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
        return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
        tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
        struct pf_altq *altq, *tmp;
        int error = 0;

        PF_RULES_WASSERT();

        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        error = altq_remove(altq);
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);
        if (error)
                return (error);
        *ticket = ++V_ticket_altqs_inactive;
        V_altqs_inactive_open = 1;
        return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
        struct pf_altq *altq, *tmp;
        int error = 0;

        PF_RULES_WASSERT();

        if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
                return (0);
        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        error = altq_remove(altq);
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);
        V_altqs_inactive_open = 0;
        return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
        struct pf_altqqueue *old_altqs, *old_altq_ifs;
        struct pf_altq *altq, *tmp;
        int err, error = 0;

        PF_RULES_WASSERT();

        if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
                return (EBUSY);

        /* swap altqs, keep the old. */
        old_altqs = V_pf_altqs_active;
        old_altq_ifs = V_pf_altq_ifs_active;
        V_pf_altqs_active = V_pf_altqs_inactive;
        V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
        V_pf_altqs_inactive = old_altqs;
        V_pf_altq_ifs_inactive = old_altq_ifs;
        V_ticket_altqs_active = V_ticket_altqs_inactive;

        /* Attach new disciplines */
        TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* attach the discipline */
                        error = altq_pfattach(altq);
                        if (error == 0 && V_pf_altq_running)
                                error = pf_enable_altq(altq);
                        if (error != 0)
                                return (error);
                }
        }

        /* Purge the old altq lists */
        TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
                if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
                        /* detach and destroy the discipline */
                        if (V_pf_altq_running)
                                error = pf_disable_altq(altq);
                        err = altq_pfdetach(altq);
                        if (err != 0 && error == 0)
                                error = err;
                        err = altq_remove(altq);
                        if (err != 0 && error == 0)
                                error = err;
                }
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altq_ifs_inactive);
        TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
                pf_qid_unref(altq->qid);
                free(altq, M_PFALTQ);
        }
        TAILQ_INIT(V_pf_altqs_inactive);

        V_altqs_inactive_open = 0;
        return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
        struct ifnet *ifp;
        struct tb_profile tb;
        int error = 0;

        if ((ifp = ifunit(altq->ifname)) == NULL)
                return (EINVAL);

        if (ifp->if_snd.altq_type != ALTQT_NONE)
                error = altq_enable(&ifp->if_snd);

        /* set tokenbucket regulator */
        if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
                tb.rate = altq->ifbandwidth;
                tb.depth = altq->tbrsize;
                error = tbr_set(&ifp->if_snd, &tb);
        }

        return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
        struct ifnet *ifp;
        struct tb_profile tb;
        int error;

        if ((ifp = ifunit(altq->ifname)) == NULL)
                return (EINVAL);

        /*
         * When the discipline is no longer referenced, it has been
         * overridden by a new one.  If so, just return.
         */
        if (altq->altq_disc != ifp->if_snd.altq_disc)
                return (0);

        error = altq_disable(&ifp->if_snd);

        if (error == 0) {
                /* clear tokenbucket regulator */
                tb.rate = 0;
                error = tbr_set(&ifp->if_snd, &tb);
        }

        return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
        struct ifnet *ifp1;
        int error = 0;

        /* Deactivate the interface in question */
        altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
        if ((ifp1 = ifunit(altq->ifname)) == NULL ||
            (remove && ifp1 == ifp)) {
                altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
        } else {
                error = altq_add(ifp1, altq);

                if (ticket != V_ticket_altqs_inactive)
                        error = EBUSY;

                if (error)
                        free(altq, M_PFALTQ);
        }

        return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
        struct pf_altq *a1, *a2, *a3;
        u_int32_t ticket;
        int error = 0;

        /*
         * No need to re-evaluate the configuration for events on interfaces
         * that do not support ALTQ, as it's not possible for such
         * interfaces to be part of the configuration.
         */
        if (!ALTQ_IS_READY(&ifp->if_snd))
                return;

        /* Interrupt userland queue modifications */
        if (V_altqs_inactive_open)
                pf_rollback_altq(V_ticket_altqs_inactive);

        /* Start new altq ruleset */
        if (pf_begin_altq(&ticket))
                return;

        /* Copy the current active set */
        TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
                a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
                if (a2 == NULL) {
                        error = ENOMEM;
                        break;
                }
                bcopy(a1, a2, sizeof(struct pf_altq));

                error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
                if (error)
                        break;

                TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
        }
        if (error)
                goto out;
        TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
                a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
                if (a2 == NULL) {
                        error = ENOMEM;
                        break;
                }
                bcopy(a1, a2, sizeof(struct pf_altq));

                if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
                        error = EBUSY;
                        free(a2, M_PFALTQ);
                        break;
                }
                a2->altq_disc = NULL;
                TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
                        if (strncmp(a3->ifname, a2->ifname,
                            IFNAMSIZ) == 0) {
                                a2->altq_disc = a3->altq_disc;
                                break;
                        }
                }
                error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
                if (error)
                        break;

                TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
        }

out:
        if (error != 0)
                pf_rollback_altq(ticket);
        else
                pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
        struct pf_krule_global *tree;

        tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
        if (tree == NULL)
                return (NULL);
        RB_INIT(tree);
        return (tree);
}

static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

        free(tree, M_TEMP);
}

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
        struct pf_krule_global *tree;
        struct pf_kruleset *rs;
        struct pf_krule *rule;

        PF_RULES_WASSERT();

        if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
                return (EINVAL);
        tree = pf_rule_tree_alloc(M_NOWAIT);
        if (tree == NULL)
                return (ENOMEM);
        rs = pf_find_or_create_kruleset(anchor);
        if (rs == NULL) {
                free(tree, M_TEMP);
                return (EINVAL);
        }
        pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
        rs->rules[rs_num].inactive.tree = tree;

        while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
                pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
                rs->rules[rs_num].inactive.rcount--;
        }
        *ticket = ++rs->rules[rs_num].inactive.ticket;
        rs->rules[rs_num].inactive.open = 1;
        return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
        struct pf_kruleset *rs;
        struct pf_krule *rule;

        PF_RULES_WASSERT();

        if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
                return (EINVAL);
        rs = pf_find_kruleset(anchor);
        if (rs == NULL || !rs->rules[rs_num].inactive.open ||
            rs->rules[rs_num].inactive.ticket != ticket)
                return (0);
        while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
                pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
                rs->rules[rs_num].inactive.rcount--;
        }
        rs->rules[rs_num].inactive.open = 0;
        return (0);
}

#define PF_MD5_UPD(st, elm)                                             \
        MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)                                         \
        MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
        (stor) = htonl((st)->elm);                                      \
        MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));        \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
        (stor) = htons((st)->elm);                                      \
        MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));        \
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
        PF_MD5_UPD(pfr, addr.type);
        switch (pfr->addr.type) {
        case PF_ADDR_DYNIFTL:
                PF_MD5_UPD(pfr, addr.v.ifname);
                PF_MD5_UPD(pfr, addr.iflags);
                break;
        case PF_ADDR_TABLE:
                PF_MD5_UPD(pfr, addr.v.tblname);
                break;
        case PF_ADDR_ADDRMASK:
                /* XXX ignore af? */
                PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
                PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
                break;
        }

        PF_MD5_UPD(pfr, port[0]);
        PF_MD5_UPD(pfr, port[1]);
        PF_MD5_UPD(pfr, neg);
        PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
        u_int16_t x;
        u_int32_t y;

        pf_hash_rule_addr(ctx, &rule->src);
        pf_hash_rule_addr(ctx, &rule->dst);
        for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
                PF_MD5_UPD_STR(rule, label[i]);
        PF_MD5_UPD_STR(rule, ifname);
        PF_MD5_UPD_STR(rule, match_tagname);
        PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
        PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
        PF_MD5_UPD_HTONL(rule, prob, y);
        PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
        PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
        PF_MD5_UPD(rule, uid.op);
        PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
        PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
        PF_MD5_UPD(rule, gid.op);
        PF_MD5_UPD_HTONL(rule, rule_flag, y);
        PF_MD5_UPD(rule, action);
        PF_MD5_UPD(rule, direction);
        PF_MD5_UPD(rule, af);
        PF_MD5_UPD(rule, quick);
        PF_MD5_UPD(rule, ifnot);
        PF_MD5_UPD(rule, match_tag_not);
        PF_MD5_UPD(rule, natpass);
        PF_MD5_UPD(rule, keep_state);
        PF_MD5_UPD(rule, proto);
        PF_MD5_UPD(rule, type);
        PF_MD5_UPD(rule, code);
        PF_MD5_UPD(rule, flags);
        PF_MD5_UPD(rule, flagset);
        PF_MD5_UPD(rule, allow_opts);
        PF_MD5_UPD(rule, rt);
        PF_MD5_UPD(rule, tos);
        PF_MD5_UPD(rule, scrub_flags);
        PF_MD5_UPD(rule, min_ttl);
        PF_MD5_UPD(rule, set_tos);
        if (rule->anchor != NULL)
                PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
        MD5_CTX ctx;

        MD5Init(&ctx);
        pf_hash_rule_rolling(&ctx, rule);
        MD5Final(rule->md5sum, &ctx);
}
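
/*
 * Illustrative note: the MD5 digest computed above is the sole key used by
 * pf_krule_compare() below, so the global rule tree orders rules by digest
 * and pf_commit_rules() can match a reloaded rule to its previous
 * incarnation when preserving counters across a ruleset commit.
 */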

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

        return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
        struct pf_kruleset *rs;
        struct pf_krule *rule, **old_array, *old_rule;
        struct pf_krulequeue *old_rules;
        struct pf_krule_global *old_tree;
        int error;
        u_int32_t old_rcount;

        PF_RULES_WASSERT();

        if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
                return (EINVAL);
        rs = pf_find_kruleset(anchor);
        if (rs == NULL || !rs->rules[rs_num].inactive.open ||
            ticket != rs->rules[rs_num].inactive.ticket)
                return (EBUSY);

        /* Calculate checksum for the main ruleset */
        if (rs == &pf_main_ruleset) {
                error = pf_setup_pfsync_matching(rs);
                if (error != 0)
                        return (error);
        }

        /* Swap rules, keep the old. */
        old_rules = rs->rules[rs_num].active.ptr;
        old_rcount = rs->rules[rs_num].active.rcount;
        old_array = rs->rules[rs_num].active.ptr_array;
        old_tree = rs->rules[rs_num].active.tree;

        rs->rules[rs_num].active.ptr =
            rs->rules[rs_num].inactive.ptr;
        rs->rules[rs_num].active.ptr_array =
            rs->rules[rs_num].inactive.ptr_array;
        rs->rules[rs_num].active.tree =
            rs->rules[rs_num].inactive.tree;
        rs->rules[rs_num].active.rcount =
            rs->rules[rs_num].inactive.rcount;

        /* Attempt to preserve counter information. */
        if (V_pf_status.keep_counters && old_tree != NULL) {
                TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
                    entries) {
                        old_rule = RB_FIND(pf_krule_global, old_tree, rule);
                        if (old_rule == NULL) {
                                continue;
                        }
                        pf_counter_u64_critical_enter();
                        pf_counter_u64_add_protected(&rule->evaluations,
                            pf_counter_u64_fetch(&old_rule->evaluations));
                        pf_counter_u64_add_protected(&rule->packets[0],
                            pf_counter_u64_fetch(&old_rule->packets[0]));
                        pf_counter_u64_add_protected(&rule->packets[1],
                            pf_counter_u64_fetch(&old_rule->packets[1]));
                        pf_counter_u64_add_protected(&rule->bytes[0],
                            pf_counter_u64_fetch(&old_rule->bytes[0]));
                        pf_counter_u64_add_protected(&rule->bytes[1],
                            pf_counter_u64_fetch(&old_rule->bytes[1]));
                        pf_counter_u64_critical_exit();
                }
        }

        rs->rules[rs_num].inactive.ptr = old_rules;
        rs->rules[rs_num].inactive.ptr_array = old_array;
        rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
        rs->rules[rs_num].inactive.rcount = old_rcount;

        rs->rules[rs_num].active.ticket =
            rs->rules[rs_num].inactive.ticket;
        pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

        /* Purge the old rule list. */
        PF_UNLNKDRULES_LOCK();
        while ((rule = TAILQ_FIRST(old_rules)) != NULL)
                pf_unlink_rule_locked(old_rules, rule);
        PF_UNLNKDRULES_UNLOCK();
        if (rs->rules[rs_num].inactive.ptr_array)
                free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
        rs->rules[rs_num].inactive.ptr_array = NULL;
        rs->rules[rs_num].inactive.rcount = 0;
        rs->rules[rs_num].inactive.open = 0;
        pf_remove_if_empty_kruleset(rs);
        free(old_tree, M_TEMP);

        return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
        MD5_CTX ctx;
        struct pf_krule *rule;
        int rs_cnt;
        u_int8_t digest[PF_MD5_DIGEST_LENGTH];

        MD5Init(&ctx);
        for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
                /* XXX PF_RULESET_SCRUB as well? */
                if (rs_cnt == PF_RULESET_SCRUB)
                        continue;

                if (rs->rules[rs_cnt].inactive.ptr_array)
                        free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
                rs->rules[rs_cnt].inactive.ptr_array = NULL;

                if (rs->rules[rs_cnt].inactive.rcount) {
                        rs->rules[rs_cnt].inactive.ptr_array =
                            mallocarray(rs->rules[rs_cnt].inactive.rcount,
                            sizeof(struct pf_rule **),
                            M_TEMP, M_NOWAIT);

                        if (!rs->rules[rs_cnt].inactive.ptr_array)
                                return (ENOMEM);
                }

                TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
                    entries) {
                        pf_hash_rule_rolling(&ctx, rule);
                        (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
                }
        }

        MD5Final(digest, &ctx);
        memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
        return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
        int error = 0;

        switch (addr->type) {
        case PF_ADDR_TABLE:
                addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
                if (addr->p.tbl == NULL)
                        error = ENOMEM;
                break;
        default:
                error = EINVAL;
        }

        return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
        int error = 0;

        switch (addr->type) {
        case PF_ADDR_TABLE:
                addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
                if (addr->p.tbl == NULL)
                        error = ENOMEM;
                break;
        case PF_ADDR_DYNIFTL:
                error = pfi_dynaddr_setup(addr, af);
                break;
        }

        return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

        switch (addr->type) {
        case PF_ADDR_DYNIFTL:
                pfi_dynaddr_copyout(addr);
                break;
        case PF_ADDR_TABLE:
                pf_tbladdr_copyout(addr);
                break;
        }
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
        int secs = time_uptime, diff;

        bzero(out, sizeof(struct pf_src_node));

        bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
        bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

        if (in->rule.ptr != NULL)
                out->rule.nr = in->rule.ptr->nr;

        for (int i = 0; i < 2; i++) {
                out->bytes[i] = counter_u64_fetch(in->bytes[i]);
                out->packets[i] = counter_u64_fetch(in->packets[i]);
        }

        out->states = in->states;
        out->conn = in->conn;
        out->af = in->af;
        out->ruletype = in->ruletype;

        out->creation = secs - in->creation;
        if (out->expire > secs)
                out->expire -= secs;
        else
                out->expire = 0;

        /* Adjust the connection rate estimate. */
        diff = secs - in->conn_rate.last;
        if (diff >= in->conn_rate.seconds)
                out->conn_rate.count = 0;
        else
                out->conn_rate.count -=
                    in->conn_rate.count * diff /
                    in->conn_rate.seconds;
}
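
/*
 * Worked example for the rate adjustment above (illustrative numbers):
 * the estimate is decayed by count * diff / seconds, so with
 * conn_rate.count = 10 over conn_rate.seconds = 60 and diff = 15 seconds
 * since the last update, the exported count is reduced by
 * 10 * 15 / 60 = 2; once diff reaches the full window it is cleared
 * to zero.
 */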

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
        u_int32_t version;

        if (ioc_size == sizeof(struct pfioc_altq_v0))
                version = 0;
        else
                version = pa->version;

        if (version > PFIOC_ALTQ_VERSION)
                return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
        bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

        switch (version) {
        case 0: {
                struct pf_altq_v0 *exported_q =
                    &((struct pfioc_altq_v0 *)pa)->altq;

                COPY(ifname);

                ASSIGN(scheduler);
                ASSIGN(tbrsize);
                exported_q->tbrsize = SATU16(q->tbrsize);
                exported_q->ifbandwidth = SATU32(q->ifbandwidth);

                COPY(qname);
                COPY(parent);
                ASSIGN(parent_qid);
                exported_q->bandwidth = SATU32(q->bandwidth);
                ASSIGN(priority);
                ASSIGN(local_flags);

                ASSIGN(qlimit);
                ASSIGN(flags);

                if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
            SATU32(q->pq_u.hfsc_opts.x)

                        ASSIGN_OPT_SATU32(rtsc_m1);
                        ASSIGN_OPT(rtsc_d);
                        ASSIGN_OPT_SATU32(rtsc_m2);

                        ASSIGN_OPT_SATU32(lssc_m1);
                        ASSIGN_OPT(lssc_d);
                        ASSIGN_OPT_SATU32(lssc_m2);

                        ASSIGN_OPT_SATU32(ulsc_m1);
                        ASSIGN_OPT(ulsc_d);
                        ASSIGN_OPT_SATU32(ulsc_m2);

                        ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
                } else
                        COPY(pq_u);

                ASSIGN(qid);
                break;
        }
        case 1: {
                struct pf_altq_v1 *exported_q =
                    &((struct pfioc_altq_v1 *)pa)->altq;

                COPY(ifname);

                ASSIGN(scheduler);
                ASSIGN(tbrsize);
                ASSIGN(ifbandwidth);

                COPY(qname);
                COPY(parent);
                ASSIGN(parent_qid);
                ASSIGN(bandwidth);
                ASSIGN(priority);
                ASSIGN(local_flags);

                ASSIGN(qlimit);
                ASSIGN(flags);
                COPY(pq_u);

                ASSIGN(qid);
                break;
        }
        default:
                panic("%s: unhandled struct pfioc_altq version", __func__);
                break;
        }

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

        return (0);
}
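
/*
 * Illustrative usage note: a consumer built against the v0 ABI passes a
 * struct pfioc_altq_v0, so ioc_size selects version 0 above and the wider
 * kernel fields are saturated down (e.g. a 10 Gb/s ifbandwidth of 10^10,
 * which exceeds UINT_MAX, is clamped by SATU32()); a v1 consumer is
 * dispatched on pa->version instead.
 */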
1685
1686 /*
1687 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1688 * that may be using any version of it.
1689 */
1690 static int
pf_import_kaltq(struct pfioc_altq_v1 * pa,struct pf_altq * q,size_t ioc_size)1691 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1692 {
1693 u_int32_t version;
1694
1695 if (ioc_size == sizeof(struct pfioc_altq_v0))
1696 version = 0;
1697 else
1698 version = pa->version;
1699
1700 if (version > PFIOC_ALTQ_VERSION)
1701 return (EINVAL);
1702
1703 #define ASSIGN(x) q->x = imported_q->x
1704 #define COPY(x) \
1705 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1706
1707 switch (version) {
1708 case 0: {
1709 struct pf_altq_v0 *imported_q =
1710 &((struct pfioc_altq_v0 *)pa)->altq;
1711
1712 COPY(ifname);
1713
1714 ASSIGN(scheduler);
1715 ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1716 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1717
1718 COPY(qname);
1719 COPY(parent);
1720 ASSIGN(parent_qid);
1721 ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1722 ASSIGN(priority);
1723 ASSIGN(local_flags);
1724
1725 ASSIGN(qlimit);
1726 ASSIGN(flags);
1727
1728 if (imported_q->scheduler == ALTQT_HFSC) {
1729 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1730
1731 /*
1732 * The m1 and m2 parameters are being copied from
1733 * 32-bit to 64-bit.
1734 */
1735 ASSIGN_OPT(rtsc_m1);
1736 ASSIGN_OPT(rtsc_d);
1737 ASSIGN_OPT(rtsc_m2);
1738
1739 ASSIGN_OPT(lssc_m1);
1740 ASSIGN_OPT(lssc_d);
1741 ASSIGN_OPT(lssc_m2);
1742
1743 ASSIGN_OPT(ulsc_m1);
1744 ASSIGN_OPT(ulsc_d);
1745 ASSIGN_OPT(ulsc_m2);
1746
1747 ASSIGN_OPT(flags);
1748
1749 #undef ASSIGN_OPT
1750 } else
1751 COPY(pq_u);
1752
1753 ASSIGN(qid);
1754 break;
1755 }
1756 case 1: {
1757 struct pf_altq_v1 *imported_q =
1758 &((struct pfioc_altq_v1 *)pa)->altq;
1759
1760 COPY(ifname);
1761
1762 ASSIGN(scheduler);
1763 ASSIGN(tbrsize);
1764 ASSIGN(ifbandwidth);
1765
1766 COPY(qname);
1767 COPY(parent);
1768 ASSIGN(parent_qid);
1769 ASSIGN(bandwidth);
1770 ASSIGN(priority);
1771 ASSIGN(local_flags);
1772
1773 ASSIGN(qlimit);
1774 ASSIGN(flags);
1775 COPY(pq_u);
1776
1777 ASSIGN(qid);
1778 break;
1779 }
1780 default:
1781 panic("%s: unhandled struct pfioc_altq version", __func__);
1782 break;
1783 }
1784
1785 #undef ASSIGN
1786 #undef COPY
1787
1788 return (0);
1789 }
1790
1791 static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)1792 pf_altq_get_nth_active(u_int32_t n)
1793 {
1794 struct pf_altq *altq;
1795 u_int32_t nr;
1796
1797 nr = 0;
1798 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1799 if (nr == n)
1800 return (altq);
1801 nr++;
1802 }
1803
1804 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1805 if (nr == n)
1806 return (altq);
1807 nr++;
1808 }
1809
1810 return (NULL);
1811 }
1812 #endif /* ALTQ */
1813
1814 struct pf_krule *
pf_krule_alloc(void)1815 pf_krule_alloc(void)
1816 {
1817 struct pf_krule *rule;
1818
1819 rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1820 mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
1821 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1822 M_WAITOK | M_ZERO);
1823 return (rule);
1824 }
1825
1826 void
pf_krule_free(struct pf_krule * rule)1827 pf_krule_free(struct pf_krule *rule)
1828 {
1829 #ifdef PF_WANT_32_TO_64_COUNTER
1830 bool wowned;
1831 #endif
1832
1833 if (rule == NULL)
1834 return;
1835
1836 #ifdef PF_WANT_32_TO_64_COUNTER
1837 if (rule->allrulelinked) {
1838 wowned = PF_RULES_WOWNED();
1839 if (!wowned)
1840 PF_RULES_WLOCK();
1841 LIST_REMOVE(rule, allrulelist);
1842 V_pf_allrulecount--;
1843 if (!wowned)
1844 PF_RULES_WUNLOCK();
1845 }
1846 #endif
1847
1848 pf_counter_u64_deinit(&rule->evaluations);
1849 for (int i = 0; i < 2; i++) {
1850 pf_counter_u64_deinit(&rule->packets[i]);
1851 pf_counter_u64_deinit(&rule->bytes[i]);
1852 }
1853 counter_u64_free(rule->states_cur);
1854 counter_u64_free(rule->states_tot);
1855 counter_u64_free(rule->src_nodes);
1856 uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1857
1858 mtx_destroy(&rule->rpool.mtx);
1859 free(rule, M_PFRULE);
1860 }
1861
1862 static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr * kpool,struct pf_pooladdr * pool)1863 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1864 struct pf_pooladdr *pool)
1865 {
1866
1867 bzero(pool, sizeof(*pool));
1868 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1869 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1870 }
1871
1872 static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr * pool,struct pf_kpooladdr * kpool)1873 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1874 struct pf_kpooladdr *kpool)
1875 {
1876 int ret;
1877
1878 bzero(kpool, sizeof(*kpool));
1879 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1880 ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1881 sizeof(kpool->ifname));
1882 return (ret);
1883 }
1884
1885 static void
1886 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
1887 {
1888 bzero(pool, sizeof(*pool));
1889
1890 bcopy(&kpool->key, &pool->key, sizeof(pool->key));
1891 bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));
1892
1893 pool->tblidx = kpool->tblidx;
1894 pool->proxy_port[0] = kpool->proxy_port[0];
1895 pool->proxy_port[1] = kpool->proxy_port[1];
1896 pool->opts = kpool->opts;
1897 }
1898
1899 static void
1900 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1901 {
1902 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1903 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1904
1905 bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1906 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1907
1908 kpool->tblidx = pool->tblidx;
1909 kpool->proxy_port[0] = pool->proxy_port[0];
1910 kpool->proxy_port[1] = pool->proxy_port[1];
1911 kpool->opts = pool->opts;
1912 }
1913
1914 static void
1915 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
1916 {
1917
1918 bzero(rule, sizeof(*rule));
1919
1920 bcopy(&krule->src, &rule->src, sizeof(rule->src));
1921 bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));
1922
1923 for (int i = 0; i < PF_SKIP_COUNT; ++i) {
1924 if (krule->skip[i].ptr == NULL)
1925 rule->skip[i].nr = -1;
1926 else
1927 rule->skip[i].nr = krule->skip[i].ptr->nr;
1928 }
1929
1930 strlcpy(rule->label, krule->label[0], sizeof(rule->label));
1931 strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
1932 strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
1933 strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
1934 strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
1935 strlcpy(rule->match_tagname, krule->match_tagname,
1936 sizeof(rule->match_tagname));
1937 strlcpy(rule->overload_tblname, krule->overload_tblname,
1938 sizeof(rule->overload_tblname));
1939
1940 pf_kpool_to_pool(&krule->rpool, &rule->rpool);
1941
1942 rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
1943 for (int i = 0; i < 2; i++) {
1944 rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
1945 rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
1946 }
1947
1948 /* kif, anchor, overload_tbl are not copied over. */
1949
1950 rule->os_fingerprint = krule->os_fingerprint;
1951
1952 rule->rtableid = krule->rtableid;
1953 bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
1954 rule->max_states = krule->max_states;
1955 rule->max_src_nodes = krule->max_src_nodes;
1956 rule->max_src_states = krule->max_src_states;
1957 rule->max_src_conn = krule->max_src_conn;
1958 rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
1959 rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
1960 rule->qid = krule->qid;
1961 rule->pqid = krule->pqid;
1962 rule->nr = krule->nr;
1963 rule->prob = krule->prob;
1964 rule->cuid = krule->cuid;
1965 rule->cpid = krule->cpid;
1966
1967 rule->return_icmp = krule->return_icmp;
1968 rule->return_icmp6 = krule->return_icmp6;
1969 rule->max_mss = krule->max_mss;
1970 rule->tag = krule->tag;
1971 rule->match_tag = krule->match_tag;
1972 rule->scrub_flags = krule->scrub_flags;
1973
1974 bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
1975 bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));
1976
1977 rule->rule_flag = krule->rule_flag;
1978 rule->action = krule->action;
1979 rule->direction = krule->direction;
1980 rule->log = krule->log;
1981 rule->logif = krule->logif;
1982 rule->quick = krule->quick;
1983 rule->ifnot = krule->ifnot;
1984 rule->match_tag_not = krule->match_tag_not;
1985 rule->natpass = krule->natpass;
1986
1987 rule->keep_state = krule->keep_state;
1988 rule->af = krule->af;
1989 rule->proto = krule->proto;
1990 rule->type = krule->type;
1991 rule->code = krule->code;
1992 rule->flags = krule->flags;
1993 rule->flagset = krule->flagset;
1994 rule->min_ttl = krule->min_ttl;
1995 rule->allow_opts = krule->allow_opts;
1996 rule->rt = krule->rt;
1997 rule->return_ttl = krule->return_ttl;
1998 rule->tos = krule->tos;
1999 rule->set_tos = krule->set_tos;
2000 rule->anchor_relative = krule->anchor_relative;
2001 rule->anchor_wildcard = krule->anchor_wildcard;
2002
2003 rule->flush = krule->flush;
2004 rule->prio = krule->prio;
2005 rule->set_prio[0] = krule->set_prio[0];
2006 rule->set_prio[1] = krule->set_prio[1];
2007
2008 bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));
2009
2010 rule->u_states_cur = counter_u64_fetch(krule->states_cur);
2011 rule->u_states_tot = counter_u64_fetch(krule->states_tot);
2012 rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
2013 }
2014
2015 static int
2016 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
2017 {
2018 int ret;
2019
2020 #ifndef INET
2021 if (rule->af == AF_INET) {
2022 return (EAFNOSUPPORT);
2023 }
2024 #endif /* INET */
2025 #ifndef INET6
2026 if (rule->af == AF_INET6) {
2027 return (EAFNOSUPPORT);
2028 }
2029 #endif /* INET6 */
2030
2031 ret = pf_check_rule_addr(&rule->src);
2032 if (ret != 0)
2033 return (ret);
2034 ret = pf_check_rule_addr(&rule->dst);
2035 if (ret != 0)
2036 return (ret);
2037
2038 bcopy(&rule->src, &krule->src, sizeof(rule->src));
2039 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
2040
2041 ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
2042 if (ret != 0)
2043 return (ret);
2044 ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
2045 if (ret != 0)
2046 return (ret);
2047 ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
2048 if (ret != 0)
2049 return (ret);
2050 ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
2051 if (ret != 0)
2052 return (ret);
2053 ret = pf_user_strcpy(krule->tagname, rule->tagname,
2054 sizeof(rule->tagname));
2055 if (ret != 0)
2056 return (ret);
2057 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
2058 sizeof(rule->match_tagname));
2059 if (ret != 0)
2060 return (ret);
2061 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
2062 sizeof(rule->overload_tblname));
2063 if (ret != 0)
2064 return (ret);
2065
2066 pf_pool_to_kpool(&rule->rpool, &krule->rpool);
2067
2068 /* Don't allow userspace to set evaluations, packets or bytes. */
2069 /* kif, anchor, overload_tbl are not copied over. */
2070
2071 krule->os_fingerprint = rule->os_fingerprint;
2072
2073 krule->rtableid = rule->rtableid;
2074 /* pf_rule->timeout is smaller than pf_krule->timeout */
2075 bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
2076 krule->max_states = rule->max_states;
2077 krule->max_src_nodes = rule->max_src_nodes;
2078 krule->max_src_states = rule->max_src_states;
2079 krule->max_src_conn = rule->max_src_conn;
2080 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
2081 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
2082 krule->qid = rule->qid;
2083 krule->pqid = rule->pqid;
2084 krule->nr = rule->nr;
2085 krule->prob = rule->prob;
2086 krule->cuid = rule->cuid;
2087 krule->cpid = rule->cpid;
2088
2089 krule->return_icmp = rule->return_icmp;
2090 krule->return_icmp6 = rule->return_icmp6;
2091 krule->max_mss = rule->max_mss;
2092 krule->tag = rule->tag;
2093 krule->match_tag = rule->match_tag;
2094 krule->scrub_flags = rule->scrub_flags;
2095
2096 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2097 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2098
2099 krule->rule_flag = rule->rule_flag;
2100 krule->action = rule->action;
2101 krule->direction = rule->direction;
2102 krule->log = rule->log;
2103 krule->logif = rule->logif;
2104 krule->quick = rule->quick;
2105 krule->ifnot = rule->ifnot;
2106 krule->match_tag_not = rule->match_tag_not;
2107 krule->natpass = rule->natpass;
2108
2109 krule->keep_state = rule->keep_state;
2110 krule->af = rule->af;
2111 krule->proto = rule->proto;
2112 krule->type = rule->type;
2113 krule->code = rule->code;
2114 krule->flags = rule->flags;
2115 krule->flagset = rule->flagset;
2116 krule->min_ttl = rule->min_ttl;
2117 krule->allow_opts = rule->allow_opts;
2118 krule->rt = rule->rt;
2119 krule->return_ttl = rule->return_ttl;
2120 krule->tos = rule->tos;
2121 krule->set_tos = rule->set_tos;
2122
2123 krule->flush = rule->flush;
2124 krule->prio = rule->prio;
2125 krule->set_prio[0] = rule->set_prio[0];
2126 krule->set_prio[1] = rule->set_prio[1];
2127
2128 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2129
2130 return (0);
2131 }
2132
2133 static int
2134 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
2135 struct pf_kstate_kill *kill)
2136 {
2137 int ret;
2138
2139 bzero(kill, sizeof(*kill));
2140
2141 bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
2142 kill->psk_af = psk->psk_af;
2143 kill->psk_proto = psk->psk_proto;
2144 bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
2145 bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
2146 ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname,
2147 sizeof(kill->psk_ifname));
2148 if (ret != 0)
2149 return (ret);
2150 ret = pf_user_strcpy(kill->psk_label, psk->psk_label,
2151 sizeof(kill->psk_label));
2152 if (ret != 0)
2153 return (ret);
2154
2155 return (0);
2156 }
2157
2158 static int
2159 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2160 uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2161 struct thread *td)
2162 {
2163 struct pf_kruleset *ruleset;
2164 struct pf_krule *tail;
2165 struct pf_kpooladdr *pa;
2166 struct pfi_kkif *kif = NULL;
2167 int rs_num;
2168 int error = 0;
2169
2170 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2171 error = EINVAL;
2172 goto errout_unlocked;
2173 }
2174
2175 #define ERROUT(x) ERROUT_FUNCTION(errout, x)
2176
2177 if (rule->ifname[0])
2178 kif = pf_kkif_create(M_WAITOK);
2179 pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2180 for (int i = 0; i < 2; i++) {
2181 pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2182 pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2183 }
2184 rule->states_cur = counter_u64_alloc(M_WAITOK);
2185 rule->states_tot = counter_u64_alloc(M_WAITOK);
2186 rule->src_nodes = counter_u64_alloc(M_WAITOK);
2187 rule->cuid = td->td_ucred->cr_ruid;
2188 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2189 TAILQ_INIT(&rule->rpool.list);
2190
2191 PF_CONFIG_LOCK();
2192 PF_RULES_WLOCK();
2193 #ifdef PF_WANT_32_TO_64_COUNTER
2194 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2195 MPASS(!rule->allrulelinked);
2196 rule->allrulelinked = true;
2197 V_pf_allrulecount++;
2198 #endif
2199 ruleset = pf_find_kruleset(anchor);
2200 if (ruleset == NULL)
2201 ERROUT(EINVAL);
2202 rs_num = pf_get_ruleset_number(rule->action);
2203 if (rs_num >= PF_RULESET_MAX)
2204 ERROUT(EINVAL);
2205 if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2206 DPFPRINTF(PF_DEBUG_MISC,
2207 ("ticket: %d != [%d]%d\n", ticket, rs_num,
2208 ruleset->rules[rs_num].inactive.ticket));
2209 ERROUT(EBUSY);
2210 }
2211 if (pool_ticket != V_ticket_pabuf) {
2212 DPFPRINTF(PF_DEBUG_MISC,
2213 ("pool_ticket: %d != %d\n", pool_ticket,
2214 V_ticket_pabuf));
2215 ERROUT(EBUSY);
2216 }
2217 /*
2218 * XXXMJG hack: there is no mechanism to ensure the caller started
2219 * the transaction.  The ticket checked above may happen to match by
2220 * accident, even if nobody called DIOCXBEGIN, let alone this process.
2221 * Partially work around it by checking if the RB tree got allocated,
2222 * see pf_begin_rules.
2223 */
2224 if (ruleset->rules[rs_num].inactive.tree == NULL) {
2225 ERROUT(EINVAL);
2226 }
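/*
 * For context, the caller-side sequence this function expects looks
 * roughly like the following hypothetical sketch (pfctl drives the
 * real one; the tickets come from DIOCXBEGIN):
 *
 *     struct pfioc_trans trans;        // lists the rulesets touched
 *     ioctl(dev, DIOCXBEGIN, &trans);  // allocates the inactive tree,
 *                                      // hands back inactive tickets
 *     pr.ticket = ...;                 // ticket obtained above
 *     ioctl(dev, DIOCADDRULE, &pr);    // ends up in this function
 *     ioctl(dev, DIOCXCOMMIT, &trans); // swaps inactive -> active
 */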
2227
2228 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2229 pf_krulequeue);
2230 if (tail)
2231 rule->nr = tail->nr + 1;
2232 else
2233 rule->nr = 0;
2234 if (rule->ifname[0]) {
2235 rule->kif = pfi_kkif_attach(kif, rule->ifname);
2236 kif = NULL;
2237 pfi_kkif_ref(rule->kif);
2238 } else
2239 rule->kif = NULL;
2240
2241 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2242 error = EBUSY;
2243
2244 #ifdef ALTQ
2245 /* set queue IDs */
2246 if (rule->qname[0] != 0) {
2247 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2248 error = EBUSY;
2249 else if (rule->pqname[0] != 0) {
2250 if ((rule->pqid =
2251 pf_qname2qid(rule->pqname)) == 0)
2252 error = EBUSY;
2253 } else
2254 rule->pqid = rule->qid;
2255 }
2256 #endif
2257 if (rule->tagname[0])
2258 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2259 error = EBUSY;
2260 if (rule->match_tagname[0])
2261 if ((rule->match_tag =
2262 pf_tagname2tag(rule->match_tagname)) == 0)
2263 error = EBUSY;
2264 if (rule->rt && !rule->direction)
2265 error = EINVAL;
2266 if (!rule->log)
2267 rule->logif = 0;
2268 if (rule->logif >= PFLOGIFS_MAX)
2269 error = EINVAL;
2270 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2271 error = ENOMEM;
2272 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2273 error = ENOMEM;
2274 if (pf_kanchor_setup(rule, ruleset, anchor_call))
2275 error = EINVAL;
2276 if (rule->scrub_flags & PFSTATE_SETPRIO &&
2277 (rule->set_prio[0] > PF_PRIO_MAX ||
2278 rule->set_prio[1] > PF_PRIO_MAX))
2279 error = EINVAL;
2280 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2281 if (pa->addr.type == PF_ADDR_TABLE) {
2282 pa->addr.p.tbl = pfr_attach_table(ruleset,
2283 pa->addr.v.tblname);
2284 if (pa->addr.p.tbl == NULL)
2285 error = ENOMEM;
2286 }
2287
2288 rule->overload_tbl = NULL;
2289 if (rule->overload_tblname[0]) {
2290 if ((rule->overload_tbl = pfr_attach_table(ruleset,
2291 rule->overload_tblname)) == NULL)
2292 error = EINVAL;
2293 else
2294 rule->overload_tbl->pfrkt_flags |=
2295 PFR_TFLAG_ACTIVE;
2296 }
2297
2298 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2299 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2300 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2301 (rule->rt > PF_NOPFROUTE)) &&
2302 (TAILQ_FIRST(&rule->rpool.list) == NULL))
2303 error = EINVAL;
2304
2305 if (error) {
2306 pf_free_rule(rule);
2307 rule = NULL;
2308 ERROUT(error);
2309 }
2310
2311 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2312 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2313 rule, entries);
2314 ruleset->rules[rs_num].inactive.rcount++;
2315
2316 PF_RULES_WUNLOCK();
2317 pf_hash_rule(rule);
2318 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2319 PF_RULES_WLOCK();
2320 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2321 ruleset->rules[rs_num].inactive.rcount--;
2322 pf_free_rule(rule);
2323 rule = NULL;
2324 ERROUT(EEXIST);
2325 }
2326 PF_CONFIG_UNLOCK();
2327
2328 return (0);
2329
2330 #undef ERROUT
2331 errout:
2332 PF_RULES_WUNLOCK();
2333 PF_CONFIG_UNLOCK();
2334 errout_unlocked:
2335 pf_kkif_free(kif);
2336 pf_krule_free(rule);
2337 return (error);
2338 }
2339
2340 static bool
2341 pf_label_match(const struct pf_krule *rule, const char *label)
2342 {
2343 int i = 0;
2344
2345 while (i < PF_RULE_MAX_LABEL_COUNT && *rule->label[i]) {
2346 if (strcmp(rule->label[i], label) == 0)
2347 return (true);
2348 i++;
2349 }
2350
2351 return (false);
2352 }
2353
2354 static unsigned int
2355 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2356 {
2357 struct pf_kstate *s;
2358 int more = 0;
2359
2360 s = pf_find_state_all(key, dir, &more);
2361 if (s == NULL)
2362 return (0);
2363
2364 if (more) {
2365 PF_STATE_UNLOCK(s);
2366 return (0);
2367 }
2368
2369 pf_unlink_state(s);
2370 return (1);
2371 }
2372
2373 static int
2374 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2375 {
2376 struct pf_kstate *s;
2377 struct pf_state_key *sk;
2378 struct pf_addr *srcaddr, *dstaddr;
2379 struct pf_state_key_cmp match_key;
2380 int idx, killed = 0;
2381 unsigned int dir;
2382 u_int16_t srcport, dstport;
2383 struct pfi_kkif *kif;
2384
2385 relock_DIOCKILLSTATES:
2386 PF_HASHROW_LOCK(ih);
2387 LIST_FOREACH(s, &ih->states, entry) {
2388 /* For floating states look at the original kif. */
2389 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2390
2391 sk = s->key[PF_SK_WIRE];
2392 if (s->direction == PF_OUT) {
2393 srcaddr = &sk->addr[1];
2394 dstaddr = &sk->addr[0];
2395 srcport = sk->port[1];
2396 dstport = sk->port[0];
2397 } else {
2398 srcaddr = &sk->addr[0];
2399 dstaddr = &sk->addr[1];
2400 srcport = sk->port[0];
2401 dstport = sk->port[1];
2402 }
2403
2404 if (psk->psk_af && sk->af != psk->psk_af)
2405 continue;
2406
2407 if (psk->psk_proto && psk->psk_proto != sk->proto)
2408 continue;
2409
2410 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2411 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2412 continue;
2413
2414 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2415 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2416 continue;
2417
2418 if (! PF_MATCHA(psk->psk_rt_addr.neg,
2419 &psk->psk_rt_addr.addr.v.a.addr,
2420 &psk->psk_rt_addr.addr.v.a.mask,
2421 &s->rt_addr, sk->af))
2422 continue;
2423
2424 if (psk->psk_src.port_op != 0 &&
2425 ! pf_match_port(psk->psk_src.port_op,
2426 psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2427 continue;
2428
2429 if (psk->psk_dst.port_op != 0 &&
2430 ! pf_match_port(psk->psk_dst.port_op,
2431 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2432 continue;
2433
2434 if (psk->psk_label[0] &&
2435 ! pf_label_match(s->rule.ptr, psk->psk_label))
2436 continue;
2437
2438 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2439 kif->pfik_name))
2440 continue;
2441
2442 if (psk->psk_kill_match) {
2443 /* Create the key to find matching states, with lock
2444 * held. */
2445
2446 bzero(&match_key, sizeof(match_key));
2447
2448 if (s->direction == PF_OUT) {
2449 dir = PF_IN;
2450 idx = PF_SK_STACK;
2451 } else {
2452 dir = PF_OUT;
2453 idx = PF_SK_WIRE;
2454 }
2455
2456 match_key.af = s->key[idx]->af;
2457 match_key.proto = s->key[idx]->proto;
2458 PF_ACPY(&match_key.addr[0],
2459 &s->key[idx]->addr[1], match_key.af);
2460 match_key.port[0] = s->key[idx]->port[1];
2461 PF_ACPY(&match_key.addr[1],
2462 &s->key[idx]->addr[0], match_key.af);
2463 match_key.port[1] = s->key[idx]->port[0];
2464 }
2465
2466 pf_unlink_state(s);
2467 killed++;
2468
2469 if (psk->psk_kill_match)
2470 killed += pf_kill_matching_state(&match_key, dir);
2471
2472 goto relock_DIOCKILLSTATES;
2473 }
2474 PF_HASHROW_UNLOCK(ih);
2475
2476 return (killed);
2477 }
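/*
 * Worked example for the psk_kill_match path above (hypothetical
 * endpoints): an outbound state may carry the stack-side key
 * 10.0.0.5:1234 -> 198.51.100.7:80.  match_key swaps the address and
 * port pairs, giving 198.51.100.7:80 -> 10.0.0.5:1234 searched with
 * dir = PF_IN, which is how the companion state for the same
 * connection is keyed, so pf_kill_matching_state() unlinks it too.
 */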
2478
2479 static int
2480 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2481 {
2482 int error = 0;
2483 PF_RULES_RLOCK_TRACKER;
2484
2485 #define ERROUT_IOCTL(target, x) \
2486 do { \
2487 error = (x); \
2488 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \
2489 goto target; \
2490 } while (0)
2491
2492
2493 /* XXX keep in sync with switch() below */
2494 if (securelevel_gt(td->td_ucred, 2))
2495 switch (cmd) {
2496 case DIOCGETRULES:
2497 case DIOCGETRULE:
2498 case DIOCGETRULENV:
2499 case DIOCGETADDRS:
2500 case DIOCGETADDR:
2501 case DIOCGETSTATE:
2502 case DIOCGETSTATENV:
2503 case DIOCSETSTATUSIF:
2504 case DIOCGETSTATUS:
2505 case DIOCGETSTATUSNV:
2506 case DIOCCLRSTATUS:
2507 case DIOCNATLOOK:
2508 case DIOCSETDEBUG:
2509 case DIOCGETSTATES:
2510 case DIOCGETSTATESV2:
2511 case DIOCGETTIMEOUT:
2512 case DIOCCLRRULECTRS:
2513 case DIOCGETLIMIT:
2514 case DIOCGETALTQSV0:
2515 case DIOCGETALTQSV1:
2516 case DIOCGETALTQV0:
2517 case DIOCGETALTQV1:
2518 case DIOCGETQSTATSV0:
2519 case DIOCGETQSTATSV1:
2520 case DIOCGETRULESETS:
2521 case DIOCGETRULESET:
2522 case DIOCRGETTABLES:
2523 case DIOCRGETTSTATS:
2524 case DIOCRCLRTSTATS:
2525 case DIOCRCLRADDRS:
2526 case DIOCRADDADDRS:
2527 case DIOCRDELADDRS:
2528 case DIOCRSETADDRS:
2529 case DIOCRGETADDRS:
2530 case DIOCRGETASTATS:
2531 case DIOCRCLRASTATS:
2532 case DIOCRTSTADDRS:
2533 case DIOCOSFPGET:
2534 case DIOCGETSRCNODES:
2535 case DIOCCLRSRCNODES:
2536 case DIOCGETSYNCOOKIES:
2537 case DIOCIGETIFACES:
2538 case DIOCGIFSPEEDV0:
2539 case DIOCGIFSPEEDV1:
2540 case DIOCSETIFFLAG:
2541 case DIOCCLRIFFLAG:
2542 case DIOCGETETHRULES:
2543 case DIOCGETETHRULE:
2544 case DIOCGETETHRULESETS:
2545 case DIOCGETETHRULESET:
2546 break;
2547 case DIOCRCLRTABLES:
2548 case DIOCRADDTABLES:
2549 case DIOCRDELTABLES:
2550 case DIOCRSETTFLAGS:
2551 if (((struct pfioc_table *)addr)->pfrio_flags &
2552 PFR_FLAG_DUMMY)
2553 break; /* dummy operation ok */
2554 return (EPERM);
2555 default:
2556 return (EPERM);
2557 }
2558
2559 if (!(flags & FWRITE))
2560 switch (cmd) {
2561 case DIOCGETRULES:
2562 case DIOCGETADDRS:
2563 case DIOCGETADDR:
2564 case DIOCGETSTATE:
2565 case DIOCGETSTATENV:
2566 case DIOCGETSTATUS:
2567 case DIOCGETSTATUSNV:
2568 case DIOCGETSTATES:
2569 case DIOCGETSTATESV2:
2570 case DIOCGETTIMEOUT:
2571 case DIOCGETLIMIT:
2572 case DIOCGETALTQSV0:
2573 case DIOCGETALTQSV1:
2574 case DIOCGETALTQV0:
2575 case DIOCGETALTQV1:
2576 case DIOCGETQSTATSV0:
2577 case DIOCGETQSTATSV1:
2578 case DIOCGETRULESETS:
2579 case DIOCGETRULESET:
2580 case DIOCNATLOOK:
2581 case DIOCRGETTABLES:
2582 case DIOCRGETTSTATS:
2583 case DIOCRGETADDRS:
2584 case DIOCRGETASTATS:
2585 case DIOCRTSTADDRS:
2586 case DIOCOSFPGET:
2587 case DIOCGETSRCNODES:
2588 case DIOCGETSYNCOOKIES:
2589 case DIOCIGETIFACES:
2590 case DIOCGIFSPEEDV1:
2591 case DIOCGIFSPEEDV0:
2592 case DIOCGETRULENV:
2593 case DIOCGETETHRULES:
2594 case DIOCGETETHRULE:
2595 case DIOCGETETHRULESETS:
2596 case DIOCGETETHRULESET:
2597 break;
2598 case DIOCRCLRTABLES:
2599 case DIOCRADDTABLES:
2600 case DIOCRDELTABLES:
2601 case DIOCRCLRTSTATS:
2602 case DIOCRCLRADDRS:
2603 case DIOCRADDADDRS:
2604 case DIOCRDELADDRS:
2605 case DIOCRSETADDRS:
2606 case DIOCRSETTFLAGS:
2607 if (((struct pfioc_table *)addr)->pfrio_flags &
2608 PFR_FLAG_DUMMY) {
2609 flags |= FWRITE; /* need write lock for dummy */
2610 break; /* dummy operation ok */
2611 }
2612 return (EACCES);
2613 case DIOCGETRULE:
2614 if (((struct pfioc_rule *)addr)->action ==
2615 PF_GET_CLR_CNTR)
2616 return (EACCES);
2617 break;
2618 default:
2619 return (EACCES);
2620 }
2621
2622 CURVNET_SET(TD_TO_VNET(td));
2623
2624 switch (cmd) {
2625 case DIOCSTART:
2626 sx_xlock(&V_pf_ioctl_lock);
2627 if (V_pf_status.running)
2628 error = EEXIST;
2629 else {
2630 hook_pf();
2631 if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2632 hook_pf_eth();
2633 V_pf_status.running = 1;
2634 V_pf_status.since = time_second;
2635 new_unrhdr64(&V_pf_stateid, time_second);
2636
2637 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2638 }
2639 break;
2640
2641 case DIOCSTOP:
2642 sx_xlock(&V_pf_ioctl_lock);
2643 if (!V_pf_status.running)
2644 error = ENOENT;
2645 else {
2646 V_pf_status.running = 0;
2647 dehook_pf();
2648 dehook_pf_eth();
2649 V_pf_status.since = time_second;
2650 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2651 }
2652 break;
2653
2654 case DIOCGETETHRULES: {
2655 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2656 nvlist_t *nvl;
2657 void *packed;
2658 struct pf_keth_rule *tail;
2659 struct pf_keth_ruleset *rs;
2660 u_int32_t ticket, nr;
2661 const char *anchor = "";
2662
2663 nvl = NULL;
2664 packed = NULL;
2665
2666 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2667
2668 if (nv->len > pf_ioctl_maxcount)
2669 ERROUT(ENOMEM);
2670
2671 /* Copy the request in */
2672 packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2673 error = copyin(nv->data, packed, nv->len);
2674 if (error)
2675 ERROUT(error);
2676
2677 nvl = nvlist_unpack(packed, nv->len, 0);
2678 if (nvl == NULL)
2679 ERROUT(EBADMSG);
2680
2681 if (! nvlist_exists_string(nvl, "anchor"))
2682 ERROUT(EBADMSG);
2683
2684 anchor = nvlist_get_string(nvl, "anchor");
2685
2686 rs = pf_find_keth_ruleset(anchor);
2687
2688 nvlist_destroy(nvl);
2689 nvl = NULL;
2690 free(packed, M_NVLIST);
2691 packed = NULL;
2692
2693 if (rs == NULL)
2694 ERROUT(ENOENT);
2695
2696 /* Reply */
2697 nvl = nvlist_create(0);
2698 if (nvl == NULL)
2699 ERROUT(ENOMEM);
2700
2701 PF_RULES_RLOCK();
2702
2703 ticket = rs->active.ticket;
2704 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2705 if (tail)
2706 nr = tail->nr + 1;
2707 else
2708 nr = 0;
2709
2710 PF_RULES_RUNLOCK();
2711
2712 nvlist_add_number(nvl, "ticket", ticket);
2713 nvlist_add_number(nvl, "nr", nr);
2714
2715 packed = nvlist_pack(nvl, &nv->len);
2716 if (packed == NULL)
2717 ERROUT(ENOMEM);
2718
2719 if (nv->size == 0)
2720 ERROUT(0);
2721 else if (nv->size < nv->len)
2722 ERROUT(ENOSPC);
2723
2724 error = copyout(packed, nv->data, nv->len);
2725
2726 #undef ERROUT
2727 DIOCGETETHRULES_error:
2728 free(packed, M_NVLIST);
2729 nvlist_destroy(nvl);
2730 break;
2731 }
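/*
 * The nv->size checks above implement a two-pass sizing protocol for
 * nvlist replies: with nv->size == 0 the ioctl succeeds and reports
 * the required reply length in nv->len.  Hypothetical userspace
 * sketch (libnv, error handling elided):
 *
 *     nv.data = nvlist_pack(req, &nv.len); // packed request
 *     nv.size = 0;                         // pass 1: learn reply size
 *     ioctl(dev, DIOCGETETHRULES, &nv);    // nv.len = needed bytes
 *     nv.size = nv.len;                    // pass 2: repeat with room
 *     nv.data = malloc(nv.size);           // for the reply; nv.len
 *     ...                                  // must again be the packed
 *     ioctl(dev, DIOCGETETHRULES, &nv);    // request length
 */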
2732
2733 case DIOCGETETHRULE: {
2734 struct epoch_tracker et;
2735 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2736 nvlist_t *nvl = NULL;
2737 void *nvlpacked = NULL;
2738 struct pf_keth_rule *rule = NULL;
2739 struct pf_keth_ruleset *rs;
2740 u_int32_t ticket, nr;
2741 bool clear = false;
2742 const char *anchor;
2743
2744 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x)
2745
2746 if (nv->len > pf_ioctl_maxcount)
2747 ERROUT(ENOMEM);
2748
2749 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2750 error = copyin(nv->data, nvlpacked, nv->len);
2751 if (error)
2752 ERROUT(error);
2753
2754 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2755 if (nvl == NULL)
2756 ERROUT(EBADMSG);
2757 if (! nvlist_exists_number(nvl, "ticket"))
2758 ERROUT(EBADMSG);
2759 ticket = nvlist_get_number(nvl, "ticket");
2760 if (! nvlist_exists_string(nvl, "anchor"))
2761 ERROUT(EBADMSG);
2762 anchor = nvlist_get_string(nvl, "anchor");
2763
2764 if (nvlist_exists_bool(nvl, "clear"))
2765 clear = nvlist_get_bool(nvl, "clear");
2766
2767 if (clear && !(flags & FWRITE))
2768 ERROUT(EACCES);
2769
2770 if (! nvlist_exists_number(nvl, "nr"))
2771 ERROUT(EBADMSG);
2772 nr = nvlist_get_number(nvl, "nr");
2773
2774 PF_RULES_RLOCK();
2775 rs = pf_find_keth_ruleset(anchor);
2776 if (rs == NULL) {
2777 PF_RULES_RUNLOCK();
2778 ERROUT(ENOENT);
2779 }
2780 if (ticket != rs->active.ticket) {
2781 PF_RULES_RUNLOCK();
2782 ERROUT(EBUSY);
2783 }
2784
2785 nvlist_destroy(nvl);
2786 nvl = NULL;
2787 free(nvlpacked, M_NVLIST);
2788 nvlpacked = NULL;
2789
2790 rule = TAILQ_FIRST(rs->active.rules);
2791 while ((rule != NULL) && (rule->nr != nr))
2792 rule = TAILQ_NEXT(rule, entries);
2793 if (rule == NULL) {
2794 PF_RULES_RUNLOCK();
2795 ERROUT(ENOENT);
2796 }
2797 /* Make sure rule can't go away. */
2798 NET_EPOCH_ENTER(et);
2799 PF_RULES_RUNLOCK();
2800 nvl = pf_keth_rule_to_nveth_rule(rule);
2801 if (nvl == NULL || pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
2802 NET_EPOCH_EXIT(et);
2803 ERROUT(nvl == NULL ? ENOMEM : EBUSY);
2804 }
2805 NET_EPOCH_EXIT(et);
2806
2807 nvlpacked = nvlist_pack(nvl, &nv->len);
2808 if (nvlpacked == NULL)
2809 ERROUT(ENOMEM);
2810
2811 if (nv->size == 0)
2812 ERROUT(0);
2813 else if (nv->size < nv->len)
2814 ERROUT(ENOSPC);
2815
2816 error = copyout(nvlpacked, nv->data, nv->len);
2817 if (error == 0 && clear) {
2818 counter_u64_zero(rule->evaluations);
2819 for (int i = 0; i < 2; i++) {
2820 counter_u64_zero(rule->packets[i]);
2821 counter_u64_zero(rule->bytes[i]);
2822 }
2823 }
2824
2825 #undef ERROUT
2826 DIOCGETETHRULE_error:
2827 free(nvlpacked, M_NVLIST);
2828 nvlist_destroy(nvl);
2829 break;
2830 }
2831
2832 case DIOCADDETHRULE: {
2833 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2834 nvlist_t *nvl = NULL;
2835 void *nvlpacked = NULL;
2836 struct pf_keth_rule *rule = NULL, *tail = NULL;
2837 struct pf_keth_ruleset *ruleset = NULL;
2838 struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL;
2839 const char *anchor = "", *anchor_call = "";
2840
2841 #define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x)
2842
2843 if (nv->len > pf_ioctl_maxcount)
2844 ERROUT(ENOMEM);
2845
2846 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2847 error = copyin(nv->data, nvlpacked, nv->len);
2848 if (error)
2849 ERROUT(error);
2850
2851 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2852 if (nvl == NULL)
2853 ERROUT(EBADMSG);
2854
2855 if (! nvlist_exists_number(nvl, "ticket"))
2856 ERROUT(EBADMSG);
2857
2858 if (nvlist_exists_string(nvl, "anchor"))
2859 anchor = nvlist_get_string(nvl, "anchor");
2860 if (nvlist_exists_string(nvl, "anchor_call"))
2861 anchor_call = nvlist_get_string(nvl, "anchor_call");
2862
2863 ruleset = pf_find_keth_ruleset(anchor);
2864 if (ruleset == NULL)
2865 ERROUT(EINVAL);
2866
2867 if (nvlist_get_number(nvl, "ticket") !=
2868 ruleset->inactive.ticket) {
2869 DPFPRINTF(PF_DEBUG_MISC,
2870 ("ticket: %d != %d\n",
2871 (u_int32_t)nvlist_get_number(nvl, "ticket"),
2872 ruleset->inactive.ticket));
2873 ERROUT(EBUSY);
2874 }
2875
2876 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
2877 rule->timestamp = NULL;
2878
2879 error = pf_nveth_rule_to_keth_rule(nvl, rule);
2880 if (error != 0)
2881 ERROUT(error);
2882
2883 if (rule->ifname[0])
2884 kif = pf_kkif_create(M_WAITOK);
2885 if (rule->bridge_to_name[0])
2886 bridge_to_kif = pf_kkif_create(M_WAITOK);
2887 rule->evaluations = counter_u64_alloc(M_WAITOK);
2888 for (int i = 0; i < 2; i++) {
2889 rule->packets[i] = counter_u64_alloc(M_WAITOK);
2890 rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2891 }
2892 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
2893 M_WAITOK | M_ZERO);
2894
2895 PF_RULES_WLOCK();
2896
2897 if (rule->ifname[0]) {
2898 rule->kif = pfi_kkif_attach(kif, rule->ifname);
2899 pfi_kkif_ref(rule->kif);
2900 } else
2901 rule->kif = NULL;
2902 if (rule->bridge_to_name[0]) {
2903 rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
2904 rule->bridge_to_name);
2905 pfi_kkif_ref(rule->bridge_to);
2906 } else
2907 rule->bridge_to = NULL;
2908
2909 #ifdef ALTQ
2910 /* set queue IDs */
2911 if (rule->qname[0] != 0) {
2912 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2913 error = EBUSY;
2914 /* Ether rules have no pqname, so unlike pf_ioctl_addrule()
2915 * there is no secondary queue ID to resolve here. */
2916 }
2917 #endif
2918 if (rule->tagname[0])
2919 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2920 error = EBUSY;
2921 if (rule->match_tagname[0])
2922 if ((rule->match_tag = pf_tagname2tag(
2923 rule->match_tagname)) == 0)
2924 error = EBUSY;
2925
2926 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
2927 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
2928 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
2929 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
2930
2931 if (error) {
2932 pf_free_eth_rule(rule);
2933 PF_RULES_WUNLOCK();
2934 ERROUT(error);
2935 }
2936
2937 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
2938 pf_free_eth_rule(rule);
2939 PF_RULES_WUNLOCK();
2940 ERROUT(EINVAL);
2941 }
2942
2943 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
2944 if (tail)
2945 rule->nr = tail->nr + 1;
2946 else
2947 rule->nr = 0;
2948
2949 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
2950
2951 PF_RULES_WUNLOCK();
2952
2953 #undef ERROUT
2954 DIOCADDETHRULE_error:
2955 nvlist_destroy(nvl);
2956 free(nvlpacked, M_NVLIST);
2957 break;
2958 }
2959
2960 case DIOCGETETHRULESETS: {
2961 struct epoch_tracker et;
2962 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
2963 nvlist_t *nvl = NULL;
2964 void *nvlpacked = NULL;
2965 struct pf_keth_ruleset *ruleset;
2966 struct pf_keth_anchor *anchor;
2967 int nr = 0;
2968
2969 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
2970
2971 if (nv->len > pf_ioctl_maxcount)
2972 ERROUT(ENOMEM);
2973
2974 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2975 error = copyin(nv->data, nvlpacked, nv->len);
2976 if (error)
2977 ERROUT(error);
2978
2979 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2980 if (nvl == NULL)
2981 ERROUT(EBADMSG);
2982 if (! nvlist_exists_string(nvl, "path"))
2983 ERROUT(EBADMSG);
2984
2985 NET_EPOCH_ENTER(et);
2986
2987 if ((ruleset = pf_find_keth_ruleset(
2988 nvlist_get_string(nvl, "path"))) == NULL) {
2989 NET_EPOCH_EXIT(et);
2990 ERROUT(ENOENT);
2991 }
2992
2993 if (ruleset->anchor == NULL) {
2994 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
2995 if (anchor->parent == NULL)
2996 nr++;
2997 } else {
2998 RB_FOREACH(anchor, pf_keth_anchor_node,
2999 &ruleset->anchor->children)
3000 nr++;
3001 }
3002
3003 NET_EPOCH_EXIT(et);
3004
3005 nvlist_destroy(nvl);
3006 nvl = NULL;
3007 free(nvlpacked, M_NVLIST);
3008 nvlpacked = NULL;
3009
3010 nvl = nvlist_create(0);
3011 if (nvl == NULL)
3012 ERROUT(ENOMEM);
3013
3014 nvlist_add_number(nvl, "nr", nr);
3015
3016 nvlpacked = nvlist_pack(nvl, &nv->len);
3017 if (nvlpacked == NULL)
3018 ERROUT(ENOMEM);
3019
3020 if (nv->size == 0)
3021 ERROUT(0);
3022 else if (nv->size < nv->len)
3023 ERROUT(ENOSPC);
3024
3025 error = copyout(nvlpacked, nv->data, nv->len);
3026
3027 #undef ERROUT
3028 DIOCGETETHRULESETS_error:
3029 free(nvlpacked, M_NVLIST);
3030 nvlist_destroy(nvl);
3031 break;
3032 }
3033
3034 case DIOCGETETHRULESET: {
3035 struct epoch_tracker et;
3036 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
3037 nvlist_t *nvl = NULL;
3038 void *nvlpacked = NULL;
3039 struct pf_keth_ruleset *ruleset;
3040 struct pf_keth_anchor *anchor;
3041 int nr = 0, req_nr = 0;
3042 bool found = false;
3043
3044 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3045
3046 if (nv->len > pf_ioctl_maxcount)
3047 ERROUT(ENOMEM);
3048
3049 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3050 error = copyin(nv->data, nvlpacked, nv->len);
3051 if (error)
3052 ERROUT(error);
3053
3054 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3055 if (nvl == NULL)
3056 ERROUT(EBADMSG);
3057 if (! nvlist_exists_string(nvl, "path"))
3058 ERROUT(EBADMSG);
3059 if (! nvlist_exists_number(nvl, "nr"))
3060 ERROUT(EBADMSG);
3061
3062 req_nr = nvlist_get_number(nvl, "nr");
3063
3064 NET_EPOCH_ENTER(et);
3065
3066 if ((ruleset = pf_find_keth_ruleset(
3067 nvlist_get_string(nvl, "path"))) == NULL) {
3068 NET_EPOCH_EXIT(et);
3069 ERROUT(ENOENT);
3070 }
3071
3072 nvlist_destroy(nvl);
3073 nvl = NULL;
3074 free(nvlpacked, M_NVLIST);
3075 nvlpacked = NULL;
3076
3077 nvl = nvlist_create(0);
3078 if (nvl == NULL) {
3079 NET_EPOCH_EXIT(et);
3080 ERROUT(ENOMEM);
3081 }
3082
3083 if (ruleset->anchor == NULL) {
3084 RB_FOREACH(anchor, pf_keth_anchor_global,
3085 &V_pf_keth_anchors) {
3086 if (anchor->parent == NULL && nr++ == req_nr) {
3087 found = true;
3088 break;
3089 }
3090 }
3091 } else {
3092 RB_FOREACH(anchor, pf_keth_anchor_node,
3093 &ruleset->anchor->children) {
3094 if (nr++ == req_nr) {
3095 found = true;
3096 break;
3097 }
3098 }
3099 }
3100
3101 NET_EPOCH_EXIT(et);
3102 if (found) {
3103 nvlist_add_number(nvl, "nr", nr);
3104 nvlist_add_string(nvl, "name", anchor->name);
3105 if (ruleset->anchor)
3106 nvlist_add_string(nvl, "path",
3107 ruleset->anchor->path);
3108 else
3109 nvlist_add_string(nvl, "path", "");
3110 } else {
3111 ERROUT(EBUSY);
3112 }
3113
3114 nvlpacked = nvlist_pack(nvl, &nv->len);
3115 if (nvlpacked == NULL)
3116 ERROUT(ENOMEM);
3117
3118 if (nv->size == 0)
3119 ERROUT(0);
3120 else if (nv->size < nv->len)
3121 ERROUT(ENOSPC);
3122
3123 error = copyout(nvlpacked, nv->data, nv->len);
3124
3125 #undef ERROUT
3126 DIOCGETETHRULESET_error:
3127 free(nvlpacked, M_NVLIST);
3128 nvlist_destroy(nvl);
3129 break;
3130 }
3131
3132 case DIOCADDRULENV: {
3133 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
3134 nvlist_t *nvl = NULL;
3135 void *nvlpacked = NULL;
3136 struct pf_krule *rule = NULL;
3137 const char *anchor = "", *anchor_call = "";
3138 uint32_t ticket = 0, pool_ticket = 0;
3139
3140 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x)
3141
3142 if (nv->len > pf_ioctl_maxcount)
3143 ERROUT(ENOMEM);
3144
3145 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3146 error = copyin(nv->data, nvlpacked, nv->len);
3147 if (error)
3148 ERROUT(error);
3149
3150 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3151 if (nvl == NULL)
3152 ERROUT(EBADMSG);
3153
3154 if (! nvlist_exists_number(nvl, "ticket"))
3155 ERROUT(EINVAL);
3156 ticket = nvlist_get_number(nvl, "ticket");
3157
3158 if (! nvlist_exists_number(nvl, "pool_ticket"))
3159 ERROUT(EINVAL);
3160 pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3161
3162 if (! nvlist_exists_nvlist(nvl, "rule"))
3163 ERROUT(EINVAL);
3164
3165 rule = pf_krule_alloc();
3166 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3167 rule);
3168 if (error)
3169 ERROUT(error);
3170
3171 if (nvlist_exists_string(nvl, "anchor"))
3172 anchor = nvlist_get_string(nvl, "anchor");
3173 if (nvlist_exists_string(nvl, "anchor_call"))
3174 anchor_call = nvlist_get_string(nvl, "anchor_call");
3175
3176 if ((error = nvlist_error(nvl)))
3177 ERROUT(error);
3178
3179 /* Frees rule on error */
3180 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3181 anchor_call, td);
3182
3183 nvlist_destroy(nvl);
3184 free(nvlpacked, M_NVLIST);
3185 break;
3186 #undef ERROUT
3187 DIOCADDRULENV_error:
3188 pf_krule_free(rule);
3189 nvlist_destroy(nvl);
3190 free(nvlpacked, M_NVLIST);
3191
3192 break;
3193 }
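/*
 * The request nvlist consumed above has roughly this shape
 * (hypothetical sketch; pf_nvrule_to_krule() defines the schema of
 * the nested "rule" nvlist):
 *
 *     nvl = nvlist_create(0);
 *     nvlist_add_number(nvl, "ticket", ticket);       // DIOCXBEGIN
 *     nvlist_add_number(nvl, "pool_ticket", pool_ticket);
 *     nvlist_add_nvlist(nvl, "rule", rule_nvl);
 *     nvlist_add_string(nvl, "anchor", "");           // optional
 *     nvlist_add_string(nvl, "anchor_call", "");      // optional
 */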
3194 case DIOCADDRULE: {
3195 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3196 struct pf_krule *rule;
3197
3198 rule = pf_krule_alloc();
3199 error = pf_rule_to_krule(&pr->rule, rule);
3200 if (error != 0) {
3201 pf_krule_free(rule);
3202 break;
3203 }
3204
3205 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3206
3207 /* Frees rule on error */
3208 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3209 pr->anchor, pr->anchor_call, td);
3210 break;
3211 }
3212
3213 case DIOCGETRULES: {
3214 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3215 struct pf_kruleset *ruleset;
3216 struct pf_krule *tail;
3217 int rs_num;
3218
3219 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3220
3221 PF_RULES_WLOCK();
3222 ruleset = pf_find_kruleset(pr->anchor);
3223 if (ruleset == NULL) {
3224 PF_RULES_WUNLOCK();
3225 error = EINVAL;
3226 break;
3227 }
3228 rs_num = pf_get_ruleset_number(pr->rule.action);
3229 if (rs_num >= PF_RULESET_MAX) {
3230 PF_RULES_WUNLOCK();
3231 error = EINVAL;
3232 break;
3233 }
3234 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3235 pf_krulequeue);
3236 if (tail)
3237 pr->nr = tail->nr + 1;
3238 else
3239 pr->nr = 0;
3240 pr->ticket = ruleset->rules[rs_num].active.ticket;
3241 PF_RULES_WUNLOCK();
3242 break;
3243 }
3244
3245 case DIOCGETRULE: {
3246 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
3247 struct pf_kruleset *ruleset;
3248 struct pf_krule *rule;
3249 int rs_num;
3250
3251 pr->anchor[sizeof(pr->anchor) - 1] = 0;
3252
3253 PF_RULES_WLOCK();
3254 ruleset = pf_find_kruleset(pr->anchor);
3255 if (ruleset == NULL) {
3256 PF_RULES_WUNLOCK();
3257 error = EINVAL;
3258 break;
3259 }
3260 rs_num = pf_get_ruleset_number(pr->rule.action);
3261 if (rs_num >= PF_RULESET_MAX) {
3262 PF_RULES_WUNLOCK();
3263 error = EINVAL;
3264 break;
3265 }
3266 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3267 PF_RULES_WUNLOCK();
3268 error = EBUSY;
3269 break;
3270 }
3271 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3272 while ((rule != NULL) && (rule->nr != pr->nr))
3273 rule = TAILQ_NEXT(rule, entries);
3274 if (rule == NULL) {
3275 PF_RULES_WUNLOCK();
3276 error = EBUSY;
3277 break;
3278 }
3279
3280 pf_krule_to_rule(rule, &pr->rule);
3281
3282 if (pf_kanchor_copyout(ruleset, rule, pr)) {
3283 PF_RULES_WUNLOCK();
3284 error = EBUSY;
3285 break;
3286 }
3287 pf_addr_copyout(&pr->rule.src.addr);
3288 pf_addr_copyout(&pr->rule.dst.addr);
3289
3290 if (pr->action == PF_GET_CLR_CNTR) {
3291 pf_counter_u64_zero(&rule->evaluations);
3292 for (int i = 0; i < 2; i++) {
3293 pf_counter_u64_zero(&rule->packets[i]);
3294 pf_counter_u64_zero(&rule->bytes[i]);
3295 }
3296 counter_u64_zero(rule->states_tot);
3297 }
3298 PF_RULES_WUNLOCK();
3299 break;
3300 }
3301
3302 case DIOCGETRULENV: {
3303 struct pfioc_nv *nv = (struct pfioc_nv *)addr;
3304 nvlist_t *nvrule = NULL;
3305 nvlist_t *nvl = NULL;
3306 struct pf_kruleset *ruleset;
3307 struct pf_krule *rule;
3308 void *nvlpacked = NULL;
3309 int rs_num, nr;
3310 bool clear_counter = false;
3311
3312 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x)
3313
3314 if (nv->len > pf_ioctl_maxcount)
3315 ERROUT(ENOMEM);
3316
3317 /* Copy the request in */
3318 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3319 error = copyin(nv->data, nvlpacked, nv->len);
3320 if (error)
3321 ERROUT(error);
3322
3323 nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3324 if (nvl == NULL)
3325 ERROUT(EBADMSG);
3326
3327 if (! nvlist_exists_string(nvl, "anchor"))
3328 ERROUT(EBADMSG);
3329 if (! nvlist_exists_number(nvl, "ruleset"))
3330 ERROUT(EBADMSG);
3331 if (! nvlist_exists_number(nvl, "ticket"))
3332 ERROUT(EBADMSG);
3333 if (! nvlist_exists_number(nvl, "nr"))
3334 ERROUT(EBADMSG);
3335
3336 if (nvlist_exists_bool(nvl, "clear_counter"))
3337 clear_counter = nvlist_get_bool(nvl, "clear_counter");
3338
3339 if (clear_counter && !(flags & FWRITE))
3340 ERROUT(EACCES);
3341
3342 nr = nvlist_get_number(nvl, "nr");
3343
3344 PF_RULES_WLOCK();
3345 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3346 if (ruleset == NULL) {
3347 PF_RULES_WUNLOCK();
3348 ERROUT(ENOENT);
3349 }
3350
3351 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3352 if (rs_num >= PF_RULESET_MAX) {
3353 PF_RULES_WUNLOCK();
3354 ERROUT(EINVAL);
3355 }
3356
3357 if (nvlist_get_number(nvl, "ticket") !=
3358 ruleset->rules[rs_num].active.ticket) {
3359 PF_RULES_WUNLOCK();
3360 ERROUT(EBUSY);
3361 }
3362
3363 if ((error = nvlist_error(nvl))) {
3364 PF_RULES_WUNLOCK();
3365 ERROUT(error);
3366 }
3367
3368 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3369 while ((rule != NULL) && (rule->nr != nr))
3370 rule = TAILQ_NEXT(rule, entries);
3371 if (rule == NULL) {
3372 PF_RULES_WUNLOCK();
3373 ERROUT(EBUSY);
3374 }
3375
3376 nvrule = pf_krule_to_nvrule(rule);
3377
3378 nvlist_destroy(nvl);
3379 nvl = nvlist_create(0);
3380 if (nvl == NULL) {
3381 PF_RULES_WUNLOCK();
3382 ERROUT(ENOMEM);
3383 }
3384 nvlist_add_number(nvl, "nr", nr);
3385 nvlist_add_nvlist(nvl, "rule", nvrule);
3386 nvlist_destroy(nvrule);
3387 nvrule = NULL;
3388 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3389 PF_RULES_WUNLOCK();
3390 ERROUT(EBUSY);
3391 }
3392
3393 free(nvlpacked, M_NVLIST);
3394 nvlpacked = nvlist_pack(nvl, &nv->len);
3395 if (nvlpacked == NULL) {
3396 PF_RULES_WUNLOCK();
3397 ERROUT(ENOMEM);
3398 }
3399
3400 if (nv->size == 0) {
3401 PF_RULES_WUNLOCK();
3402 ERROUT(0);
3403 } else if (nv->size < nv->len) {
3405 PF_RULES_WUNLOCK();
3406 ERROUT(ENOSPC);
3407 }
3408
3409 if (clear_counter) {
3410 pf_counter_u64_zero(&rule->evaluations);
3411 for (int i = 0; i < 2; i++) {
3412 pf_counter_u64_zero(&rule->packets[i]);
3413 pf_counter_u64_zero(&rule->bytes[i]);
3414 }
3415 counter_u64_zero(rule->states_tot);
3416 }
3417 PF_RULES_WUNLOCK();
3418
3419 error = copyout(nvlpacked, nv->data, nv->len);
3420
3421 #undef ERROUT
3422 DIOCGETRULENV_error:
3423 free(nvlpacked, M_NVLIST);
3424 nvlist_destroy(nvrule);
3425 nvlist_destroy(nvl);
3426
3427 break;
3428 }
3429
3430 case DIOCCHANGERULE: {
3431 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
3432 struct pf_kruleset *ruleset;
3433 struct pf_krule *oldrule = NULL, *newrule = NULL;
3434 struct pfi_kkif *kif = NULL;
3435 struct pf_kpooladdr *pa;
3436 u_int32_t nr = 0;
3437 int rs_num;
3438
3439 pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3440
3441 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3442 pcr->action > PF_CHANGE_GET_TICKET) {
3443 error = EINVAL;
3444 break;
3445 }
3446 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3447 error = EINVAL;
3448 break;
3449 }
3450
3451 if (pcr->action != PF_CHANGE_REMOVE) {
3452 newrule = pf_krule_alloc();
3453 error = pf_rule_to_krule(&pcr->rule, newrule);
3454 if (error != 0) {
3455 pf_krule_free(newrule);
3456 break;
3457 }
3458
3459 if (newrule->ifname[0])
3460 kif = pf_kkif_create(M_WAITOK);
3461 pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3462 for (int i = 0; i < 2; i++) {
3463 pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3464 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3465 }
3466 newrule->states_cur = counter_u64_alloc(M_WAITOK);
3467 newrule->states_tot = counter_u64_alloc(M_WAITOK);
3468 newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3469 newrule->cuid = td->td_ucred->cr_ruid;
3470 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3471 TAILQ_INIT(&newrule->rpool.list);
3472 }
3473 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3474
3475 PF_CONFIG_LOCK();
3476 PF_RULES_WLOCK();
3477 #ifdef PF_WANT_32_TO_64_COUNTER
3478 if (newrule != NULL) {
3479 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3480 newrule->allrulelinked = true;
3481 V_pf_allrulecount++;
3482 }
3483 #endif
3484
3485 if (!(pcr->action == PF_CHANGE_REMOVE ||
3486 pcr->action == PF_CHANGE_GET_TICKET) &&
3487 pcr->pool_ticket != V_ticket_pabuf)
3488 ERROUT(EBUSY);
3489
3490 ruleset = pf_find_kruleset(pcr->anchor);
3491 if (ruleset == NULL)
3492 ERROUT(EINVAL);
3493
3494 rs_num = pf_get_ruleset_number(pcr->rule.action);
3495 if (rs_num >= PF_RULESET_MAX)
3496 ERROUT(EINVAL);
3497
3498 /*
3499 * XXXMJG: there is no guarantee that the ruleset was
3500 * created by the usual route of calling DIOCXBEGIN.
3501 * As a result it is possible the rule tree will not
3502 * be allocated yet. Hack around it by doing it here.
3503 * Note it is fine to let the tree persist in case of
3504 * error as it will be freed down the road on future
3505 * updates (if need be).
3506 */
3507 if (ruleset->rules[rs_num].active.tree == NULL) {
3508 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3509 if (ruleset->rules[rs_num].active.tree == NULL) {
3510 ERROUT(ENOMEM);
3511 }
3512 }
3513
3514 if (pcr->action == PF_CHANGE_GET_TICKET) {
3515 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3516 ERROUT(0);
3517 } else if (pcr->ticket !=
3518 ruleset->rules[rs_num].active.ticket)
3519 ERROUT(EINVAL);
3520
3521 if (pcr->action != PF_CHANGE_REMOVE) {
3522 if (newrule->ifname[0]) {
3523 newrule->kif = pfi_kkif_attach(kif,
3524 newrule->ifname);
3525 kif = NULL;
3526 pfi_kkif_ref(newrule->kif);
3527 } else
3528 newrule->kif = NULL;
3529
3530 if (newrule->rtableid > 0 &&
3531 newrule->rtableid >= rt_numfibs)
3532 error = EBUSY;
3533
3534 #ifdef ALTQ
3535 /* set queue IDs */
3536 if (newrule->qname[0] != 0) {
3537 if ((newrule->qid =
3538 pf_qname2qid(newrule->qname)) == 0)
3539 error = EBUSY;
3540 else if (newrule->pqname[0] != 0) {
3541 if ((newrule->pqid =
3542 pf_qname2qid(newrule->pqname)) == 0)
3543 error = EBUSY;
3544 } else
3545 newrule->pqid = newrule->qid;
3546 }
3547 #endif /* ALTQ */
3548 if (newrule->tagname[0])
3549 if ((newrule->tag =
3550 pf_tagname2tag(newrule->tagname)) == 0)
3551 error = EBUSY;
3552 if (newrule->match_tagname[0])
3553 if ((newrule->match_tag = pf_tagname2tag(
3554 newrule->match_tagname)) == 0)
3555 error = EBUSY;
3556 if (newrule->rt && !newrule->direction)
3557 error = EINVAL;
3558 if (!newrule->log)
3559 newrule->logif = 0;
3560 if (newrule->logif >= PFLOGIFS_MAX)
3561 error = EINVAL;
3562 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3563 error = ENOMEM;
3564 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3565 error = ENOMEM;
3566 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3567 error = EINVAL;
3568 TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3569 if (pa->addr.type == PF_ADDR_TABLE) {
3570 pa->addr.p.tbl =
3571 pfr_attach_table(ruleset,
3572 pa->addr.v.tblname);
3573 if (pa->addr.p.tbl == NULL)
3574 error = ENOMEM;
3575 }
3576
3577 newrule->overload_tbl = NULL;
3578 if (newrule->overload_tblname[0]) {
3579 if ((newrule->overload_tbl = pfr_attach_table(
3580 ruleset, newrule->overload_tblname)) ==
3581 NULL)
3582 error = EINVAL;
3583 else
3584 newrule->overload_tbl->pfrkt_flags |=
3585 PFR_TFLAG_ACTIVE;
3586 }
3587
3588 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3589 if (((((newrule->action == PF_NAT) ||
3590 (newrule->action == PF_RDR) ||
3591 (newrule->action == PF_BINAT) ||
3592 (newrule->rt > PF_NOPFROUTE)) &&
3593 !newrule->anchor)) &&
3594 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3595 error = EINVAL;
3596
3597 if (error) {
3598 pf_free_rule(newrule);
3599 PF_RULES_WUNLOCK();
3600 PF_CONFIG_UNLOCK();
3601 break;
3602 }
3603
3604 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3605 }
3606 pf_empty_kpool(&V_pf_pabuf);
3607
3608 if (pcr->action == PF_CHANGE_ADD_HEAD)
3609 oldrule = TAILQ_FIRST(
3610 ruleset->rules[rs_num].active.ptr);
3611 else if (pcr->action == PF_CHANGE_ADD_TAIL)
3612 oldrule = TAILQ_LAST(
3613 ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3614 else {
3615 oldrule = TAILQ_FIRST(
3616 ruleset->rules[rs_num].active.ptr);
3617 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3618 oldrule = TAILQ_NEXT(oldrule, entries);
3619 if (oldrule == NULL) {
3620 if (newrule != NULL)
3621 pf_free_rule(newrule);
3622 PF_RULES_WUNLOCK();
3623 PF_CONFIG_UNLOCK();
3624 error = EINVAL;
3625 break;
3626 }
3627 }
3628
3629 if (pcr->action == PF_CHANGE_REMOVE) {
3630 pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3631 oldrule);
3632 RB_REMOVE(pf_krule_global,
3633 ruleset->rules[rs_num].active.tree, oldrule);
3634 ruleset->rules[rs_num].active.rcount--;
3635 } else {
3636 pf_hash_rule(newrule);
3637 if (RB_INSERT(pf_krule_global,
3638 ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3639 pf_free_rule(newrule);
3640 PF_RULES_WUNLOCK();
3641 PF_CONFIG_UNLOCK();
3642 error = EEXIST;
3643 break;
3644 }
3645
3646 if (oldrule == NULL)
3647 TAILQ_INSERT_TAIL(
3648 ruleset->rules[rs_num].active.ptr,
3649 newrule, entries);
3650 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3651 pcr->action == PF_CHANGE_ADD_BEFORE)
3652 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3653 else
3654 TAILQ_INSERT_AFTER(
3655 ruleset->rules[rs_num].active.ptr,
3656 oldrule, newrule, entries);
3657 ruleset->rules[rs_num].active.rcount++;
3658 }
3659
3660 nr = 0;
3661 TAILQ_FOREACH(oldrule,
3662 ruleset->rules[rs_num].active.ptr, entries)
3663 oldrule->nr = nr++;
3664
3665 ruleset->rules[rs_num].active.ticket++;
3666
3667 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3668 pf_remove_if_empty_kruleset(ruleset);
3669
3670 PF_RULES_WUNLOCK();
3671 PF_CONFIG_UNLOCK();
3672 break;
3673
3674 #undef ERROUT
3675 DIOCCHANGERULE_error:
3676 PF_RULES_WUNLOCK();
3677 PF_CONFIG_UNLOCK();
3678 pf_krule_free(newrule);
3679 pf_kkif_free(kif);
3680 break;
3681 }
3682
3683 case DIOCCLRSTATES: {
3684 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3685 struct pf_kstate_kill kill;
3686
3687 error = pf_state_kill_to_kstate_kill(psk, &kill);
3688 if (error)
3689 break;
3690
3691 psk->psk_killed = pf_clear_states(&kill);
3692 break;
3693 }
3694
3695 case DIOCCLRSTATESNV: {
3696 error = pf_clearstates_nv((struct pfioc_nv *)addr);
3697 break;
3698 }
3699
3700 case DIOCKILLSTATES: {
3701 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
3702 struct pf_kstate_kill kill;
3703
3704 error = pf_state_kill_to_kstate_kill(psk, &kill);
3705 if (error)
3706 break;
3707
3708 psk->psk_killed = 0;
3709 pf_killstates(&kill, &psk->psk_killed);
3710 break;
3711 }
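/*
 * Hypothetical caller killing every state from one source host,
 * roughly what "pfctl -k <host>" issues (error handling elided):
 *
 *     struct pfioc_state_kill psk = { 0 };
 *     psk.psk_af = AF_INET;
 *     psk.psk_src.addr.v.a.addr.v4.s_addr = inet_addr("192.0.2.1");
 *     psk.psk_src.addr.v.a.mask.v4.s_addr = htonl(0xffffffff); // /32
 *     ioctl(dev, DIOCKILLSTATES, &psk);
 *     printf("killed %u states\n", psk.psk_killed);
 */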
3712
3713 case DIOCKILLSTATESNV: {
3714 error = pf_killstates_nv((struct pfioc_nv *)addr);
3715 break;
3716 }
3717
3718 case DIOCADDSTATE: {
3719 struct pfioc_state *ps = (struct pfioc_state *)addr;
3720 struct pfsync_state_1301 *sp = &ps->state;
3721
3722 if (sp->timeout >= PFTM_MAX) {
3723 error = EINVAL;
3724 break;
3725 }
3726 if (V_pfsync_state_import_ptr != NULL) {
3727 PF_RULES_RLOCK();
3728 error = V_pfsync_state_import_ptr(
3729 (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3730 PFSYNC_MSG_VERSION_1301);
3731 PF_RULES_RUNLOCK();
3732 } else
3733 error = EOPNOTSUPP;
3734 break;
3735 }
3736
3737 case DIOCGETSTATE: {
3738 struct pfioc_state *ps = (struct pfioc_state *)addr;
3739 struct pf_kstate *s;
3740
3741 s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3742 if (s == NULL) {
3743 error = ENOENT;
3744 break;
3745 }
3746
3747 pfsync_state_export((union pfsync_state_union*)&ps->state,
3748 s, PFSYNC_MSG_VERSION_1301);
3749 PF_STATE_UNLOCK(s);
3750 break;
3751 }
3752
3753 case DIOCGETSTATENV: {
3754 error = pf_getstate((struct pfioc_nv *)addr);
3755 break;
3756 }
3757
3758 case DIOCGETSTATES: {
3759 struct pfioc_states *ps = (struct pfioc_states *)addr;
3760 struct pf_kstate *s;
3761 struct pfsync_state_1301 *pstore, *p;
3762 int i, nr;
3763 size_t slice_count = 16, count;
3764 void *out;
3765
3766 if (ps->ps_len <= 0) {
3767 nr = uma_zone_get_cur(V_pf_state_z);
3768 ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3769 break;
3770 }
3771
3772 out = ps->ps_states;
3773 pstore = mallocarray(slice_count,
3774 sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3775 nr = 0;
3776
3777 for (i = 0; i <= V_pf_hashmask; i++) {
3778 struct pf_idhash *ih = &V_pf_idhash[i];
3779
3780 DIOCGETSTATES_retry:
3781 p = pstore;
3782
3783 if (LIST_EMPTY(&ih->states))
3784 continue;
3785
3786 PF_HASHROW_LOCK(ih);
3787 count = 0;
3788 LIST_FOREACH(s, &ih->states, entry) {
3789 if (s->timeout == PFTM_UNLINKED)
3790 continue;
3791 count++;
3792 }
3793
3794 if (count > slice_count) {
3795 PF_HASHROW_UNLOCK(ih);
3796 free(pstore, M_TEMP);
3797 slice_count = count * 2;
3798 pstore = mallocarray(slice_count,
3799 sizeof(struct pfsync_state_1301), M_TEMP,
3800 M_WAITOK | M_ZERO);
3801 goto DIOCGETSTATES_retry;
3802 }
3803
3804 if ((nr+count) * sizeof(*p) > ps->ps_len) {
3805 PF_HASHROW_UNLOCK(ih);
3806 goto DIOCGETSTATES_full;
3807 }
3808
3809 LIST_FOREACH(s, &ih->states, entry) {
3810 if (s->timeout == PFTM_UNLINKED)
3811 continue;
3812
3813 pfsync_state_export((union pfsync_state_union*)p,
3814 s, PFSYNC_MSG_VERSION_1301);
3815 p++;
3816 nr++;
3817 }
3818 PF_HASHROW_UNLOCK(ih);
3819 error = copyout(pstore, out,
3820 sizeof(struct pfsync_state_1301) * count);
3821 if (error)
3822 break;
3823 out = ps->ps_states + nr;
3824 }
3825 DIOCGETSTATES_full:
3826 ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3827 free(pstore, M_TEMP);
3828
3829 break;
3830 }
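/*
 * The ps_len <= 0 branch above lets userspace size its buffer first.
 * Hypothetical two-call sequence (the state table can grow between
 * the calls, so real callers add slack and retry on truncation):
 *
 *     struct pfioc_states ps = { 0 };
 *     ioctl(dev, DIOCGETSTATES, &ps);     // ps.ps_len = size estimate
 *     ps.ps_states = malloc(ps.ps_len);
 *     ioctl(dev, DIOCGETSTATES, &ps);     // ps.ps_len = bytes written
 */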
3831
3832 case DIOCGETSTATESV2: {
3833 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr;
3834 struct pf_kstate *s;
3835 struct pf_state_export *pstore, *p;
3836 int i, nr;
3837 size_t slice_count = 16, count;
3838 void *out;
3839
3840 if (ps->ps_req_version > PF_STATE_VERSION) {
3841 error = ENOTSUP;
3842 break;
3843 }
3844
3845 if (ps->ps_len <= 0) {
3846 nr = uma_zone_get_cur(V_pf_state_z);
3847 ps->ps_len = sizeof(struct pf_state_export) * nr;
3848 break;
3849 }
3850
3851 out = ps->ps_states;
3852 pstore = mallocarray(slice_count,
3853 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
3854 nr = 0;
3855
3856 for (i = 0; i <= V_pf_hashmask; i++) {
3857 struct pf_idhash *ih = &V_pf_idhash[i];
3858
3859 DIOCGETSTATESV2_retry:
3860 p = pstore;
3861
3862 if (LIST_EMPTY(&ih->states))
3863 continue;
3864
3865 PF_HASHROW_LOCK(ih);
3866 count = 0;
3867 LIST_FOREACH(s, &ih->states, entry) {
3868 if (s->timeout == PFTM_UNLINKED)
3869 continue;
3870 count++;
3871 }
3872
3873 if (count > slice_count) {
3874 PF_HASHROW_UNLOCK(ih);
3875 free(pstore, M_TEMP);
3876 slice_count = count * 2;
3877 pstore = mallocarray(slice_count,
3878 sizeof(struct pf_state_export), M_TEMP,
3879 M_WAITOK | M_ZERO);
3880 goto DIOCGETSTATESV2_retry;
3881 }
3882
3883 if ((nr+count) * sizeof(*p) > ps->ps_len) {
3884 PF_HASHROW_UNLOCK(ih);
3885 goto DIOCGETSTATESV2_full;
3886 }
3887
3888 LIST_FOREACH(s, &ih->states, entry) {
3889 if (s->timeout == PFTM_UNLINKED)
3890 continue;
3891
3892 pf_state_export(p, s);
3893 p++;
3894 nr++;
3895 }
3896 PF_HASHROW_UNLOCK(ih);
3897 error = copyout(pstore, out,
3898 sizeof(struct pf_state_export) * count);
3899 if (error)
3900 break;
3901 out = ps->ps_states + nr;
3902 }
3903 DIOCGETSTATESV2_full:
3904 ps->ps_len = nr * sizeof(struct pf_state_export);
3905 free(pstore, M_TEMP);
3906
3907 break;
3908 }
3909
3910 case DIOCGETSTATUS: {
3911 struct pf_status *s = (struct pf_status *)addr;
3912
3913 PF_RULES_RLOCK();
3914 s->running = V_pf_status.running;
3915 s->since = V_pf_status.since;
3916 s->debug = V_pf_status.debug;
3917 s->hostid = V_pf_status.hostid;
3918 s->states = V_pf_status.states;
3919 s->src_nodes = V_pf_status.src_nodes;
3920
3921 for (int i = 0; i < PFRES_MAX; i++)
3922 s->counters[i] =
3923 counter_u64_fetch(V_pf_status.counters[i]);
3924 for (int i = 0; i < LCNT_MAX; i++)
3925 s->lcounters[i] =
3926 counter_u64_fetch(V_pf_status.lcounters[i]);
3927 for (int i = 0; i < FCNT_MAX; i++)
3928 s->fcounters[i] =
3929 pf_counter_u64_fetch(&V_pf_status.fcounters[i]);
3930 for (int i = 0; i < SCNT_MAX; i++)
3931 s->scounters[i] =
3932 counter_u64_fetch(V_pf_status.scounters[i]);
3933
3934 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
3935 bcopy(V_pf_status.pf_chksum, s->pf_chksum,
3936 PF_MD5_DIGEST_LENGTH);
3937
3938 pfi_update_status(s->ifname, s);
3939 PF_RULES_RUNLOCK();
3940 break;
3941 }
3942
3943 case DIOCGETSTATUSNV: {
3944 error = pf_getstatus((struct pfioc_nv *)addr);
3945 break;
3946 }
3947
3948 case DIOCSETSTATUSIF: {
3949 struct pfioc_if *pi = (struct pfioc_if *)addr;
3950
3951 if (pi->ifname[0] == 0) {
3952 bzero(V_pf_status.ifname, IFNAMSIZ);
3953 break;
3954 }
3955 PF_RULES_WLOCK();
3956 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3957 PF_RULES_WUNLOCK();
3958 break;
3959 }
3960
3961 case DIOCCLRSTATUS: {
3962 PF_RULES_WLOCK();
3963 for (int i = 0; i < PFRES_MAX; i++)
3964 counter_u64_zero(V_pf_status.counters[i]);
3965 for (int i = 0; i < FCNT_MAX; i++)
3966 pf_counter_u64_zero(&V_pf_status.fcounters[i]);
3967 for (int i = 0; i < SCNT_MAX; i++)
3968 counter_u64_zero(V_pf_status.scounters[i]);
3969 for (int i = 0; i < KLCNT_MAX; i++)
3970 counter_u64_zero(V_pf_status.lcounters[i]);
3971 V_pf_status.since = time_second;
3972 if (*V_pf_status.ifname)
3973 pfi_update_status(V_pf_status.ifname, NULL);
3974 PF_RULES_WUNLOCK();
3975 break;
3976 }
3977
3978 case DIOCNATLOOK: {
3979 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
3980 struct pf_state_key *sk;
3981 struct pf_kstate *state;
3982 struct pf_state_key_cmp key;
3983 int m = 0, direction = pnl->direction;
3984 int sidx, didx;
3985
3986 /* NATLOOK src and dst are reversed, so reverse sidx/didx */
3987 sidx = (direction == PF_IN) ? 1 : 0;
3988 didx = (direction == PF_IN) ? 0 : 1;
3989
3990 if (!pnl->proto ||
3991 PF_AZERO(&pnl->saddr, pnl->af) ||
3992 PF_AZERO(&pnl->daddr, pnl->af) ||
3993 ((pnl->proto == IPPROTO_TCP ||
3994 pnl->proto == IPPROTO_UDP) &&
3995 (!pnl->dport || !pnl->sport)))
3996 error = EINVAL;
3997 else {
3998 bzero(&key, sizeof(key));
3999 key.af = pnl->af;
4000 key.proto = pnl->proto;
4001 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
4002 key.port[sidx] = pnl->sport;
4003 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
4004 key.port[didx] = pnl->dport;
4005
4006 state = pf_find_state_all(&key, direction, &m);
4007 if (state == NULL) {
4008 error = ENOENT;
4009 } else {
4010 if (m > 1) {
4011 PF_STATE_UNLOCK(state);
4012 error = E2BIG; /* more than one state */
4013 } else {
4014 sk = state->key[sidx];
4015 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
4016 pnl->rsport = sk->port[sidx];
4017 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
4018 pnl->rdport = sk->port[didx];
4019 PF_STATE_UNLOCK(state);
4020 }
4021 }
4022 }
4023 break;
4024 }
4025
4026 case DIOCSETTIMEOUT: {
4027 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
4028 int old;
4029
4030 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
4031 pt->seconds < 0) {
4032 error = EINVAL;
4033 break;
4034 }
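		/*
		 * Swap semantics: install the new value and hand the previous
		 * one back in pt->seconds.  Shortening the purge interval
		 * wakes the purge thread so it rearms promptly.
		 */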
4035 PF_RULES_WLOCK();
4036 old = V_pf_default_rule.timeout[pt->timeout];
4037 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
4038 pt->seconds = 1;
4039 V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
4040 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
4041 wakeup(pf_purge_thread);
4042 pt->seconds = old;
4043 PF_RULES_WUNLOCK();
4044 break;
4045 }
4046
4047 case DIOCGETTIMEOUT: {
4048 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
4049
4050 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
4051 error = EINVAL;
4052 break;
4053 }
4054 PF_RULES_RLOCK();
4055 pt->seconds = V_pf_default_rule.timeout[pt->timeout];
4056 PF_RULES_RUNLOCK();
4057 break;
4058 }
4059
4060 case DIOCGETLIMIT: {
4061 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
4062
4063 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
4064 error = EINVAL;
4065 break;
4066 }
4067 PF_RULES_RLOCK();
4068 pl->limit = V_pf_limits[pl->index].limit;
4069 PF_RULES_RUNLOCK();
4070 break;
4071 }
4072
4073 case DIOCSETLIMIT: {
4074 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
4075 int old_limit;
4076
4077 PF_RULES_WLOCK();
4078 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
4079 V_pf_limits[pl->index].zone == NULL) {
4080 PF_RULES_WUNLOCK();
4081 error = EINVAL;
4082 break;
4083 }
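		/*
		 * The limit is enforced by capping the backing UMA zone;
		 * the previous limit is returned in pl->limit.
		 */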
4084 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
4085 old_limit = V_pf_limits[pl->index].limit;
4086 V_pf_limits[pl->index].limit = pl->limit;
4087 pl->limit = old_limit;
4088 PF_RULES_WUNLOCK();
4089 break;
4090 }
4091
4092 case DIOCSETDEBUG: {
4093 u_int32_t *level = (u_int32_t *)addr;
4094
4095 PF_RULES_WLOCK();
4096 V_pf_status.debug = *level;
4097 PF_RULES_WUNLOCK();
4098 break;
4099 }
4100
4101 case DIOCCLRRULECTRS: {
4102 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4103 struct pf_kruleset *ruleset = &pf_main_ruleset;
4104 struct pf_krule *rule;
4105
4106 PF_RULES_WLOCK();
4107 TAILQ_FOREACH(rule,
4108 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4109 pf_counter_u64_zero(&rule->evaluations);
4110 for (int i = 0; i < 2; i++) {
4111 pf_counter_u64_zero(&rule->packets[i]);
4112 pf_counter_u64_zero(&rule->bytes[i]);
4113 }
4114 }
4115 PF_RULES_WUNLOCK();
4116 break;
4117 }
4118
4119 case DIOCGIFSPEEDV0:
4120 case DIOCGIFSPEEDV1: {
4121 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
4122 struct pf_ifspeed_v1 ps;
4123 struct ifnet *ifp;
4124
4125 if (psp->ifname[0] == '\0') {
4126 error = EINVAL;
4127 break;
4128 }
4129
4130 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4131 if (error != 0)
4132 break;
4133 ifp = ifunit(ps.ifname);
4134 if (ifp != NULL) {
4135 psp->baudrate32 =
4136 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4137 if (cmd == DIOCGIFSPEEDV1)
4138 psp->baudrate = ifp->if_baudrate;
4139 } else {
4140 error = EINVAL;
4141 }
4142 break;
4143 }
4144
4145 #ifdef ALTQ
4146 case DIOCSTARTALTQ: {
4147 struct pf_altq *altq;
4148
4149 PF_RULES_WLOCK();
4150 /* enable all altq interfaces on active list */
4151 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4152 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4153 error = pf_enable_altq(altq);
4154 if (error != 0)
4155 break;
4156 }
4157 }
4158 if (error == 0)
4159 V_pf_altq_running = 1;
4160 PF_RULES_WUNLOCK();
4161 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
4162 break;
4163 }
4164
4165 case DIOCSTOPALTQ: {
4166 struct pf_altq *altq;
4167
4168 PF_RULES_WLOCK();
4169 /* disable all altq interfaces on active list */
4170 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4171 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4172 error = pf_disable_altq(altq);
4173 if (error != 0)
4174 break;
4175 }
4176 }
4177 if (error == 0)
4178 V_pf_altq_running = 0;
4179 PF_RULES_WUNLOCK();
4180 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
4181 break;
4182 }
4183
4184 case DIOCADDALTQV0:
4185 case DIOCADDALTQV1: {
4186 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
4187 struct pf_altq *altq, *a;
4188 struct ifnet *ifp;
4189
4190 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4191 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4192 if (error)
4193 break;
4194 altq->local_flags = 0;
4195
4196 PF_RULES_WLOCK();
4197 if (pa->ticket != V_ticket_altqs_inactive) {
4198 PF_RULES_WUNLOCK();
4199 free(altq, M_PFALTQ);
4200 error = EBUSY;
4201 break;
4202 }
4203
4204 /*
4205 * if this is for a queue, find the discipline and
4206 * copy the necessary fields
4207 */
4208 if (altq->qname[0] != 0) {
4209 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4210 PF_RULES_WUNLOCK();
4211 error = EBUSY;
4212 free(altq, M_PFALTQ);
4213 break;
4214 }
4215 altq->altq_disc = NULL;
4216 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4217 if (strncmp(a->ifname, altq->ifname,
4218 IFNAMSIZ) == 0) {
4219 altq->altq_disc = a->altq_disc;
4220 break;
4221 }
4222 }
4223 }
4224
4225 if ((ifp = ifunit(altq->ifname)) == NULL)
4226 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4227 else
4228 error = altq_add(ifp, altq);
4229
4230 if (error) {
4231 PF_RULES_WUNLOCK();
4232 free(altq, M_PFALTQ);
4233 break;
4234 }
4235
4236 if (altq->qname[0] != 0)
4237 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4238 else
4239 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4240 /* version error check done on import above */
4241 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4242 PF_RULES_WUNLOCK();
4243 break;
4244 }
4245
4246 case DIOCGETALTQSV0:
4247 case DIOCGETALTQSV1: {
4248 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
4249 struct pf_altq *altq;
4250
4251 PF_RULES_RLOCK();
4252 pa->nr = 0;
4253 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4254 pa->nr++;
4255 TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4256 pa->nr++;
4257 pa->ticket = V_ticket_altqs_active;
4258 PF_RULES_RUNLOCK();
4259 break;
4260 }
4261
4262 case DIOCGETALTQV0:
4263 case DIOCGETALTQV1: {
4264 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
4265 struct pf_altq *altq;
4266
4267 PF_RULES_RLOCK();
4268 if (pa->ticket != V_ticket_altqs_active) {
4269 PF_RULES_RUNLOCK();
4270 error = EBUSY;
4271 break;
4272 }
4273 altq = pf_altq_get_nth_active(pa->nr);
4274 if (altq == NULL) {
4275 PF_RULES_RUNLOCK();
4276 error = EBUSY;
4277 break;
4278 }
4279 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4280 PF_RULES_RUNLOCK();
4281 break;
4282 }
4283
4284 case DIOCCHANGEALTQV0:
4285 case DIOCCHANGEALTQV1:
4286 /* CHANGEALTQ not supported yet! */
4287 error = ENODEV;
4288 break;
4289
4290 case DIOCGETQSTATSV0:
4291 case DIOCGETQSTATSV1: {
4292 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
4293 struct pf_altq *altq;
4294 int nbytes;
4295 u_int32_t version;
4296
4297 PF_RULES_RLOCK();
4298 if (pq->ticket != V_ticket_altqs_active) {
4299 PF_RULES_RUNLOCK();
4300 error = EBUSY;
4301 break;
4302 }
4303 nbytes = pq->nbytes;
4304 altq = pf_altq_get_nth_active(pq->nr);
4305 if (altq == NULL) {
4306 PF_RULES_RUNLOCK();
4307 error = EBUSY;
4308 break;
4309 }
4310
4311 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4312 PF_RULES_RUNLOCK();
4313 error = ENXIO;
4314 break;
4315 }
4316 PF_RULES_RUNLOCK();
4317 if (cmd == DIOCGETQSTATSV0)
4318 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */
4319 else
4320 version = pq->version;
4321 error = altq_getqstats(altq, pq->buf, &nbytes, version);
4322 if (error == 0) {
4323 pq->scheduler = altq->scheduler;
4324 pq->nbytes = nbytes;
4325 }
4326 break;
4327 }
4328 #endif /* ALTQ */
4329
4330 case DIOCBEGINADDRS: {
4331 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4332
4333 PF_RULES_WLOCK();
4334 pf_empty_kpool(&V_pf_pabuf);
4335 pp->ticket = ++V_ticket_pabuf;
4336 PF_RULES_WUNLOCK();
4337 break;
4338 }
4339
4340 case DIOCADDADDR: {
4341 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4342 struct pf_kpooladdr *pa;
4343 struct pfi_kkif *kif = NULL;
4344
4345 #ifndef INET
4346 if (pp->af == AF_INET) {
4347 error = EAFNOSUPPORT;
4348 break;
4349 }
4350 #endif /* INET */
4351 #ifndef INET6
4352 if (pp->af == AF_INET6) {
4353 error = EAFNOSUPPORT;
4354 break;
4355 }
4356 #endif /* INET6 */
4357 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4358 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4359 pp->addr.addr.type != PF_ADDR_TABLE) {
4360 error = EINVAL;
4361 break;
4362 }
4363 if (pp->addr.addr.p.dyn != NULL) {
4364 error = EINVAL;
4365 break;
4366 }
4367 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
4368 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
4369 if (error != 0)
4370 break;
4371 if (pa->ifname[0])
4372 kif = pf_kkif_create(M_WAITOK);
4373 PF_RULES_WLOCK();
4374 if (pp->ticket != V_ticket_pabuf) {
4375 PF_RULES_WUNLOCK();
4376 if (pa->ifname[0])
4377 pf_kkif_free(kif);
4378 free(pa, M_PFRULE);
4379 error = EBUSY;
4380 break;
4381 }
4382 if (pa->ifname[0]) {
4383 pa->kif = pfi_kkif_attach(kif, pa->ifname);
4384 kif = NULL;
4385 pfi_kkif_ref(pa->kif);
4386 } else
4387 pa->kif = NULL;
4388 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
4389 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
4390 if (pa->ifname[0])
4391 pfi_kkif_unref(pa->kif);
4392 PF_RULES_WUNLOCK();
4393 free(pa, M_PFRULE);
4394 break;
4395 }
4396 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
4397 PF_RULES_WUNLOCK();
4398 break;
4399 }
4400
4401 case DIOCGETADDRS: {
4402 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4403 struct pf_kpool *pool;
4404 struct pf_kpooladdr *pa;
4405
4406 pp->anchor[sizeof(pp->anchor) - 1] = 0;
4407 pp->nr = 0;
4408
4409 PF_RULES_RLOCK();
4410 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4411 pp->r_num, 0, 1, 0);
4412 if (pool == NULL) {
4413 PF_RULES_RUNLOCK();
4414 error = EBUSY;
4415 break;
4416 }
4417 TAILQ_FOREACH(pa, &pool->list, entries)
4418 pp->nr++;
4419 PF_RULES_RUNLOCK();
4420 break;
4421 }
4422
4423 case DIOCGETADDR: {
4424 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
4425 struct pf_kpool *pool;
4426 struct pf_kpooladdr *pa;
4427 u_int32_t nr = 0;
4428
4429 pp->anchor[sizeof(pp->anchor) - 1] = 0;
4430
4431 PF_RULES_RLOCK();
4432 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4433 pp->r_num, 0, 1, 1);
4434 if (pool == NULL) {
4435 PF_RULES_RUNLOCK();
4436 error = EBUSY;
4437 break;
4438 }
4439 pa = TAILQ_FIRST(&pool->list);
4440 while ((pa != NULL) && (nr < pp->nr)) {
4441 pa = TAILQ_NEXT(pa, entries);
4442 nr++;
4443 }
4444 if (pa == NULL) {
4445 PF_RULES_RUNLOCK();
4446 error = EBUSY;
4447 break;
4448 }
4449 pf_kpooladdr_to_pooladdr(pa, &pp->addr);
4450 pf_addr_copyout(&pp->addr.addr);
4451 PF_RULES_RUNLOCK();
4452 break;
4453 }
4454
4455 case DIOCCHANGEADDR: {
4456 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
4457 struct pf_kpool *pool;
4458 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
4459 struct pf_kruleset *ruleset;
4460 struct pfi_kkif *kif = NULL;
4461
4462 pca->anchor[sizeof(pca->anchor) - 1] = 0;
4463
4464 if (pca->action < PF_CHANGE_ADD_HEAD ||
4465 pca->action > PF_CHANGE_REMOVE) {
4466 error = EINVAL;
4467 break;
4468 }
4469 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4470 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4471 pca->addr.addr.type != PF_ADDR_TABLE) {
4472 error = EINVAL;
4473 break;
4474 }
4475 if (pca->addr.addr.p.dyn != NULL) {
4476 error = EINVAL;
4477 break;
4478 }
4479
4480 if (pca->action != PF_CHANGE_REMOVE) {
4481 #ifndef INET
4482 if (pca->af == AF_INET) {
4483 error = EAFNOSUPPORT;
4484 break;
4485 }
4486 #endif /* INET */
4487 #ifndef INET6
4488 if (pca->af == AF_INET6) {
4489 error = EAFNOSUPPORT;
4490 break;
4491 }
4492 #endif /* INET6 */
4493 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4494 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4495 if (newpa->ifname[0])
4496 kif = pf_kkif_create(M_WAITOK);
4497 newpa->kif = NULL;
4498 }
4499 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4500 PF_RULES_WLOCK();
4501 ruleset = pf_find_kruleset(pca->anchor);
4502 if (ruleset == NULL)
4503 ERROUT(EBUSY);
4504
4505 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4506 pca->r_num, pca->r_last, 1, 1);
4507 if (pool == NULL)
4508 ERROUT(EBUSY);
4509
4510 if (pca->action != PF_CHANGE_REMOVE) {
4511 if (newpa->ifname[0]) {
4512 newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4513 pfi_kkif_ref(newpa->kif);
4514 kif = NULL;
4515 }
4516
4517 switch (newpa->addr.type) {
4518 case PF_ADDR_DYNIFTL:
4519 error = pfi_dynaddr_setup(&newpa->addr,
4520 pca->af);
4521 break;
4522 case PF_ADDR_TABLE:
4523 newpa->addr.p.tbl = pfr_attach_table(ruleset,
4524 newpa->addr.v.tblname);
4525 if (newpa->addr.p.tbl == NULL)
4526 error = ENOMEM;
4527 break;
4528 }
4529 if (error)
4530 goto DIOCCHANGEADDR_error;
4531 }
4532
4533 switch (pca->action) {
4534 case PF_CHANGE_ADD_HEAD:
4535 oldpa = TAILQ_FIRST(&pool->list);
4536 break;
4537 case PF_CHANGE_ADD_TAIL:
4538 oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4539 break;
4540 default:
4541 oldpa = TAILQ_FIRST(&pool->list);
4542 for (int i = 0; oldpa && i < pca->nr; i++)
4543 oldpa = TAILQ_NEXT(oldpa, entries);
4544
4545 if (oldpa == NULL)
4546 ERROUT(EINVAL);
4547 }
4548
4549 if (pca->action == PF_CHANGE_REMOVE) {
4550 TAILQ_REMOVE(&pool->list, oldpa, entries);
4551 switch (oldpa->addr.type) {
4552 case PF_ADDR_DYNIFTL:
4553 pfi_dynaddr_remove(oldpa->addr.p.dyn);
4554 break;
4555 case PF_ADDR_TABLE:
4556 pfr_detach_table(oldpa->addr.p.tbl);
4557 break;
4558 }
4559 if (oldpa->kif)
4560 pfi_kkif_unref(oldpa->kif);
4561 free(oldpa, M_PFRULE);
4562 } else {
4563 if (oldpa == NULL)
4564 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4565 else if (pca->action == PF_CHANGE_ADD_HEAD ||
4566 pca->action == PF_CHANGE_ADD_BEFORE)
4567 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4568 else
4569 TAILQ_INSERT_AFTER(&pool->list, oldpa,
4570 newpa, entries);
4571 }
4572
4573 pool->cur = TAILQ_FIRST(&pool->list);
4574 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4575 PF_RULES_WUNLOCK();
4576 break;
4577
4578 #undef ERROUT
4579 DIOCCHANGEADDR_error:
4580 if (newpa != NULL) {
4581 if (newpa->kif)
4582 pfi_kkif_unref(newpa->kif);
4583 free(newpa, M_PFRULE);
4584 }
4585 PF_RULES_WUNLOCK();
4586 pf_kkif_free(kif);
4587 break;
4588 }
4589
4590 case DIOCGETRULESETS: {
4591 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4592 struct pf_kruleset *ruleset;
4593 struct pf_kanchor *anchor;
4594
4595 pr->path[sizeof(pr->path) - 1] = 0;
4596
4597 PF_RULES_RLOCK();
4598 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4599 PF_RULES_RUNLOCK();
4600 error = ENOENT;
4601 break;
4602 }
4603 pr->nr = 0;
4604 if (ruleset->anchor == NULL) {
4605 /* XXX kludge for pf_main_ruleset */
4606 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4607 if (anchor->parent == NULL)
4608 pr->nr++;
4609 } else {
4610 RB_FOREACH(anchor, pf_kanchor_node,
4611 &ruleset->anchor->children)
4612 pr->nr++;
4613 }
4614 PF_RULES_RUNLOCK();
4615 break;
4616 }
4617
4618 case DIOCGETRULESET: {
4619 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
4620 struct pf_kruleset *ruleset;
4621 struct pf_kanchor *anchor;
4622 u_int32_t nr = 0;
4623
4624 pr->path[sizeof(pr->path) - 1] = 0;
4625
4626 PF_RULES_RLOCK();
4627 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4628 PF_RULES_RUNLOCK();
4629 error = ENOENT;
4630 break;
4631 }
4632 pr->name[0] = 0;
4633 if (ruleset->anchor == NULL) {
4634 /* XXX kludge for pf_main_ruleset */
4635 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4636 if (anchor->parent == NULL && nr++ == pr->nr) {
4637 strlcpy(pr->name, anchor->name,
4638 sizeof(pr->name));
4639 break;
4640 }
4641 } else {
4642 RB_FOREACH(anchor, pf_kanchor_node,
4643 &ruleset->anchor->children)
4644 if (nr++ == pr->nr) {
4645 strlcpy(pr->name, anchor->name,
4646 sizeof(pr->name));
4647 break;
4648 }
4649 }
4650 if (!pr->name[0])
4651 error = EBUSY;
4652 PF_RULES_RUNLOCK();
4653 break;
4654 }
4655
4656 case DIOCRCLRTABLES: {
4657 struct pfioc_table *io = (struct pfioc_table *)addr;
4658
4659 if (io->pfrio_esize != 0) {
4660 error = ENODEV;
4661 break;
4662 }
4663 PF_RULES_WLOCK();
4664 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4665 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4666 PF_RULES_WUNLOCK();
4667 break;
4668 }
4669
4670 case DIOCRADDTABLES: {
4671 struct pfioc_table *io = (struct pfioc_table *)addr;
4672 struct pfr_table *pfrts;
4673 size_t totlen;
4674
4675 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4676 error = ENODEV;
4677 break;
4678 }
4679
4680 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4681 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4682 error = ENOMEM;
4683 break;
4684 }
4685
4686 totlen = io->pfrio_size * sizeof(struct pfr_table);
4687 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4688 M_TEMP, M_WAITOK);
4689 error = copyin(io->pfrio_buffer, pfrts, totlen);
4690 if (error) {
4691 free(pfrts, M_TEMP);
4692 break;
4693 }
4694 PF_RULES_WLOCK();
4695 error = pfr_add_tables(pfrts, io->pfrio_size,
4696 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4697 PF_RULES_WUNLOCK();
4698 free(pfrts, M_TEMP);
4699 break;
4700 }
4701
4702 case DIOCRDELTABLES: {
4703 struct pfioc_table *io = (struct pfioc_table *)addr;
4704 struct pfr_table *pfrts;
4705 size_t totlen;
4706
4707 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4708 error = ENODEV;
4709 break;
4710 }
4711
4712 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4713 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4714 error = ENOMEM;
4715 break;
4716 }
4717
4718 totlen = io->pfrio_size * sizeof(struct pfr_table);
4719 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4720 M_TEMP, M_WAITOK);
4721 error = copyin(io->pfrio_buffer, pfrts, totlen);
4722 if (error) {
4723 free(pfrts, M_TEMP);
4724 break;
4725 }
4726 PF_RULES_WLOCK();
4727 error = pfr_del_tables(pfrts, io->pfrio_size,
4728 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4729 PF_RULES_WUNLOCK();
4730 free(pfrts, M_TEMP);
4731 break;
4732 }
4733
4734 case DIOCRGETTABLES: {
4735 struct pfioc_table *io = (struct pfioc_table *)addr;
4736 struct pfr_table *pfrts;
4737 size_t totlen;
4738 int n;
4739
4740 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4741 error = ENODEV;
4742 break;
4743 }
4744 PF_RULES_RLOCK();
4745 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4746 if (n < 0) {
4747 PF_RULES_RUNLOCK();
4748 error = EINVAL;
4749 break;
4750 }
4751 io->pfrio_size = min(io->pfrio_size, n);
4752
4753 totlen = io->pfrio_size * sizeof(struct pfr_table);
4754
4755 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4756 M_TEMP, M_NOWAIT | M_ZERO);
4757 if (pfrts == NULL) {
4758 error = ENOMEM;
4759 PF_RULES_RUNLOCK();
4760 break;
4761 }
4762 error = pfr_get_tables(&io->pfrio_table, pfrts,
4763 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4764 PF_RULES_RUNLOCK();
4765 if (error == 0)
4766 error = copyout(pfrts, io->pfrio_buffer, totlen);
4767 free(pfrts, M_TEMP);
4768 break;
4769 }
4770
4771 case DIOCRGETTSTATS: {
4772 struct pfioc_table *io = (struct pfioc_table *)addr;
4773 struct pfr_tstats *pfrtstats;
4774 size_t totlen;
4775 int n;
4776
4777 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4778 error = ENODEV;
4779 break;
4780 }
4781 PF_TABLE_STATS_LOCK();
4782 PF_RULES_RLOCK();
4783 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4784 if (n < 0) {
4785 PF_RULES_RUNLOCK();
4786 PF_TABLE_STATS_UNLOCK();
4787 error = EINVAL;
4788 break;
4789 }
4790 io->pfrio_size = min(io->pfrio_size, n);
4791
4792 totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4793 pfrtstats = mallocarray(io->pfrio_size,
4794 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4795 if (pfrtstats == NULL) {
4796 error = ENOMEM;
4797 PF_RULES_RUNLOCK();
4798 PF_TABLE_STATS_UNLOCK();
4799 break;
4800 }
4801 error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4802 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4803 PF_RULES_RUNLOCK();
4804 PF_TABLE_STATS_UNLOCK();
4805 if (error == 0)
4806 error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4807 free(pfrtstats, M_TEMP);
4808 break;
4809 }
4810
4811 case DIOCRCLRTSTATS: {
4812 struct pfioc_table *io = (struct pfioc_table *)addr;
4813 struct pfr_table *pfrts;
4814 size_t totlen;
4815
4816 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4817 error = ENODEV;
4818 break;
4819 }
4820
4821 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4822 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4823 /* We used to count tables and use the minimum required
4824 * size, so we didn't fail on overly large requests.
4825 * Keep doing so. */
4826 io->pfrio_size = pf_ioctl_maxcount;
4827 break;
4828 }
4829
4830 totlen = io->pfrio_size * sizeof(struct pfr_table);
4831 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4832 M_TEMP, M_WAITOK);
4833 error = copyin(io->pfrio_buffer, pfrts, totlen);
4834 if (error) {
4835 free(pfrts, M_TEMP);
4836 break;
4837 }
4838
4839 PF_TABLE_STATS_LOCK();
4840 PF_RULES_RLOCK();
4841 error = pfr_clr_tstats(pfrts, io->pfrio_size,
4842 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4843 PF_RULES_RUNLOCK();
4844 PF_TABLE_STATS_UNLOCK();
4845 free(pfrts, M_TEMP);
4846 break;
4847 }
4848
4849 case DIOCRSETTFLAGS: {
4850 struct pfioc_table *io = (struct pfioc_table *)addr;
4851 struct pfr_table *pfrts;
4852 size_t totlen;
4853 int n;
4854
4855 if (io->pfrio_esize != sizeof(struct pfr_table)) {
4856 error = ENODEV;
4857 break;
4858 }
4859
4860 PF_RULES_RLOCK();
4861 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4862 if (n < 0) {
4863 PF_RULES_RUNLOCK();
4864 error = EINVAL;
4865 break;
4866 }
4867
4868 io->pfrio_size = min(io->pfrio_size, n);
4869 PF_RULES_RUNLOCK();
4870
4871 totlen = io->pfrio_size * sizeof(struct pfr_table);
4872 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4873 M_TEMP, M_WAITOK);
4874 error = copyin(io->pfrio_buffer, pfrts, totlen);
4875 if (error) {
4876 free(pfrts, M_TEMP);
4877 break;
4878 }
4879 PF_RULES_WLOCK();
4880 error = pfr_set_tflags(pfrts, io->pfrio_size,
4881 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4882 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4883 PF_RULES_WUNLOCK();
4884 free(pfrts, M_TEMP);
4885 break;
4886 }
4887
4888 case DIOCRCLRADDRS: {
4889 struct pfioc_table *io = (struct pfioc_table *)addr;
4890
4891 if (io->pfrio_esize != 0) {
4892 error = ENODEV;
4893 break;
4894 }
4895 PF_RULES_WLOCK();
4896 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4897 io->pfrio_flags | PFR_FLAG_USERIOCTL);
4898 PF_RULES_WUNLOCK();
4899 break;
4900 }
4901
4902 case DIOCRADDADDRS: {
4903 struct pfioc_table *io = (struct pfioc_table *)addr;
4904 struct pfr_addr *pfras;
4905 size_t totlen;
4906
4907 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4908 error = ENODEV;
4909 break;
4910 }
4911 if (io->pfrio_size < 0 ||
4912 io->pfrio_size > pf_ioctl_maxcount ||
4913 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4914 error = EINVAL;
4915 break;
4916 }
4917 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4918 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4919 M_TEMP, M_WAITOK);
4920 error = copyin(io->pfrio_buffer, pfras, totlen);
4921 if (error) {
4922 free(pfras, M_TEMP);
4923 break;
4924 }
4925 PF_RULES_WLOCK();
4926 error = pfr_add_addrs(&io->pfrio_table, pfras,
4927 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4928 PFR_FLAG_USERIOCTL);
4929 PF_RULES_WUNLOCK();
4930 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4931 error = copyout(pfras, io->pfrio_buffer, totlen);
4932 free(pfras, M_TEMP);
4933 break;
4934 }
4935
4936 case DIOCRDELADDRS: {
4937 struct pfioc_table *io = (struct pfioc_table *)addr;
4938 struct pfr_addr *pfras;
4939 size_t totlen;
4940
4941 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4942 error = ENODEV;
4943 break;
4944 }
4945 if (io->pfrio_size < 0 ||
4946 io->pfrio_size > pf_ioctl_maxcount ||
4947 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4948 error = EINVAL;
4949 break;
4950 }
4951 totlen = io->pfrio_size * sizeof(struct pfr_addr);
4952 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4953 M_TEMP, M_WAITOK);
4954 error = copyin(io->pfrio_buffer, pfras, totlen);
4955 if (error) {
4956 free(pfras, M_TEMP);
4957 break;
4958 }
4959 PF_RULES_WLOCK();
4960 error = pfr_del_addrs(&io->pfrio_table, pfras,
4961 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4962 PFR_FLAG_USERIOCTL);
4963 PF_RULES_WUNLOCK();
4964 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4965 error = copyout(pfras, io->pfrio_buffer, totlen);
4966 free(pfras, M_TEMP);
4967 break;
4968 }
4969
4970 case DIOCRSETADDRS: {
4971 struct pfioc_table *io = (struct pfioc_table *)addr;
4972 struct pfr_addr *pfras;
4973 size_t totlen, count;
4974
4975 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4976 error = ENODEV;
4977 break;
4978 }
4979 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4980 error = EINVAL;
4981 break;
4982 }
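		/*
		 * With PFR_FLAG_FEEDBACK the reply may carry more entries
		 * than were submitted (pfrio_size2), so size the buffer for
		 * the larger of the two counts.
		 */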
4983 count = max(io->pfrio_size, io->pfrio_size2);
4984 if (count > pf_ioctl_maxcount ||
4985 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4986 error = EINVAL;
4987 break;
4988 }
4989 totlen = count * sizeof(struct pfr_addr);
4990 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4991 M_WAITOK);
4992 error = copyin(io->pfrio_buffer, pfras, totlen);
4993 if (error) {
4994 free(pfras, M_TEMP);
4995 break;
4996 }
4997 PF_RULES_WLOCK();
4998 error = pfr_set_addrs(&io->pfrio_table, pfras,
4999 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
5000 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
5001 PFR_FLAG_USERIOCTL, 0);
5002 PF_RULES_WUNLOCK();
5003 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5004 error = copyout(pfras, io->pfrio_buffer, totlen);
5005 free(pfras, M_TEMP);
5006 break;
5007 }
5008
5009 case DIOCRGETADDRS: {
5010 struct pfioc_table *io = (struct pfioc_table *)addr;
5011 struct pfr_addr *pfras;
5012 size_t totlen;
5013
5014 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5015 error = ENODEV;
5016 break;
5017 }
5018 if (io->pfrio_size < 0 ||
5019 io->pfrio_size > pf_ioctl_maxcount ||
5020 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5021 error = EINVAL;
5022 break;
5023 }
5024 totlen = io->pfrio_size * sizeof(struct pfr_addr);
5025 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5026 M_TEMP, M_WAITOK | M_ZERO);
5027 PF_RULES_RLOCK();
5028 error = pfr_get_addrs(&io->pfrio_table, pfras,
5029 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5030 PF_RULES_RUNLOCK();
5031 if (error == 0)
5032 error = copyout(pfras, io->pfrio_buffer, totlen);
5033 free(pfras, M_TEMP);
5034 break;
5035 }
5036
5037 case DIOCRGETASTATS: {
5038 struct pfioc_table *io = (struct pfioc_table *)addr;
5039 struct pfr_astats *pfrastats;
5040 size_t totlen;
5041
5042 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
5043 error = ENODEV;
5044 break;
5045 }
5046 if (io->pfrio_size < 0 ||
5047 io->pfrio_size > pf_ioctl_maxcount ||
5048 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5049 error = EINVAL;
5050 break;
5051 }
5052 totlen = io->pfrio_size * sizeof(struct pfr_astats);
5053 pfrastats = mallocarray(io->pfrio_size,
5054 sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
5055 PF_RULES_RLOCK();
5056 error = pfr_get_astats(&io->pfrio_table, pfrastats,
5057 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5058 PF_RULES_RUNLOCK();
5059 if (error == 0)
5060 error = copyout(pfrastats, io->pfrio_buffer, totlen);
5061 free(pfrastats, M_TEMP);
5062 break;
5063 }
5064
5065 case DIOCRCLRASTATS: {
5066 struct pfioc_table *io = (struct pfioc_table *)addr;
5067 struct pfr_addr *pfras;
5068 size_t totlen;
5069
5070 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5071 error = ENODEV;
5072 break;
5073 }
5074 if (io->pfrio_size < 0 ||
5075 io->pfrio_size > pf_ioctl_maxcount ||
5076 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5077 error = EINVAL;
5078 break;
5079 }
5080 totlen = io->pfrio_size * sizeof(struct pfr_addr);
5081 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5082 M_TEMP, M_WAITOK);
5083 error = copyin(io->pfrio_buffer, pfras, totlen);
5084 if (error) {
5085 free(pfras, M_TEMP);
5086 break;
5087 }
5088 PF_RULES_WLOCK();
5089 error = pfr_clr_astats(&io->pfrio_table, pfras,
5090 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5091 PFR_FLAG_USERIOCTL);
5092 PF_RULES_WUNLOCK();
5093 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5094 error = copyout(pfras, io->pfrio_buffer, totlen);
5095 free(pfras, M_TEMP);
5096 break;
5097 }
5098
5099 case DIOCRTSTADDRS: {
5100 struct pfioc_table *io = (struct pfioc_table *)addr;
5101 struct pfr_addr *pfras;
5102 size_t totlen;
5103
5104 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5105 error = ENODEV;
5106 break;
5107 }
5108 if (io->pfrio_size < 0 ||
5109 io->pfrio_size > pf_ioctl_maxcount ||
5110 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5111 error = EINVAL;
5112 break;
5113 }
5114 totlen = io->pfrio_size * sizeof(struct pfr_addr);
5115 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5116 M_TEMP, M_WAITOK);
5117 error = copyin(io->pfrio_buffer, pfras, totlen);
5118 if (error) {
5119 free(pfras, M_TEMP);
5120 break;
5121 }
5122 PF_RULES_RLOCK();
5123 error = pfr_tst_addrs(&io->pfrio_table, pfras,
5124 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5125 PFR_FLAG_USERIOCTL);
5126 PF_RULES_RUNLOCK();
5127 if (error == 0)
5128 error = copyout(pfras, io->pfrio_buffer, totlen);
5129 free(pfras, M_TEMP);
5130 break;
5131 }
5132
5133 case DIOCRINADEFINE: {
5134 struct pfioc_table *io = (struct pfioc_table *)addr;
5135 struct pfr_addr *pfras;
5136 size_t totlen;
5137
5138 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5139 error = ENODEV;
5140 break;
5141 }
5142 if (io->pfrio_size < 0 ||
5143 io->pfrio_size > pf_ioctl_maxcount ||
5144 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5145 error = EINVAL;
5146 break;
5147 }
5148 totlen = io->pfrio_size * sizeof(struct pfr_addr);
5149 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5150 M_TEMP, M_WAITOK);
5151 error = copyin(io->pfrio_buffer, pfras, totlen);
5152 if (error) {
5153 free(pfras, M_TEMP);
5154 break;
5155 }
5156 PF_RULES_WLOCK();
5157 error = pfr_ina_define(&io->pfrio_table, pfras,
5158 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5159 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5160 PF_RULES_WUNLOCK();
5161 free(pfras, M_TEMP);
5162 break;
5163 }
5164
5165 case DIOCOSFPADD: {
5166 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5167 PF_RULES_WLOCK();
5168 error = pf_osfp_add(io);
5169 PF_RULES_WUNLOCK();
5170 break;
5171 }
5172
5173 case DIOCOSFPGET: {
5174 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5175 PF_RULES_RLOCK();
5176 error = pf_osfp_get(io);
5177 PF_RULES_RUNLOCK();
5178 break;
5179 }
5180
5181 case DIOCXBEGIN: {
5182 struct pfioc_trans *io = (struct pfioc_trans *)addr;
5183 struct pfioc_trans_e *ioes, *ioe;
5184 size_t totlen;
5185 int i;
5186
5187 if (io->esize != sizeof(*ioe)) {
5188 error = ENODEV;
5189 break;
5190 }
5191 if (io->size < 0 ||
5192 io->size > pf_ioctl_maxcount ||
5193 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5194 error = EINVAL;
5195 break;
5196 }
5197 totlen = sizeof(struct pfioc_trans_e) * io->size;
5198 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5199 M_TEMP, M_WAITOK);
5200 error = copyin(io->array, ioes, totlen);
5201 if (error) {
5202 free(ioes, M_TEMP);
5203 break;
5204 }
5205 		/* Ensure there are no more Ethernet rules to clean up. */
5206 NET_EPOCH_DRAIN_CALLBACKS();
5207 PF_RULES_WLOCK();
5208 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5209 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5210 switch (ioe->rs_num) {
5211 case PF_RULESET_ETH:
5212 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5213 PF_RULES_WUNLOCK();
5214 free(ioes, M_TEMP);
5215 goto fail;
5216 }
5217 break;
5218 #ifdef ALTQ
5219 case PF_RULESET_ALTQ:
5220 if (ioe->anchor[0]) {
5221 PF_RULES_WUNLOCK();
5222 free(ioes, M_TEMP);
5223 error = EINVAL;
5224 goto fail;
5225 }
5226 if ((error = pf_begin_altq(&ioe->ticket))) {
5227 PF_RULES_WUNLOCK();
5228 free(ioes, M_TEMP);
5229 goto fail;
5230 }
5231 break;
5232 #endif /* ALTQ */
5233 case PF_RULESET_TABLE:
5234 {
5235 struct pfr_table table;
5236
5237 bzero(&table, sizeof(table));
5238 strlcpy(table.pfrt_anchor, ioe->anchor,
5239 sizeof(table.pfrt_anchor));
5240 if ((error = pfr_ina_begin(&table,
5241 &ioe->ticket, NULL, 0))) {
5242 PF_RULES_WUNLOCK();
5243 free(ioes, M_TEMP);
5244 goto fail;
5245 }
5246 break;
5247 }
5248 default:
5249 if ((error = pf_begin_rules(&ioe->ticket,
5250 ioe->rs_num, ioe->anchor))) {
5251 PF_RULES_WUNLOCK();
5252 free(ioes, M_TEMP);
5253 goto fail;
5254 }
5255 break;
5256 }
5257 }
5258 PF_RULES_WUNLOCK();
5259 error = copyout(ioes, io->array, totlen);
5260 free(ioes, M_TEMP);
5261 break;
5262 }
5263
5264 case DIOCXROLLBACK: {
5265 struct pfioc_trans *io = (struct pfioc_trans *)addr;
5266 struct pfioc_trans_e *ioe, *ioes;
5267 size_t totlen;
5268 int i;
5269
5270 if (io->esize != sizeof(*ioe)) {
5271 error = ENODEV;
5272 break;
5273 }
5274 if (io->size < 0 ||
5275 io->size > pf_ioctl_maxcount ||
5276 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5277 error = EINVAL;
5278 break;
5279 }
5280 totlen = sizeof(struct pfioc_trans_e) * io->size;
5281 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5282 M_TEMP, M_WAITOK);
5283 error = copyin(io->array, ioes, totlen);
5284 if (error) {
5285 free(ioes, M_TEMP);
5286 break;
5287 }
5288 PF_RULES_WLOCK();
5289 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5290 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5291 switch (ioe->rs_num) {
5292 case PF_RULESET_ETH:
5293 if ((error = pf_rollback_eth(ioe->ticket,
5294 ioe->anchor))) {
5295 PF_RULES_WUNLOCK();
5296 free(ioes, M_TEMP);
5297 goto fail; /* really bad */
5298 }
5299 break;
5300 #ifdef ALTQ
5301 case PF_RULESET_ALTQ:
5302 if (ioe->anchor[0]) {
5303 PF_RULES_WUNLOCK();
5304 free(ioes, M_TEMP);
5305 error = EINVAL;
5306 goto fail;
5307 }
5308 if ((error = pf_rollback_altq(ioe->ticket))) {
5309 PF_RULES_WUNLOCK();
5310 free(ioes, M_TEMP);
5311 goto fail; /* really bad */
5312 }
5313 break;
5314 #endif /* ALTQ */
5315 case PF_RULESET_TABLE:
5316 {
5317 struct pfr_table table;
5318
5319 bzero(&table, sizeof(table));
5320 strlcpy(table.pfrt_anchor, ioe->anchor,
5321 sizeof(table.pfrt_anchor));
5322 if ((error = pfr_ina_rollback(&table,
5323 ioe->ticket, NULL, 0))) {
5324 PF_RULES_WUNLOCK();
5325 free(ioes, M_TEMP);
5326 goto fail; /* really bad */
5327 }
5328 break;
5329 }
5330 default:
5331 if ((error = pf_rollback_rules(ioe->ticket,
5332 ioe->rs_num, ioe->anchor))) {
5333 PF_RULES_WUNLOCK();
5334 free(ioes, M_TEMP);
5335 goto fail; /* really bad */
5336 }
5337 break;
5338 }
5339 }
5340 PF_RULES_WUNLOCK();
5341 free(ioes, M_TEMP);
5342 break;
5343 }
5344
5345 case DIOCXCOMMIT: {
5346 struct pfioc_trans *io = (struct pfioc_trans *)addr;
5347 struct pfioc_trans_e *ioe, *ioes;
5348 struct pf_kruleset *rs;
5349 struct pf_keth_ruleset *ers;
5350 size_t totlen;
5351 int i;
5352
5353 if (io->esize != sizeof(*ioe)) {
5354 error = ENODEV;
5355 break;
5356 }
5357
5358 if (io->size < 0 ||
5359 io->size > pf_ioctl_maxcount ||
5360 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5361 error = EINVAL;
5362 break;
5363 }
5364
5365 totlen = sizeof(struct pfioc_trans_e) * io->size;
5366 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5367 M_TEMP, M_WAITOK);
5368 error = copyin(io->array, ioes, totlen);
5369 if (error) {
5370 free(ioes, M_TEMP);
5371 break;
5372 }
5373 PF_RULES_WLOCK();
5374 		/* First make sure everything will succeed. */
5375 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5376 ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5377 switch (ioe->rs_num) {
5378 case PF_RULESET_ETH:
5379 ers = pf_find_keth_ruleset(ioe->anchor);
5380 if (ers == NULL || ioe->ticket == 0 ||
5381 ioe->ticket != ers->inactive.ticket) {
5382 PF_RULES_WUNLOCK();
5383 free(ioes, M_TEMP);
5384 error = EINVAL;
5385 goto fail;
5386 }
5387 break;
5388 #ifdef ALTQ
5389 case PF_RULESET_ALTQ:
5390 if (ioe->anchor[0]) {
5391 PF_RULES_WUNLOCK();
5392 free(ioes, M_TEMP);
5393 error = EINVAL;
5394 goto fail;
5395 }
5396 if (!V_altqs_inactive_open || ioe->ticket !=
5397 V_ticket_altqs_inactive) {
5398 PF_RULES_WUNLOCK();
5399 free(ioes, M_TEMP);
5400 error = EBUSY;
5401 goto fail;
5402 }
5403 break;
5404 #endif /* ALTQ */
5405 case PF_RULESET_TABLE:
5406 rs = pf_find_kruleset(ioe->anchor);
5407 if (rs == NULL || !rs->topen || ioe->ticket !=
5408 rs->tticket) {
5409 PF_RULES_WUNLOCK();
5410 free(ioes, M_TEMP);
5411 error = EBUSY;
5412 goto fail;
5413 }
5414 break;
5415 default:
5416 if (ioe->rs_num < 0 || ioe->rs_num >=
5417 PF_RULESET_MAX) {
5418 PF_RULES_WUNLOCK();
5419 free(ioes, M_TEMP);
5420 error = EINVAL;
5421 goto fail;
5422 }
5423 rs = pf_find_kruleset(ioe->anchor);
5424 if (rs == NULL ||
5425 !rs->rules[ioe->rs_num].inactive.open ||
5426 rs->rules[ioe->rs_num].inactive.ticket !=
5427 ioe->ticket) {
5428 PF_RULES_WUNLOCK();
5429 free(ioes, M_TEMP);
5430 error = EBUSY;
5431 goto fail;
5432 }
5433 break;
5434 }
5435 }
5436 /* Now do the commit - no errors should happen here. */
5437 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5438 switch (ioe->rs_num) {
5439 case PF_RULESET_ETH:
5440 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5441 PF_RULES_WUNLOCK();
5442 free(ioes, M_TEMP);
5443 goto fail; /* really bad */
5444 }
5445 break;
5446 #ifdef ALTQ
5447 case PF_RULESET_ALTQ:
5448 if ((error = pf_commit_altq(ioe->ticket))) {
5449 PF_RULES_WUNLOCK();
5450 free(ioes, M_TEMP);
5451 goto fail; /* really bad */
5452 }
5453 break;
5454 #endif /* ALTQ */
5455 case PF_RULESET_TABLE:
5456 {
5457 struct pfr_table table;
5458
5459 bzero(&table, sizeof(table));
5460 (void)strlcpy(table.pfrt_anchor, ioe->anchor,
5461 sizeof(table.pfrt_anchor));
5462 if ((error = pfr_ina_commit(&table,
5463 ioe->ticket, NULL, NULL, 0))) {
5464 PF_RULES_WUNLOCK();
5465 free(ioes, M_TEMP);
5466 goto fail; /* really bad */
5467 }
5468 break;
5469 }
5470 default:
5471 if ((error = pf_commit_rules(ioe->ticket,
5472 ioe->rs_num, ioe->anchor))) {
5473 PF_RULES_WUNLOCK();
5474 free(ioes, M_TEMP);
5475 goto fail; /* really bad */
5476 }
5477 break;
5478 }
5479 }
5480 PF_RULES_WUNLOCK();
5481
5482 		/* Only hook into Ethernet traffic if we have rules for it. */
5483 if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5484 hook_pf_eth();
5485 else
5486 dehook_pf_eth();
5487
5488 free(ioes, M_TEMP);
5489 break;
5490 }
5491
5492 case DIOCGETSRCNODES: {
5493 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
5494 struct pf_srchash *sh;
5495 struct pf_ksrc_node *n;
5496 struct pf_src_node *p, *pstore;
5497 uint32_t i, nr = 0;
5498
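		/*
		 * Two passes: first count the nodes so the caller can size
		 * its buffer, then snapshot as many as fit and copy them out
		 * in one go.
		 */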
5499 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5500 i++, sh++) {
5501 PF_HASHROW_LOCK(sh);
5502 LIST_FOREACH(n, &sh->nodes, entry)
5503 nr++;
5504 PF_HASHROW_UNLOCK(sh);
5505 }
5506
5507 psn->psn_len = min(psn->psn_len,
5508 sizeof(struct pf_src_node) * nr);
5509
5510 if (psn->psn_len == 0) {
5511 psn->psn_len = sizeof(struct pf_src_node) * nr;
5512 break;
5513 }
5514
5515 nr = 0;
5516
5517 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5518 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5519 i++, sh++) {
5520 PF_HASHROW_LOCK(sh);
5521 LIST_FOREACH(n, &sh->nodes, entry) {
5522
5523 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5524 break;
5525
5526 pf_src_node_copy(n, p);
5527
5528 p++;
5529 nr++;
5530 }
5531 PF_HASHROW_UNLOCK(sh);
5532 }
5533 error = copyout(pstore, psn->psn_src_nodes,
5534 sizeof(struct pf_src_node) * nr);
5535 if (error) {
5536 free(pstore, M_TEMP);
5537 break;
5538 }
5539 psn->psn_len = sizeof(struct pf_src_node) * nr;
5540 free(pstore, M_TEMP);
5541 break;
5542 }
5543
5544 case DIOCCLRSRCNODES: {
5545 pf_clear_srcnodes(NULL);
5546 pf_purge_expired_src_nodes();
5547 break;
5548 }
5549
5550 case DIOCKILLSRCNODES:
5551 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5552 break;
5553
5554 #ifdef COMPAT_FREEBSD13
5555 case DIOCKEEPCOUNTERS_FREEBSD13:
5556 #endif
5557 case DIOCKEEPCOUNTERS:
5558 error = pf_keepcounters((struct pfioc_nv *)addr);
5559 break;
5560
5561 case DIOCGETSYNCOOKIES:
5562 error = pf_get_syncookies((struct pfioc_nv *)addr);
5563 break;
5564
5565 case DIOCSETSYNCOOKIES:
5566 error = pf_set_syncookies((struct pfioc_nv *)addr);
5567 break;
5568
5569 case DIOCSETHOSTID: {
5570 u_int32_t *hostid = (u_int32_t *)addr;
5571
5572 PF_RULES_WLOCK();
5573 if (*hostid == 0)
5574 V_pf_status.hostid = arc4random();
5575 else
5576 V_pf_status.hostid = *hostid;
5577 PF_RULES_WUNLOCK();
5578 break;
5579 }
5580
5581 case DIOCOSFPFLUSH:
5582 PF_RULES_WLOCK();
5583 pf_osfp_flush();
5584 PF_RULES_WUNLOCK();
5585 break;
5586
5587 case DIOCIGETIFACES: {
5588 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5589 struct pfi_kif *ifstore;
5590 size_t bufsiz;
5591
5592 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5593 error = ENODEV;
5594 break;
5595 }
5596
5597 if (io->pfiio_size < 0 ||
5598 io->pfiio_size > pf_ioctl_maxcount ||
5599 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5600 error = EINVAL;
5601 break;
5602 }
5603
5604 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5605
5606 bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5607 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5608 M_TEMP, M_WAITOK | M_ZERO);
5609
5610 PF_RULES_RLOCK();
5611 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5612 PF_RULES_RUNLOCK();
5613 error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5614 free(ifstore, M_TEMP);
5615 break;
5616 }
5617
5618 case DIOCSETIFFLAG: {
5619 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5620
5621 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5622
5623 PF_RULES_WLOCK();
5624 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5625 PF_RULES_WUNLOCK();
5626 break;
5627 }
5628
5629 case DIOCCLRIFFLAG: {
5630 struct pfioc_iface *io = (struct pfioc_iface *)addr;
5631
5632 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5633
5634 PF_RULES_WLOCK();
5635 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5636 PF_RULES_WUNLOCK();
5637 break;
5638 }
5639
5640 case DIOCSETREASS: {
5641 u_int32_t *reass = (u_int32_t *)addr;
5642
5643 V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5644 		/* Removal of the DF flag without reassembly enabled is not a
5645 		 * valid combination.  Disable reassembly in that case. */
5646 if (!(V_pf_status.reass & PF_REASS_ENABLED))
5647 V_pf_status.reass = 0;
5648 break;
5649 }
5650
5651 default:
5652 error = ENODEV;
5653 break;
5654 }
5655 fail:
5656 if (sx_xlocked(&V_pf_ioctl_lock))
5657 sx_xunlock(&V_pf_ioctl_lock);
5658 CURVNET_RESTORE();
5659
5660 #undef ERROUT_IOCTL
5661
5662 return (error);
5663 }
5664
5665 void
5666 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5667 {
5668 bzero(sp, sizeof(union pfsync_state_union));
5669
5670 /* copy from state key */
5671 sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5672 sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5673 sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5674 sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5675 sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5676 sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5677 sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5678 sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5679 sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5680 sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5681
5682 /* copy from state */
5683 strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5684 bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5685 sp->pfs_1301.creation = htonl(time_uptime - st->creation);
5686 sp->pfs_1301.expire = pf_state_expires(st);
5687 if (sp->pfs_1301.expire <= time_uptime)
5688 sp->pfs_1301.expire = htonl(0);
5689 else
5690 sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5691
5692 sp->pfs_1301.direction = st->direction;
5693 sp->pfs_1301.log = st->act.log;
5694 sp->pfs_1301.timeout = st->timeout;
5695
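	/*
	 * The remaining layout depends on the pfsync wire version: 13.01
	 * carries only the 8-bit state flags, while 14.00 adds queueing,
	 * dummynet and route-to metadata.
	 */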
5696 switch (msg_version) {
5697 case PFSYNC_MSG_VERSION_1301:
5698 sp->pfs_1301.state_flags = st->state_flags;
5699 break;
5700 case PFSYNC_MSG_VERSION_1400:
5701 sp->pfs_1400.state_flags = htons(st->state_flags);
5702 sp->pfs_1400.qid = htons(st->act.qid);
5703 sp->pfs_1400.pqid = htons(st->act.pqid);
5704 sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5705 sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5706 sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5707 sp->pfs_1400.min_ttl = st->act.min_ttl;
5708 sp->pfs_1400.set_tos = st->act.set_tos;
5709 sp->pfs_1400.max_mss = htons(st->act.max_mss);
5710 sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5711 sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5712 sp->pfs_1400.rt = st->rt;
5713 if (st->rt_kif)
5714 strlcpy(sp->pfs_1400.rt_ifname,
5715 st->rt_kif->pfik_name,
5716 sizeof(sp->pfs_1400.rt_ifname));
5717 break;
5718 default:
5719 panic("%s: Unsupported pfsync_msg_version %d",
5720 __func__, msg_version);
5721 }
5722
5723 if (st->src_node)
5724 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5725 if (st->nat_src_node)
5726 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5727
5728 sp->pfs_1301.id = st->id;
5729 sp->pfs_1301.creatorid = st->creatorid;
5730 pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5731 pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5732
5733 if (st->rule.ptr == NULL)
5734 sp->pfs_1301.rule = htonl(-1);
5735 else
5736 sp->pfs_1301.rule = htonl(st->rule.ptr->nr);
5737 if (st->anchor.ptr == NULL)
5738 sp->pfs_1301.anchor = htonl(-1);
5739 else
5740 sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr);
5741 if (st->nat_rule.ptr == NULL)
5742 sp->pfs_1301.nat_rule = htonl(-1);
5743 else
5744 sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr);
5745
5746 pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5747 pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5748 pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5749 pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5750 }
5751
5752 void
5753 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5754 {
5755 bzero(sp, sizeof(*sp));
5756
5757 sp->version = PF_STATE_VERSION;
5758
5759 /* copy from state key */
5760 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5761 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5762 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5763 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5764 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5765 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5766 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5767 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5768 sp->proto = st->key[PF_SK_WIRE]->proto;
5769 sp->af = st->key[PF_SK_WIRE]->af;
5770
5771 /* copy from state */
5772 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5773 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5774 sizeof(sp->orig_ifname));
5775 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5776 sp->creation = htonl(time_uptime - st->creation);
5777 sp->expire = pf_state_expires(st);
5778 if (sp->expire <= time_uptime)
5779 sp->expire = htonl(0);
5780 else
5781 sp->expire = htonl(sp->expire - time_uptime);
5782
5783 sp->direction = st->direction;
5784 sp->log = st->act.log;
5785 sp->timeout = st->timeout;
5786 /* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5787 sp->state_flags_compat = st->state_flags;
5788 sp->state_flags = htons(st->state_flags);
5789 if (st->src_node)
5790 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5791 if (st->nat_src_node)
5792 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5793
5794 sp->id = st->id;
5795 sp->creatorid = st->creatorid;
5796 pf_state_peer_hton(&st->src, &sp->src);
5797 pf_state_peer_hton(&st->dst, &sp->dst);
5798
5799 if (st->rule.ptr == NULL)
5800 sp->rule = htonl(-1);
5801 else
5802 sp->rule = htonl(st->rule.ptr->nr);
5803 if (st->anchor.ptr == NULL)
5804 sp->anchor = htonl(-1);
5805 else
5806 sp->anchor = htonl(st->anchor.ptr->nr);
5807 if (st->nat_rule.ptr == NULL)
5808 sp->nat_rule = htonl(-1);
5809 else
5810 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5811
5812 sp->packets[0] = st->packets[0];
5813 sp->packets[1] = st->packets[1];
5814 sp->bytes[0] = st->bytes[0];
5815 sp->bytes[1] = st->bytes[1];
5816
5817 sp->qid = htons(st->act.qid);
5818 sp->pqid = htons(st->act.pqid);
5819 sp->dnpipe = htons(st->act.dnpipe);
5820 sp->dnrpipe = htons(st->act.dnrpipe);
5821 sp->rtableid = htonl(st->act.rtableid);
5822 sp->min_ttl = st->act.min_ttl;
5823 sp->set_tos = st->act.set_tos;
5824 sp->max_mss = htons(st->act.max_mss);
5825 sp->rt = st->rt;
5826 if (st->rt_kif)
5827 strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
5828 sizeof(sp->rt_ifname));
5829 sp->set_prio[0] = st->act.set_prio[0];
5830 sp->set_prio[1] = st->act.set_prio[1];
5832 }
5833
5834 static void
5835 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5836 {
5837 struct pfr_ktable *kt;
5838
5839 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5840
5841 kt = aw->p.tbl;
5842 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5843 kt = kt->pfrkt_root;
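	/*
	 * Userland cannot follow the kernel table pointer, so replace it
	 * with the table's address count, or -1 if the table is inactive.
	 */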
5844 aw->p.tbl = NULL;
5845 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5846 kt->pfrkt_cnt : -1;
5847 }
5848
5849 static int
5850 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5851 size_t number, char **names)
5852 {
5853 nvlist_t *nvc;
5854
5855 nvc = nvlist_create(0);
5856 if (nvc == NULL)
5857 return (ENOMEM);
5858
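	/*
	 * Counters are exported as three parallel nvlist arrays; value,
	 * name and id share the same index.
	 */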
5859 for (int i = 0; i < number; i++) {
5860 nvlist_append_number_array(nvc, "counters",
5861 counter_u64_fetch(counters[i]));
5862 nvlist_append_string_array(nvc, "names",
5863 names[i]);
5864 nvlist_append_number_array(nvc, "ids",
5865 i);
5866 }
5867 nvlist_add_nvlist(nvl, name, nvc);
5868 nvlist_destroy(nvc);
5869
5870 return (0);
5871 }
5872
5873 static int
5874 pf_getstatus(struct pfioc_nv *nv)
5875 {
5876 nvlist_t *nvl = NULL, *nvc = NULL;
5877 void *nvlpacked = NULL;
5878 int error;
5879 struct pf_status s;
5880 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5881 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5882 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5883 PF_RULES_RLOCK_TRACKER;
5884
5885 #define ERROUT(x) ERROUT_FUNCTION(errout, x)
5886
5887 PF_RULES_RLOCK();
5888
5889 nvl = nvlist_create(0);
5890 if (nvl == NULL)
5891 ERROUT(ENOMEM);
5892
5893 nvlist_add_bool(nvl, "running", V_pf_status.running);
5894 nvlist_add_number(nvl, "since", V_pf_status.since);
5895 nvlist_add_number(nvl, "debug", V_pf_status.debug);
5896 nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5897 nvlist_add_number(nvl, "states", V_pf_status.states);
5898 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5899 nvlist_add_number(nvl, "reass", V_pf_status.reass);
5900 nvlist_add_bool(nvl, "syncookies_active",
5901 V_pf_status.syncookies_active);
5902 nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5903
5904 /* counters */
5905 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5906 PFRES_MAX, pf_reasons);
5907 if (error != 0)
5908 ERROUT(error);
5909
5910 /* lcounters */
5911 error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5912 KLCNT_MAX, pf_lcounter);
5913 if (error != 0)
5914 ERROUT(error);
5915
5916 /* fcounters */
5917 nvc = nvlist_create(0);
5918 if (nvc == NULL)
5919 ERROUT(ENOMEM);
5920
5921 for (int i = 0; i < FCNT_MAX; i++) {
5922 nvlist_append_number_array(nvc, "counters",
5923 pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5924 nvlist_append_string_array(nvc, "names",
5925 pf_fcounter[i]);
5926 nvlist_append_number_array(nvc, "ids",
5927 i);
5928 }
5929 nvlist_add_nvlist(nvl, "fcounters", nvc);
5930 nvlist_destroy(nvc);
5931 nvc = NULL;
5932
5933 /* scounters */
5934 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5935 SCNT_MAX, pf_fcounter);
5936 if (error != 0)
5937 ERROUT(error);
5938
5939 nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5940 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5941 PF_MD5_DIGEST_LENGTH);
5942
5943 pfi_update_status(V_pf_status.ifname, &s);
5944
5945 /* pcounters / bcounters */
5946 for (int i = 0; i < 2; i++) {
5947 for (int j = 0; j < 2; j++) {
5948 for (int k = 0; k < 2; k++) {
5949 nvlist_append_number_array(nvl, "pcounters",
5950 s.pcounters[i][j][k]);
5951 }
5952 nvlist_append_number_array(nvl, "bcounters",
5953 s.bcounters[i][j]);
5954 }
5955 }
5956
5957 nvlpacked = nvlist_pack(nvl, &nv->len);
5958 if (nvlpacked == NULL)
5959 ERROUT(ENOMEM);
5960
5961 if (nv->size == 0)
5962 ERROUT(0);
5963 else if (nv->size < nv->len)
5964 ERROUT(ENOSPC);
5965
5966 PF_RULES_RUNLOCK();
5967 error = copyout(nvlpacked, nv->data, nv->len);
5968 goto done;
5969
5970 #undef ERROUT
5971 errout:
5972 PF_RULES_RUNLOCK();
5973 done:
5974 free(nvlpacked, M_NVLIST);
5975 nvlist_destroy(nvc);
5976 nvlist_destroy(nvl);
5977
5978 return (error);
5979 }
5980
5981 /*
5982 * XXX - Check for version mismatch!!!
5983 */
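/*
 * Expire and unlink every state in the id hash.  pf_unlink_state() is
 * assumed to drop the hash row lock, so each row is re-locked and its
 * list walk restarted after every unlink (the "relock" label below).
 * PFSTATE_NOSYNC keeps pfsync from emitting one delete message per
 * state.
 */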
static void
pf_clear_all_states(void)
{
	struct pf_kstate *s;
	u_int i;

	for (i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}

static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));
	io.pfrio_flags |= PFR_FLAG_ALLRSETS;

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

static void
pf_clear_srcnodes(struct pf_ksrc_node *n)
{
	struct pf_kstate *s;
	int i;

	for (i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}

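/*
 * Kill the source nodes matching the psnk_src/psnk_dst filter in two
 * passes: first unlink matching nodes onto a local kill list and mark
 * them expired, then strip any state references to expired nodes so
 * the nodes can be freed safely by pf_free_src_nodes().
 */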
static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list kill;

	LIST_INIT(&kill);
	for (int i = 0; i <= V_pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}

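/*
 * Set V_pf_status.keep_counters from a packed nvlist holding a single
 * "keep_counters" bool.  This shows the common nvlist input pattern
 * used below: bound the request by pf_ioctl_maxcount, copyin() the
 * packed blob, nvlist_unpack() it and validate the expected keys.
 */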
static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (! nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

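/*
 * Flush states, optionally restricted to those bound to the interface
 * named in psk_ifname.  With psk_kill_match set, a reversed copy of
 * each flushed state's key is handed to pf_kill_matching_state() so
 * the opposite direction of the connection is removed as well.
 * Returns the number of states killed.
 */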
static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
	struct pf_state_key_cmp match_key;
	struct pf_kstate *s;
	struct pfi_kkif *kif;
	int idx;
	unsigned int killed = 0, dir;

	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			/* For floating states look at the original kif. */
			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

			if (kill->psk_ifname[0] &&
			    strcmp(kill->psk_ifname,
			    kif->pfik_name))
				continue;

			if (kill->psk_kill_match) {
				bzero(&match_key, sizeof(match_key));

				if (s->direction == PF_OUT) {
					dir = PF_IN;
					idx = PF_SK_STACK;
				} else {
					dir = PF_OUT;
					idx = PF_SK_WIRE;
				}

				match_key.af = s->key[idx]->af;
				match_key.proto = s->key[idx]->proto;
				PF_ACPY(&match_key.addr[0],
				    &s->key[idx]->addr[1], match_key.af);
				match_key.port[0] = s->key[idx]->port[1];
				PF_ACPY(&match_key.addr[1],
				    &s->key[idx]->addr[0], match_key.af);
				match_key.port[1] = s->key[idx]->port[0];
			}

			/*
			 * Don't send out individual
			 * delete messages.
			 */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			killed++;

			if (kill->psk_kill_match)
				killed += pf_kill_matching_state(&match_key,
				    dir);

			goto relock_DIOCCLRSTATES;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (V_pfsync_clear_states_ptr != NULL)
		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

	return (killed);
}

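/*
 * Kill either one state identified by (id, creatorid) or all states
 * matching the filter in "kill", accumulating the count in *killed.
 * A creatorid of zero is taken to mean the local hostid.
 */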
static void
pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
{
	struct pf_kstate *s;

	if (kill->psk_pfcmp.id) {
		if (kill->psk_pfcmp.creatorid == 0)
			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
		    kill->psk_pfcmp.creatorid))) {
			pf_unlink_state(s);
			*killed = 1;
		}
		return;
	}

	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);

	return;
}

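/*
 * nvlist wrapper around pf_killstates(): unpack the kill specification
 * from userland, perform the kill and hand back the "killed" count as
 * a packed nvlist, using the same size negotiation as pf_getstatus().
 */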
static int
pf_killstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	pf_killstates(&kill, &killed);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

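/*
 * As pf_killstates_nv(), but backed by pf_clear_states(): a full state
 * flush rather than a filtered kill.
 */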
static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	killed = pf_clear_states(&kill);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

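/*
 * Look up a single state by (id, creatorid) and export it as a packed
 * nvlist.  pf_find_state_byid() is assumed to return the state locked;
 * it is unlocked on the common exit path.
 */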
static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvls;
	void *nvlpacked = NULL;
	struct pf_kstate *s = NULL;
	int error = 0;
	uint64_t id, creatorid;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvl);
	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
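/*
 * The flush order below mirrors pfctl: unlink the rules of every
 * user-defined (and ether) anchor, swap in empty main rulesets for
 * scrub/filter/nat/binat/rdr through begin/commit pairs, clear the
 * tables, ether rules and ALTQ, then purge states and source nodes.
 */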
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';
	struct pf_kanchor *anchor;
	struct pf_keth_anchor *eth_anchor;
	int rs_num;

	do {
		/* Unlink rules of all user defined anchors */
		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
			/* Wildcard based anchors may not have a respective
			 * explicit anchor rule or they may be left empty
			 * without rules. It leads to anchor.refcnt=0, and the
			 * rest of the logic does not expect it. */
			if (anchor->refcnt == 0)
				anchor->refcnt = 1;
			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
				if ((error = pf_begin_rules(&t[rs_num], rs_num,
				    anchor->path)) != 0) {
					DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: "
					    "anchor.path=%s rs_num=%d\n",
					    anchor->path, rs_num));
					goto error;	/* XXX: rollback? */
				}
			}
			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
				error = pf_commit_rules(t[rs_num], rs_num,
				    anchor->path);
				MPASS(error == 0);
			}
		}

		/* Unlink rules of all user defined ether anchors */
		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
		    &V_pf_keth_anchors) {
			/* Wildcard based anchors may not have a respective
			 * explicit anchor rule or they may be left empty
			 * without rules. It leads to anchor.refcnt=0, and the
			 * rest of the logic does not expect it. */
			if (eth_anchor->refcnt == 0)
				eth_anchor->refcnt = 1;
			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
			    != 0) {
				DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth "
				    "anchor.path=%s\n", eth_anchor->path));
				goto error;
			}
			error = pf_commit_eth(t[0], eth_anchor->path);
			MPASS(error == 0);
		}

		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
		MPASS(error == 0);

		if ((error = pf_clear_tables()) != 0)
			break;

		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
			break;
		}
		error = pf_commit_eth(t[0], &nn);
		MPASS(error == 0);

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_all_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

error:
	return (error);
}

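/*
 * Map a pf_test*() verdict onto a pfil(9) return value: PF_PASS with
 * the mbuf taken by pf becomes PFIL_CONSUMED, PF_PASS with the mbuf
 * intact becomes PFIL_PASS, and any other verdict frees the mbuf (if
 * pf has not already done so) and reports PFIL_DROPPED.
 */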
static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses.  In order to support stateful
	 * filtering we have to change this to lo0, as is the case with
	 * IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

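/*
 * Attach pf to the pfil(9) heads.  Each direction is a two-step
 * registration: pfil_add_hook() creates the hook and pfil_link() wires
 * it to the per-VNET head.  The V_pf_pfil_*_hooked flags make the
 * hook/dehook operations idempotent.
 */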
static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
		.pa_type = PFIL_TYPE_ETHERNET,
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pha.pa_mbuf_chk = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}

static void
hook_pf(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_mbuf_chk = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet_local_pfil_head;
		pla.pa_hook = V_pf_ip4_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_mbuf_chk = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet6_local_pfil_head;
		pla.pa_hook = V_pf_ip6_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif

	atomic_store_bool(&V_pf_pfil_hooked, true);
}

static void
dehook_pf_eth(void)
{

	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
}

static void
dehook_pf(void)
{

	if (!atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, false);
}

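/*
 * Per-VNET initialization: set up the zones, locks and tag sets every
 * virtualized pf instance needs before pfattach_vnet() brings up the
 * ruleset machinery.
 */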
static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&V_pf_ioctl_lock, "pf ioctl");

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

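/*
 * Once-per-module initialization: create /dev/pf, start the purge
 * kthread and initialize the interface layer.
 */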
static int
pf_load(void)
{
	int error;

	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}

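/*
 * Per-VNET teardown, the mirror image of pf_load_vnet(): unhook from
 * pfil(9), shut the rulesets down under the rules write lock, drain
 * pending epoch callbacks, then release the remaining structures.
 * Counters are freed last because the shutdown path still updates
 * them.
 */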
static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	/* Make sure we've cleaned up ethernet rules before we continue. */
	NET_EPOCH_DRAIN_CALLBACKS();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);

	rm_destroy(&V_pf_rules_lock);
	sx_destroy(&V_pf_ioctl_lock);
}

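/*
 * Module-wide teardown: wake the purge kthread and wait for it to
 * exit; the thread is expected to advance pf_end_threads to 2 as it
 * terminates.  Only then is /dev/pf destroyed.
 */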
static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/*
		 * Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);