1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2002 Cedric Berger
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/mutex.h>
46 #include <sys/refcount.h>
47 #include <sys/socket.h>
48 #include <vm/uma.h>
49
50 #include <net/if.h>
51 #include <net/vnet.h>
52 #include <net/pfvar.h>
53
54 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
55
56 #define ACCEPT_FLAGS(flags, oklist) \
57 do { \
58 if ((flags & ~(oklist)) & \
59 PFR_FLAG_ALLMASK) \
60 return (EINVAL); \
61 } while (0)
62
63 #define FILLIN_SIN(sin, addr) \
64 do { \
65 (sin).sin_len = sizeof(sin); \
66 (sin).sin_family = AF_INET; \
67 (sin).sin_addr = (addr); \
68 } while (0)
69
70 #define FILLIN_SIN6(sin6, addr) \
71 do { \
72 (sin6).sin6_len = sizeof(sin6); \
73 (sin6).sin6_family = AF_INET6; \
74 (sin6).sin6_addr = (addr); \
75 } while (0)
76
77 #define SWAP(type, a1, a2) \
78 do { \
79 type tmp = a1; \
80 a1 = a2; \
81 a2 = tmp; \
82 } while (0)
83
84 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
85 (struct pf_addr *)&(su)->sin.sin_addr : \
86 (struct pf_addr *)&(su)->sin6.sin6_addr)
87
88 #define AF_BITS(af) (((af)==AF_INET)?32:128)
89 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
90 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
91 #define KENTRY_RNF_ROOT(ke) \
92 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
93
94 #define NO_ADDRESSES (-1)
95 #define ENQUEUE_UNMARKED_ONLY (1)
96 #define INVERT_NEG_FLAG (1)
97
98 struct pfr_walktree {
99 enum pfrw_op {
100 PFRW_MARK,
101 PFRW_SWEEP,
102 PFRW_ENQUEUE,
103 PFRW_GET_ADDRS,
104 PFRW_GET_ASTATS,
105 PFRW_POOL_GET,
106 PFRW_DYNADDR_UPDATE
107 } pfrw_op;
108 union {
109 struct pfr_addr *pfrw1_addr;
110 struct pfr_astats *pfrw1_astats;
111 struct pfr_kentryworkq *pfrw1_workq;
112 struct pfr_kentry *pfrw1_kentry;
113 struct pfi_dynaddr *pfrw1_dyn;
114 } pfrw_1;
115 int pfrw_free;
116 int pfrw_flags;
117 };
118 #define pfrw_addr pfrw_1.pfrw1_addr
119 #define pfrw_astats pfrw_1.pfrw1_astats
120 #define pfrw_workq pfrw_1.pfrw1_workq
121 #define pfrw_kentry pfrw_1.pfrw1_kentry
122 #define pfrw_dyn pfrw_1.pfrw1_dyn
123 #define pfrw_cnt pfrw_free
124
125 #define senderr(e) do { rv = (e); goto _bad; } while (0)
126
127 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
128 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
129 #define V_pfr_kentry_z VNET(pfr_kentry_z)
130 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
131 #define V_pfr_kentry_counter_z VNET(pfr_kentry_counter_z)
132
133 static struct pf_addr pfr_ffaddr = {
134 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
135 };
136
137 static void pfr_copyout_astats(struct pfr_astats *,
138 const struct pfr_kentry *,
139 const struct pfr_walktree *);
140 static void pfr_copyout_addr(struct pfr_addr *,
141 const struct pfr_kentry *ke);
142 static int pfr_validate_addr(struct pfr_addr *);
143 static void pfr_enqueue_addrs(struct pfr_ktable *,
144 struct pfr_kentryworkq *, int *, int);
145 static void pfr_mark_addrs(struct pfr_ktable *);
146 static struct pfr_kentry
147 *pfr_lookup_addr(struct pfr_ktable *,
148 struct pfr_addr *, int);
149 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
150 static void pfr_destroy_kentries(struct pfr_kentryworkq *);
151 static void pfr_destroy_kentry(struct pfr_kentry *);
152 static void pfr_insert_kentries(struct pfr_ktable *,
153 struct pfr_kentryworkq *, long);
154 static void pfr_remove_kentries(struct pfr_ktable *,
155 struct pfr_kentryworkq *);
156 static void pfr_clstats_kentries(struct pfr_ktable *,
157 struct pfr_kentryworkq *, long, int);
158 static void pfr_reset_feedback(struct pfr_addr *, int);
159 static void pfr_prepare_network(union sockaddr_union *, int, int);
160 static int pfr_route_kentry(struct pfr_ktable *,
161 struct pfr_kentry *);
162 static int pfr_unroute_kentry(struct pfr_ktable *,
163 struct pfr_kentry *);
164 static int pfr_walktree(struct radix_node *, void *);
165 static int pfr_validate_table(struct pfr_table *, int, int);
166 static int pfr_fix_anchor(char *);
167 static void pfr_commit_ktable(struct pfr_ktable *, long);
168 static void pfr_insert_ktables(struct pfr_ktableworkq *);
169 static void pfr_insert_ktable(struct pfr_ktable *);
170 static void pfr_setflags_ktables(struct pfr_ktableworkq *);
171 static void pfr_setflags_ktable(struct pfr_ktable *, int);
172 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
173 int);
174 static void pfr_clstats_ktable(struct pfr_ktable *, long, int);
175 static struct pfr_ktable
176 *pfr_create_ktable(struct pfr_table *, long, int);
177 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
178 static void pfr_destroy_ktable(struct pfr_ktable *, int);
179 static int pfr_ktable_compare(struct pfr_ktable *,
180 struct pfr_ktable *);
181 static struct pfr_ktable
182 *pfr_lookup_table(struct pfr_table *);
183 static void pfr_clean_node_mask(struct pfr_ktable *,
184 struct pfr_kentryworkq *);
185 static int pfr_skip_table(struct pfr_table *,
186 struct pfr_ktable *, int);
187 static struct pfr_kentry
188 *pfr_kentry_byidx(struct pfr_ktable *, int, int);
189
190 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
191 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
192
193 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
194 #define V_pfr_ktables VNET(pfr_ktables)
195
196 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
197 #define V_pfr_nulltable VNET(pfr_nulltable)
198
199 VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
200 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt)
201
202 void
203 pfr_initialize(void)
204 {
205
206 V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
207 PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
208 UMA_ALIGN_PTR, UMA_ZONE_PCPU);
209 V_pfr_kentry_z = uma_zcreate("pf table entries",
210 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
211 0);
212 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
213 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
214 }
215
216 void
217 pfr_cleanup(void)
218 {
219
220 uma_zdestroy(V_pfr_kentry_z);
221 uma_zdestroy(V_pfr_kentry_counter_z);
222 }
223
224 int
225 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
226 {
227 struct pfr_ktable *kt;
228 struct pfr_kentryworkq workq;
229
230 PF_RULES_WASSERT();
231
232 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
233 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
234 return (EINVAL);
235 kt = pfr_lookup_table(tbl);
236 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
237 return (ESRCH);
238 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
239 return (EPERM);
240 pfr_enqueue_addrs(kt, &workq, ndel, 0);
241
242 if (!(flags & PFR_FLAG_DUMMY)) {
243 pfr_remove_kentries(kt, &workq);
244 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-zero pfrkt_cnt", __func__));
245 }
246 return (0);
247 }
248
249 int
250 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
251 int *nadd, int flags)
252 {
253 struct pfr_ktable *kt, *tmpkt;
254 struct pfr_kentryworkq workq;
255 struct pfr_kentry *p, *q;
256 struct pfr_addr *ad;
257 int i, rv, xadd = 0;
258 long tzero = time_second;
259
260 PF_RULES_WASSERT();
261
262 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
263 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
264 return (EINVAL);
265 kt = pfr_lookup_table(tbl);
266 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
267 return (ESRCH);
268 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
269 return (EPERM);
270 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
271 if (tmpkt == NULL)
272 return (ENOMEM);
273 SLIST_INIT(&workq);
274 for (i = 0, ad = addr; i < size; i++, ad++) {
275 if (pfr_validate_addr(ad))
276 senderr(EINVAL);
277 p = pfr_lookup_addr(kt, ad, 1);
278 q = pfr_lookup_addr(tmpkt, ad, 1);
279 if (flags & PFR_FLAG_FEEDBACK) {
280 if (q != NULL)
281 ad->pfra_fback = PFR_FB_DUPLICATE;
282 else if (p == NULL)
283 ad->pfra_fback = PFR_FB_ADDED;
284 else if (p->pfrke_not != ad->pfra_not)
285 ad->pfra_fback = PFR_FB_CONFLICT;
286 else
287 ad->pfra_fback = PFR_FB_NONE;
288 }
289 if (p == NULL && q == NULL) {
290 p = pfr_create_kentry(ad,
291 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
292 if (p == NULL)
293 senderr(ENOMEM);
294 if (pfr_route_kentry(tmpkt, p)) {
295 pfr_destroy_kentry(p);
296 ad->pfra_fback = PFR_FB_NONE;
297 } else {
298 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
299 xadd++;
300 }
301 }
302 }
303 pfr_clean_node_mask(tmpkt, &workq);
304 if (!(flags & PFR_FLAG_DUMMY))
305 pfr_insert_kentries(kt, &workq, tzero);
306 else
307 pfr_destroy_kentries(&workq);
308 if (nadd != NULL)
309 *nadd = xadd;
310 pfr_destroy_ktable(tmpkt, 0);
311 return (0);
312 _bad:
313 pfr_clean_node_mask(tmpkt, &workq);
314 pfr_destroy_kentries(&workq);
315 if (flags & PFR_FLAG_FEEDBACK)
316 pfr_reset_feedback(addr, size);
317 pfr_destroy_ktable(tmpkt, 0);
318 return (rv);
319 }
320
321 int
322 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
323 int *ndel, int flags)
324 {
325 struct pfr_ktable *kt;
326 struct pfr_kentryworkq workq;
327 struct pfr_kentry *p;
328 struct pfr_addr *ad;
329 int i, rv, xdel = 0, log = 1;
330
331 PF_RULES_WASSERT();
332
333 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
334 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
335 return (EINVAL);
336 kt = pfr_lookup_table(tbl);
337 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
338 return (ESRCH);
339 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
340 return (EPERM);
341 /*
342 * there are two algorithms to choose from here.
343 * with:
344 * n: number of addresses to delete
345 * N: number of addresses in the table
346 *
347 * one is O(N) and is better for large 'n'
348 * one is O(n*LOG(N)) and is better for small 'n'
349 *
350 * the following code tries to decide which one is best.
351 */
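/*
 * e.g. for a table of 65536 entries the loop below computes log = 18,
 * so only requests deleting more than 65536 / 18 (roughly 3640)
 * addresses take the full-scan path.
 */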
352 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
353 log++;
354 if (size > kt->pfrkt_cnt/log) {
355 /* full table scan */
356 pfr_mark_addrs(kt);
357 } else {
358 /* iterate over addresses to delete */
359 for (i = 0, ad = addr; i < size; i++, ad++) {
360 if (pfr_validate_addr(ad))
361 return (EINVAL);
362 p = pfr_lookup_addr(kt, ad, 1);
363 if (p != NULL)
364 p->pfrke_mark = 0;
365 }
366 }
367 SLIST_INIT(&workq);
368 for (i = 0, ad = addr; i < size; i++, ad++) {
369 if (pfr_validate_addr(ad))
370 senderr(EINVAL);
371 p = pfr_lookup_addr(kt, ad, 1);
372 if (flags & PFR_FLAG_FEEDBACK) {
373 if (p == NULL)
374 ad->pfra_fback = PFR_FB_NONE;
375 else if (p->pfrke_not != ad->pfra_not)
376 ad->pfra_fback = PFR_FB_CONFLICT;
377 else if (p->pfrke_mark)
378 ad->pfra_fback = PFR_FB_DUPLICATE;
379 else
380 ad->pfra_fback = PFR_FB_DELETED;
381 }
382 if (p != NULL && p->pfrke_not == ad->pfra_not &&
383 !p->pfrke_mark) {
384 p->pfrke_mark = 1;
385 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
386 xdel++;
387 }
388 }
389 if (!(flags & PFR_FLAG_DUMMY))
390 pfr_remove_kentries(kt, &workq);
391 if (ndel != NULL)
392 *ndel = xdel;
393 return (0);
394 _bad:
395 if (flags & PFR_FLAG_FEEDBACK)
396 pfr_reset_feedback(addr, size);
397 return (rv);
398 }
399
400 int
401 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
402 int *size2, int *nadd, int *ndel, int *nchange, int flags,
403 u_int32_t ignore_pfrt_flags)
404 {
405 struct pfr_ktable *kt, *tmpkt;
406 struct pfr_kentryworkq addq, delq, changeq;
407 struct pfr_kentry *p, *q;
408 struct pfr_addr ad;
409 int i, rv, xadd = 0, xdel = 0, xchange = 0;
410 long tzero = time_second;
411
412 PF_RULES_WASSERT();
413
414 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
415 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
416 PFR_FLAG_USERIOCTL))
417 return (EINVAL);
418 kt = pfr_lookup_table(tbl);
419 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
420 return (ESRCH);
421 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
422 return (EPERM);
423 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
424 if (tmpkt == NULL)
425 return (ENOMEM);
426 pfr_mark_addrs(kt);
427 SLIST_INIT(&addq);
428 SLIST_INIT(&delq);
429 SLIST_INIT(&changeq);
430 for (i = 0; i < size; i++) {
431 /*
432 * XXXGL: understand pf_if usage of this function
433 * and make ad a moving pointer
434 */
435 bcopy(addr + i, &ad, sizeof(ad));
436 if (pfr_validate_addr(&ad))
437 senderr(EINVAL);
438 ad.pfra_fback = PFR_FB_NONE;
439 p = pfr_lookup_addr(kt, &ad, 1);
440 if (p != NULL) {
441 if (p->pfrke_mark) {
442 ad.pfra_fback = PFR_FB_DUPLICATE;
443 goto _skip;
444 }
445 p->pfrke_mark = 1;
446 if (p->pfrke_not != ad.pfra_not) {
447 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
448 ad.pfra_fback = PFR_FB_CHANGED;
449 xchange++;
450 }
451 } else {
452 q = pfr_lookup_addr(tmpkt, &ad, 1);
453 if (q != NULL) {
454 ad.pfra_fback = PFR_FB_DUPLICATE;
455 goto _skip;
456 }
457 p = pfr_create_kentry(&ad,
458 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
459 if (p == NULL)
460 senderr(ENOMEM);
461 if (pfr_route_kentry(tmpkt, p)) {
462 pfr_destroy_kentry(p);
463 ad.pfra_fback = PFR_FB_NONE;
464 } else {
465 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
466 ad.pfra_fback = PFR_FB_ADDED;
467 xadd++;
468 }
469 }
470 _skip:
471 if (flags & PFR_FLAG_FEEDBACK)
472 bcopy(&ad, addr + i, sizeof(ad));
473 }
474 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
475 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
476 if (*size2 < size+xdel) {
477 *size2 = size+xdel;
478 senderr(0);
479 }
480 i = 0;
481 SLIST_FOREACH(p, &delq, pfrke_workq) {
482 pfr_copyout_addr(&ad, p);
483 ad.pfra_fback = PFR_FB_DELETED;
484 bcopy(&ad, addr + size + i, sizeof(ad));
485 i++;
486 }
487 }
488 pfr_clean_node_mask(tmpkt, &addq);
489 if (!(flags & PFR_FLAG_DUMMY)) {
490 pfr_insert_kentries(kt, &addq, tzero);
491 pfr_remove_kentries(kt, &delq);
492 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
493 } else
494 pfr_destroy_kentries(&addq);
495 if (nadd != NULL)
496 *nadd = xadd;
497 if (ndel != NULL)
498 *ndel = xdel;
499 if (nchange != NULL)
500 *nchange = xchange;
501 if ((flags & PFR_FLAG_FEEDBACK) && size2)
502 *size2 = size+xdel;
503 pfr_destroy_ktable(tmpkt, 0);
504 return (0);
505 _bad:
506 pfr_clean_node_mask(tmpkt, &addq);
507 pfr_destroy_kentries(&addq);
508 if (flags & PFR_FLAG_FEEDBACK)
509 pfr_reset_feedback(addr, size);
510 pfr_destroy_ktable(tmpkt, 0);
511 return (rv);
512 }
513
514 int
515 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
516 int *nmatch, int flags)
517 {
518 struct pfr_ktable *kt;
519 struct pfr_kentry *p;
520 struct pfr_addr *ad;
521 int i, xmatch = 0;
522
523 PF_RULES_RASSERT();
524
525 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
526 if (pfr_validate_table(tbl, 0, 0))
527 return (EINVAL);
528 kt = pfr_lookup_table(tbl);
529 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
530 return (ESRCH);
531
532 for (i = 0, ad = addr; i < size; i++, ad++) {
533 if (pfr_validate_addr(ad))
534 return (EINVAL);
535 if (ADDR_NETWORK(ad))
536 return (EINVAL);
537 p = pfr_lookup_addr(kt, ad, 0);
538 if (flags & PFR_FLAG_REPLACE)
539 pfr_copyout_addr(ad, p);
540 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
541 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
542 if (p != NULL && !p->pfrke_not)
543 xmatch++;
544 }
545 if (nmatch != NULL)
546 *nmatch = xmatch;
547 return (0);
548 }
549
550 int
551 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
552 int flags)
553 {
554 struct pfr_ktable *kt;
555 struct pfr_walktree w;
556 int rv;
557
558 PF_RULES_RASSERT();
559
560 ACCEPT_FLAGS(flags, 0);
561 if (pfr_validate_table(tbl, 0, 0))
562 return (EINVAL);
563 kt = pfr_lookup_table(tbl);
564 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
565 return (ESRCH);
566 if (kt->pfrkt_cnt > *size) {
567 *size = kt->pfrkt_cnt;
568 return (0);
569 }
570
571 bzero(&w, sizeof(w));
572 w.pfrw_op = PFRW_GET_ADDRS;
573 w.pfrw_addr = addr;
574 w.pfrw_free = kt->pfrkt_cnt;
575 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
576 if (!rv)
577 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
578 pfr_walktree, &w);
579 if (rv)
580 return (rv);
581
582 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
583 w.pfrw_free));
584
585 *size = kt->pfrkt_cnt;
586 return (0);
587 }
588
589 int
590 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
591 int flags)
592 {
593 struct pfr_ktable *kt;
594 struct pfr_walktree w;
595 struct pfr_kentryworkq workq;
596 int rv;
597 long tzero = time_second;
598
599 PF_RULES_RASSERT();
600
601 /* XXX PFR_FLAG_CLSTATS disabled */
602 ACCEPT_FLAGS(flags, 0);
603 if (pfr_validate_table(tbl, 0, 0))
604 return (EINVAL);
605 kt = pfr_lookup_table(tbl);
606 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
607 return (ESRCH);
608 if (kt->pfrkt_cnt > *size) {
609 *size = kt->pfrkt_cnt;
610 return (0);
611 }
612
613 bzero(&w, sizeof(w));
614 w.pfrw_op = PFRW_GET_ASTATS;
615 w.pfrw_astats = addr;
616 w.pfrw_free = kt->pfrkt_cnt;
617 /*
618 * Flags below are for backward compatibility. It was possible to have
619 * a table without per-entry counters. Now they are always allocated;
620 * we just discard the data when reading it if the table is not
621 * configured to keep counters.
622 */
623 w.pfrw_flags = kt->pfrkt_flags;
624 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
625 if (!rv)
626 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
627 pfr_walktree, &w);
628 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
629 pfr_enqueue_addrs(kt, &workq, NULL, 0);
630 pfr_clstats_kentries(kt, &workq, tzero, 0);
631 }
632 if (rv)
633 return (rv);
634
635 if (w.pfrw_free) {
636 printf("pfr_get_astats: corruption detected (%d).\n",
637 w.pfrw_free);
638 return (ENOTTY);
639 }
640 *size = kt->pfrkt_cnt;
641 return (0);
642 }
643
644 int
645 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
646 int *nzero, int flags)
647 {
648 struct pfr_ktable *kt;
649 struct pfr_kentryworkq workq;
650 struct pfr_kentry *p;
651 struct pfr_addr *ad;
652 int i, rv, xzero = 0;
653
654 PF_RULES_WASSERT();
655
656 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
657 if (pfr_validate_table(tbl, 0, 0))
658 return (EINVAL);
659 kt = pfr_lookup_table(tbl);
660 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
661 return (ESRCH);
662 SLIST_INIT(&workq);
663 for (i = 0, ad = addr; i < size; i++, ad++) {
664 if (pfr_validate_addr(ad))
665 senderr(EINVAL);
666 p = pfr_lookup_addr(kt, ad, 1);
667 if (flags & PFR_FLAG_FEEDBACK) {
668 ad->pfra_fback = (p != NULL) ?
669 PFR_FB_CLEARED : PFR_FB_NONE;
670 }
671 if (p != NULL) {
672 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
673 xzero++;
674 }
675 }
676
677 if (!(flags & PFR_FLAG_DUMMY))
678 pfr_clstats_kentries(kt, &workq, 0, 0);
679 if (nzero != NULL)
680 *nzero = xzero;
681 return (0);
682 _bad:
683 if (flags & PFR_FLAG_FEEDBACK)
684 pfr_reset_feedback(addr, size);
685 return (rv);
686 }
687
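/*
 * Sanity-check an address coming from userland: the family must be a
 * compiled-in AF, the prefix length must fit that family, every bit
 * past the prefix must be zero (the cast relies on pfra_u being the
 * first member of struct pfr_addr), pfra_not must be 0 or 1, and
 * pfra_fback must be unset.
 */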
688 static int
689 pfr_validate_addr(struct pfr_addr *ad)
690 {
691 int i;
692
693 switch (ad->pfra_af) {
694 #ifdef INET
695 case AF_INET:
696 if (ad->pfra_net > 32)
697 return (-1);
698 break;
699 #endif /* INET */
700 #ifdef INET6
701 case AF_INET6:
702 if (ad->pfra_net > 128)
703 return (-1);
704 break;
705 #endif /* INET6 */
706 default:
707 return (-1);
708 }
709 if (ad->pfra_net < 128 &&
710 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
711 return (-1);
712 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
713 if (((caddr_t)ad)[i])
714 return (-1);
715 if (ad->pfra_not && ad->pfra_not != 1)
716 return (-1);
717 if (ad->pfra_fback)
718 return (-1);
719 return (0);
720 }
721
722 static void
723 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
724 int *naddr, int sweep)
725 {
726 struct pfr_walktree w;
727
728 SLIST_INIT(workq);
729 bzero(&w, sizeof(w));
730 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
731 w.pfrw_workq = workq;
732 if (kt->pfrkt_ip4 != NULL)
733 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
734 pfr_walktree, &w))
735 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
736 if (kt->pfrkt_ip6 != NULL)
737 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
738 pfr_walktree, &w))
739 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
740 if (naddr != NULL)
741 *naddr = w.pfrw_cnt;
742 }
743
744 static void
745 pfr_mark_addrs(struct pfr_ktable *kt)
746 {
747 struct pfr_walktree w;
748
749 bzero(&w, sizeof(w));
750 w.pfrw_op = PFRW_MARK;
751 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
752 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
753 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
754 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
755 }
756
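/*
 * Find the kentry matching 'ad' in the table's radix trees.  Network
 * prefixes use an exact masked lookup; host addresses use best-match,
 * which 'exact' further restricts to host entries.  The trees'
 * RNF_ROOT nodes are never returned.
 */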
757 static struct pfr_kentry *
758 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
759 {
760 union sockaddr_union sa, mask;
761 struct radix_head *head = NULL;
762 struct pfr_kentry *ke;
763
764 PF_RULES_ASSERT();
765
766 bzero(&sa, sizeof(sa));
767 if (ad->pfra_af == AF_INET) {
768 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
769 head = &kt->pfrkt_ip4->rh;
770 } else if (ad->pfra_af == AF_INET6) {
771 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
772 head = &kt->pfrkt_ip6->rh;
773 }
774 if (ADDR_NETWORK(ad)) {
775 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
776 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
777 if (ke && KENTRY_RNF_ROOT(ke))
778 ke = NULL;
779 } else {
780 ke = (struct pfr_kentry *)rn_match(&sa, head);
781 if (ke && KENTRY_RNF_ROOT(ke))
782 ke = NULL;
783 if (exact && ke && KENTRY_NETWORK(ke))
784 ke = NULL;
785 }
786 return (ke);
787 }
788
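/*
 * Allocate a table entry for a validated address.  Per-CPU counters
 * are attached only when 'counters' is set, i.e. when the owning
 * table has PFR_TFLAG_COUNTERS.
 */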
789 static struct pfr_kentry *
790 pfr_create_kentry(struct pfr_addr *ad, bool counters)
791 {
792 struct pfr_kentry *ke;
793 counter_u64_t c;
794
795 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
796 if (ke == NULL)
797 return (NULL);
798
799 if (ad->pfra_af == AF_INET)
800 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
801 else if (ad->pfra_af == AF_INET6)
802 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
803 ke->pfrke_af = ad->pfra_af;
804 ke->pfrke_net = ad->pfra_net;
805 ke->pfrke_not = ad->pfra_not;
806 ke->pfrke_counters.pfrkc_tzero = 0;
807 if (counters) {
808 c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
809 if (c == NULL) {
810 pfr_destroy_kentry(ke);
811 return (NULL);
812 }
813 ke->pfrke_counters.pfrkc_counters = c;
814 }
815 return (ke);
816 }
817
818 static void
819 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
820 {
821 struct pfr_kentry *p, *q;
822
823 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
824 q = SLIST_NEXT(p, pfrke_workq);
825 pfr_destroy_kentry(p);
826 }
827 }
828
829 static void
830 pfr_destroy_kentry(struct pfr_kentry *ke)
831 {
832 counter_u64_t c;
833
834 if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
835 uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
836 uma_zfree(V_pfr_kentry_z, ke);
837 }
838
839 static void
840 pfr_insert_kentries(struct pfr_ktable *kt,
841 struct pfr_kentryworkq *workq, long tzero)
842 {
843 struct pfr_kentry *p;
844 int rv, n = 0;
845
846 SLIST_FOREACH(p, workq, pfrke_workq) {
847 rv = pfr_route_kentry(kt, p);
848 if (rv) {
849 printf("pfr_insert_kentries: cannot route entry "
850 "(code=%d).\n", rv);
851 break;
852 }
853 p->pfrke_counters.pfrkc_tzero = tzero;
854 n++;
855 }
856 kt->pfrkt_cnt += n;
857 }
858
859 int
860 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
861 {
862 struct pfr_kentry *p;
863 int rv;
864
865 p = pfr_lookup_addr(kt, ad, 1);
866 if (p != NULL)
867 return (0);
868 p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
869 if (p == NULL)
870 return (ENOMEM);
871
872 rv = pfr_route_kentry(kt, p);
873 if (rv)
874 return (rv);
875
876 p->pfrke_counters.pfrkc_tzero = tzero;
877 kt->pfrkt_cnt++;
878
879 return (0);
880 }
881
882 static void
883 pfr_remove_kentries(struct pfr_ktable *kt,
884 struct pfr_kentryworkq *workq)
885 {
886 struct pfr_kentry *p;
887 int n = 0;
888
889 SLIST_FOREACH(p, workq, pfrke_workq) {
890 pfr_unroute_kentry(kt, p);
891 n++;
892 }
893 kt->pfrkt_cnt -= n;
894 pfr_destroy_kentries(workq);
895 }
896
897 static void
898 pfr_clean_node_mask(struct pfr_ktable *kt,
899 struct pfr_kentryworkq *workq)
900 {
901 struct pfr_kentry *p;
902
903 SLIST_FOREACH(p, workq, pfrke_workq)
904 pfr_unroute_kentry(kt, p);
905 }
906
907 static void
908 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
909 long tzero, int negchange)
910 {
911 struct pfr_kentry *p;
912 int i;
913
914 SLIST_FOREACH(p, workq, pfrke_workq) {
915 if (negchange)
916 p->pfrke_not = !p->pfrke_not;
917 if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
918 for (i = 0; i < PFR_NUM_COUNTERS; i++)
919 counter_u64_zero(
920 p->pfrke_counters.pfrkc_counters + i);
921 p->pfrke_counters.pfrkc_tzero = tzero;
922 }
923 }
924
925 static void
926 pfr_reset_feedback(struct pfr_addr *addr, int size)
927 {
928 struct pfr_addr *ad;
929 int i;
930
931 for (i = 0, ad = addr; i < size; i++, ad++)
932 ad->pfra_fback = PFR_FB_NONE;
933 }
934
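/*
 * Build a netmask sockaddr of 'net' leading one-bits for the given
 * address family, e.g. (AF_INET, 25) yields 255.255.255.128.
 */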
935 static void
936 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
937 {
938 int i;
939
940 bzero(sa, sizeof(*sa));
941 if (af == AF_INET) {
942 sa->sin.sin_len = sizeof(sa->sin);
943 sa->sin.sin_family = AF_INET;
944 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
945 } else if (af == AF_INET6) {
946 sa->sin6.sin6_len = sizeof(sa->sin6);
947 sa->sin6.sin6_family = AF_INET6;
948 for (i = 0; i < 4; i++) {
949 if (net <= 32) {
950 sa->sin6.sin6_addr.s6_addr32[i] =
951 net ? htonl(-1 << (32-net)) : 0;
952 break;
953 }
954 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
955 net -= 32;
956 }
957 }
958 }
959
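/*
 * Insert the entry into the matching radix tree; host entries are
 * added without a mask.  Returns -1 if rn_addroute() refuses the
 * node (e.g. a duplicate key).
 */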
960 static int
961 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
962 {
963 union sockaddr_union mask;
964 struct radix_node *rn;
965 struct radix_head *head = NULL;
966
967 PF_RULES_WASSERT();
968
969 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
970 if (ke->pfrke_af == AF_INET)
971 head = &kt->pfrkt_ip4->rh;
972 else if (ke->pfrke_af == AF_INET6)
973 head = &kt->pfrkt_ip6->rh;
974
975 if (KENTRY_NETWORK(ke)) {
976 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
977 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
978 } else
979 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
980
981 return (rn == NULL ? -1 : 0);
982 }
983
984 static int
985 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
986 {
987 union sockaddr_union mask;
988 struct radix_node *rn;
989 struct radix_head *head = NULL;
990
991 if (ke->pfrke_af == AF_INET)
992 head = &kt->pfrkt_ip4->rh;
993 else if (ke->pfrke_af == AF_INET6)
994 head = &kt->pfrkt_ip6->rh;
995
996 if (KENTRY_NETWORK(ke)) {
997 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
998 rn = rn_delete(&ke->pfrke_sa, &mask, head);
999 } else
1000 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1001
1002 if (rn == NULL) {
1003 printf("pfr_unroute_kentry: delete failed.\n");
1004 return (-1);
1005 }
1006 return (0);
1007 }
1008
1009 static void
1010 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1011 {
1012 bzero(ad, sizeof(*ad));
1013 if (ke == NULL)
1014 return;
1015 ad->pfra_af = ke->pfrke_af;
1016 ad->pfra_net = ke->pfrke_net;
1017 ad->pfra_not = ke->pfrke_not;
1018 if (ad->pfra_af == AF_INET)
1019 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1020 else if (ad->pfra_af == AF_INET6)
1021 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1022 }
1023
1024 static void
1025 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
1026 const struct pfr_walktree *w)
1027 {
1028 int dir, op;
1029 const struct pfr_kcounters *kc = &ke->pfrke_counters;
1030
1031 pfr_copyout_addr(&as->pfras_a, ke);
1032 as->pfras_tzero = kc->pfrkc_tzero;
1033
1034 if (!(w->pfrw_flags & PFR_TFLAG_COUNTERS)) {
1035 bzero(as->pfras_packets, sizeof(as->pfras_packets));
1036 bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
1037 as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1038 return;
1039 }
1040
1041 for (dir = 0; dir < PFR_DIR_MAX; dir++) {
1042 for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
1043 as->pfras_packets[dir][op] = counter_u64_fetch(
1044 pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
1045 as->pfras_bytes[dir][op] = counter_u64_fetch(
1046 pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
1047 }
1048 }
1049 }
1050
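/*
 * Radix tree walker callback; dispatches on pfrw_op to mark entries,
 * collect them on a workq, copy addresses or stats into the caller's
 * buffer, pick the n-th usable entry for a pool, or refresh a dynamic
 * address.
 */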
1051 static int
1052 pfr_walktree(struct radix_node *rn, void *arg)
1053 {
1054 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1055 struct pfr_walktree *w = arg;
1056
1057 switch (w->pfrw_op) {
1058 case PFRW_MARK:
1059 ke->pfrke_mark = 0;
1060 break;
1061 case PFRW_SWEEP:
1062 if (ke->pfrke_mark)
1063 break;
1064 /* FALLTHROUGH */
1065 case PFRW_ENQUEUE:
1066 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1067 w->pfrw_cnt++;
1068 break;
1069 case PFRW_GET_ADDRS:
1070 if (w->pfrw_free-- > 0) {
1071 pfr_copyout_addr(w->pfrw_addr, ke);
1072 w->pfrw_addr++;
1073 }
1074 break;
1075 case PFRW_GET_ASTATS:
1076 if (w->pfrw_free-- > 0) {
1077 struct pfr_astats as;
1078
1079 pfr_copyout_astats(&as, ke, w);
1080
1081 bcopy(&as, w->pfrw_astats, sizeof(as));
1082 w->pfrw_astats++;
1083 }
1084 break;
1085 case PFRW_POOL_GET:
1086 if (ke->pfrke_not)
1087 break; /* negative entries are ignored */
1088 if (!w->pfrw_cnt--) {
1089 w->pfrw_kentry = ke;
1090 return (1); /* finish search */
1091 }
1092 break;
1093 case PFRW_DYNADDR_UPDATE:
1094 {
1095 union sockaddr_union pfr_mask;
1096
1097 if (ke->pfrke_af == AF_INET) {
1098 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1099 break;
1100 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1101 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
1102 AF_INET);
1103 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
1104 AF_INET);
1105 } else if (ke->pfrke_af == AF_INET6){
1106 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1107 break;
1108 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1109 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
1110 AF_INET6);
1111 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
1112 AF_INET6);
1113 }
1114 break;
1115 }
1116 }
1117 return (0);
1118 }
1119
1120 int
1121 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1122 {
1123 struct pfr_ktableworkq workq;
1124 struct pfr_ktable *p;
1125 int xdel = 0;
1126
1127 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1128 if (pfr_fix_anchor(filter->pfrt_anchor))
1129 return (EINVAL);
1130 if (pfr_table_count(filter, flags) < 0)
1131 return (ENOENT);
1132
1133 SLIST_INIT(&workq);
1134 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1135 if (pfr_skip_table(filter, p, flags))
1136 continue;
1137 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1138 continue;
1139 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1140 continue;
1141 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1142 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1143 xdel++;
1144 }
1145 if (!(flags & PFR_FLAG_DUMMY))
1146 pfr_setflags_ktables(&workq);
1147 if (ndel != NULL)
1148 *ndel = xdel;
1149 return (0);
1150 }
1151
1152 int
1153 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1154 {
1155 struct pfr_ktableworkq addq, changeq;
1156 struct pfr_ktable *p, *q, *r, key;
1157 int i, rv, xadd = 0;
1158 long tzero = time_second;
1159
1160 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1161 SLIST_INIT(&addq);
1162 SLIST_INIT(&changeq);
1163 for (i = 0; i < size; i++) {
1164 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1165 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1166 flags & PFR_FLAG_USERIOCTL))
1167 senderr(EINVAL);
1168 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1169 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1170 if (p == NULL) {
1171 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1172 if (p == NULL)
1173 senderr(ENOMEM);
1174 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1175 if (!pfr_ktable_compare(p, q)) {
1176 pfr_destroy_ktable(p, 0);
1177 goto _skip;
1178 }
1179 }
1180 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1181 xadd++;
1182 if (!key.pfrkt_anchor[0])
1183 goto _skip;
1184
1185 /* find or create root table */
1186 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1187 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1188 if (r != NULL) {
1189 p->pfrkt_root = r;
1190 goto _skip;
1191 }
1192 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1193 if (!pfr_ktable_compare(&key, q)) {
1194 p->pfrkt_root = q;
1195 goto _skip;
1196 }
1197 }
1198 key.pfrkt_flags = 0;
1199 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1200 if (r == NULL)
1201 senderr(ENOMEM);
1202 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1203 p->pfrkt_root = r;
1204 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1205 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1206 if (!pfr_ktable_compare(&key, q))
1207 goto _skip;
1208 p->pfrkt_nflags = (p->pfrkt_flags &
1209 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1210 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1211 xadd++;
1212 }
1213 _skip:
1214 ;
1215 }
1216 if (!(flags & PFR_FLAG_DUMMY)) {
1217 pfr_insert_ktables(&addq);
1218 pfr_setflags_ktables(&changeq);
1219 } else
1220 pfr_destroy_ktables(&addq, 0);
1221 if (nadd != NULL)
1222 *nadd = xadd;
1223 return (0);
1224 _bad:
1225 pfr_destroy_ktables(&addq, 0);
1226 return (rv);
1227 }
1228
1229 int
1230 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1231 {
1232 struct pfr_ktableworkq workq;
1233 struct pfr_ktable *p, *q, key;
1234 int i, xdel = 0;
1235
1236 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1237 SLIST_INIT(&workq);
1238 for (i = 0; i < size; i++) {
1239 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1240 if (pfr_validate_table(&key.pfrkt_t, 0,
1241 flags & PFR_FLAG_USERIOCTL))
1242 return (EINVAL);
1243 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1244 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1245 SLIST_FOREACH(q, &workq, pfrkt_workq)
1246 if (!pfr_ktable_compare(p, q))
1247 goto _skip;
1248 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1249 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1250 xdel++;
1251 }
1252 _skip:
1253 ;
1254 }
1255
1256 if (!(flags & PFR_FLAG_DUMMY))
1257 pfr_setflags_ktables(&workq);
1258 if (ndel != NULL)
1259 *ndel = xdel;
1260 return (0);
1261 }
1262
1263 int
1264 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1265 int flags)
1266 {
1267 struct pfr_ktable *p;
1268 int n, nn;
1269
1270 PF_RULES_RASSERT();
1271
1272 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1273 if (pfr_fix_anchor(filter->pfrt_anchor))
1274 return (EINVAL);
1275 n = nn = pfr_table_count(filter, flags);
1276 if (n < 0)
1277 return (ENOENT);
1278 if (n > *size) {
1279 *size = n;
1280 return (0);
1281 }
1282 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1283 if (pfr_skip_table(filter, p, flags))
1284 continue;
1285 if (n-- <= 0)
1286 continue;
1287 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1288 }
1289
1290 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1291
1292 *size = nn;
1293 return (0);
1294 }
1295
1296 int
1297 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1298 int flags)
1299 {
1300 struct pfr_ktable *p;
1301 struct pfr_ktableworkq workq;
1302 int n, nn;
1303 long tzero = time_second;
1304 int pfr_dir, pfr_op;
1305
1306 /* XXX PFR_FLAG_CLSTATS disabled */
1307 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1308 if (pfr_fix_anchor(filter->pfrt_anchor))
1309 return (EINVAL);
1310 n = nn = pfr_table_count(filter, flags);
1311 if (n < 0)
1312 return (ENOENT);
1313 if (n > *size) {
1314 *size = n;
1315 return (0);
1316 }
1317 SLIST_INIT(&workq);
1318 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1319 if (pfr_skip_table(filter, p, flags))
1320 continue;
1321 if (n-- <= 0)
1322 continue;
1323 bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
1324 sizeof(struct pfr_table));
1325 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1326 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1327 tbl->pfrts_packets[pfr_dir][pfr_op] =
1328 counter_u64_fetch(
1329 p->pfrkt_packets[pfr_dir][pfr_op]);
1330 tbl->pfrts_bytes[pfr_dir][pfr_op] =
1331 counter_u64_fetch(
1332 p->pfrkt_bytes[pfr_dir][pfr_op]);
1333 }
1334 }
1335 tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match);
1336 tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch);
1337 tbl->pfrts_tzero = p->pfrkt_tzero;
1338 tbl->pfrts_cnt = p->pfrkt_cnt;
1339 for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
1340 tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
1341 tbl++;
1342 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1343 }
1344 if (flags & PFR_FLAG_CLSTATS)
1345 pfr_clstats_ktables(&workq, tzero,
1346 flags & PFR_FLAG_ADDRSTOO);
1347
1348 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1349
1350 *size = nn;
1351 return (0);
1352 }
1353
1354 int
1355 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1356 {
1357 struct pfr_ktableworkq workq;
1358 struct pfr_ktable *p, key;
1359 int i, xzero = 0;
1360 long tzero = time_second;
1361
1362 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1363 SLIST_INIT(&workq);
1364 for (i = 0; i < size; i++) {
1365 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1366 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1367 return (EINVAL);
1368 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1369 if (p != NULL) {
1370 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1371 xzero++;
1372 }
1373 }
1374 if (!(flags & PFR_FLAG_DUMMY))
1375 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1376 if (nzero != NULL)
1377 *nzero = xzero;
1378 return (0);
1379 }
1380
1381 int
1382 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1383 int *nchange, int *ndel, int flags)
1384 {
1385 struct pfr_ktableworkq workq;
1386 struct pfr_ktable *p, *q, key;
1387 int i, xchange = 0, xdel = 0;
1388
1389 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1390 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1391 (clrflag & ~PFR_TFLAG_USRMASK) ||
1392 (setflag & clrflag))
1393 return (EINVAL);
1394 SLIST_INIT(&workq);
1395 for (i = 0; i < size; i++) {
1396 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1397 if (pfr_validate_table(&key.pfrkt_t, 0,
1398 flags & PFR_FLAG_USERIOCTL))
1399 return (EINVAL);
1400 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1401 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1402 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1403 ~clrflag;
1404 if (p->pfrkt_nflags == p->pfrkt_flags)
1405 goto _skip;
1406 SLIST_FOREACH(q, &workq, pfrkt_workq)
1407 if (!pfr_ktable_compare(p, q))
1408 goto _skip;
1409 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1410 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1411 (clrflag & PFR_TFLAG_PERSIST) &&
1412 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1413 xdel++;
1414 else
1415 xchange++;
1416 }
1417 _skip:
1418 ;
1419 }
1420 if (!(flags & PFR_FLAG_DUMMY))
1421 pfr_setflags_ktables(&workq);
1422 if (nchange != NULL)
1423 *nchange = xchange;
1424 if (ndel != NULL)
1425 *ndel = xdel;
1426 return (0);
1427 }
1428
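/*
 * pfr_ina_* implement the two-phase table transaction: begin flushes
 * leftover inactive tables and hands out a ticket, define stages
 * shadow tables under that ticket, and commit/rollback then swap the
 * shadows in or throw them away.
 */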
1429 int
1430 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1431 {
1432 struct pfr_ktableworkq workq;
1433 struct pfr_ktable *p;
1434 struct pf_kruleset *rs;
1435 int xdel = 0;
1436
1437 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1438 rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
1439 if (rs == NULL)
1440 return (ENOMEM);
1441 SLIST_INIT(&workq);
1442 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1443 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1444 pfr_skip_table(trs, p, 0))
1445 continue;
1446 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1447 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1448 xdel++;
1449 }
1450 if (!(flags & PFR_FLAG_DUMMY)) {
1451 pfr_setflags_ktables(&workq);
1452 if (ticket != NULL)
1453 *ticket = ++rs->tticket;
1454 rs->topen = 1;
1455 } else
1456 pf_remove_if_empty_kruleset(rs);
1457 if (ndel != NULL)
1458 *ndel = xdel;
1459 return (0);
1460 }
1461
1462 int
1463 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1464 int *nadd, int *naddr, u_int32_t ticket, int flags)
1465 {
1466 struct pfr_ktableworkq tableq;
1467 struct pfr_kentryworkq addrq;
1468 struct pfr_ktable *kt, *rt, *shadow, key;
1469 struct pfr_kentry *p;
1470 struct pfr_addr *ad;
1471 struct pf_kruleset *rs;
1472 int i, rv, xadd = 0, xaddr = 0;
1473
1474 PF_RULES_WASSERT();
1475
1476 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1477 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1478 return (EINVAL);
1479 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1480 flags & PFR_FLAG_USERIOCTL))
1481 return (EINVAL);
1482 rs = pf_find_kruleset(tbl->pfrt_anchor);
1483 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1484 return (EBUSY);
1485 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1486 SLIST_INIT(&tableq);
1487 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1488 if (kt == NULL) {
1489 kt = pfr_create_ktable(tbl, 0, 1);
1490 if (kt == NULL)
1491 return (ENOMEM);
1492 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1493 xadd++;
1494 if (!tbl->pfrt_anchor[0])
1495 goto _skip;
1496
1497 /* find or create root table */
1498 bzero(&key, sizeof(key));
1499 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1500 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1501 if (rt != NULL) {
1502 kt->pfrkt_root = rt;
1503 goto _skip;
1504 }
1505 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1506 if (rt == NULL) {
1507 pfr_destroy_ktables(&tableq, 0);
1508 return (ENOMEM);
1509 }
1510 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1511 kt->pfrkt_root = rt;
1512 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1513 xadd++;
1514 _skip:
1515 shadow = pfr_create_ktable(tbl, 0, 0);
1516 if (shadow == NULL) {
1517 pfr_destroy_ktables(&tableq, 0);
1518 return (ENOMEM);
1519 }
1520 SLIST_INIT(&addrq);
1521 for (i = 0, ad = addr; i < size; i++, ad++) {
1522 if (pfr_validate_addr(ad))
1523 senderr(EINVAL);
1524 if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1525 continue;
1526 p = pfr_create_kentry(ad,
1527 (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
1528 if (p == NULL)
1529 senderr(ENOMEM);
1530 if (pfr_route_kentry(shadow, p)) {
1531 pfr_destroy_kentry(p);
1532 continue;
1533 }
1534 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1535 xaddr++;
1536 }
1537 if (!(flags & PFR_FLAG_DUMMY)) {
1538 if (kt->pfrkt_shadow != NULL)
1539 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1540 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1541 pfr_insert_ktables(&tableq);
1542 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1543 xaddr : NO_ADDRESSES;
1544 kt->pfrkt_shadow = shadow;
1545 } else {
1546 pfr_clean_node_mask(shadow, &addrq);
1547 pfr_destroy_ktable(shadow, 0);
1548 pfr_destroy_ktables(&tableq, 0);
1549 pfr_destroy_kentries(&addrq);
1550 }
1551 if (nadd != NULL)
1552 *nadd = xadd;
1553 if (naddr != NULL)
1554 *naddr = xaddr;
1555 return (0);
1556 _bad:
1557 pfr_destroy_ktable(shadow, 0);
1558 pfr_destroy_ktables(&tableq, 0);
1559 pfr_destroy_kentries(&addrq);
1560 return (rv);
1561 }
1562
1563 int
1564 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1565 {
1566 struct pfr_ktableworkq workq;
1567 struct pfr_ktable *p;
1568 struct pf_kruleset *rs;
1569 int xdel = 0;
1570
1571 PF_RULES_WASSERT();
1572
1573 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1574 rs = pf_find_kruleset(trs->pfrt_anchor);
1575 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1576 return (0);
1577 SLIST_INIT(&workq);
1578 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1579 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1580 pfr_skip_table(trs, p, 0))
1581 continue;
1582 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1583 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1584 xdel++;
1585 }
1586 if (!(flags & PFR_FLAG_DUMMY)) {
1587 pfr_setflags_ktables(&workq);
1588 rs->topen = 0;
1589 pf_remove_if_empty_kruleset(rs);
1590 }
1591 if (ndel != NULL)
1592 *ndel = xdel;
1593 return (0);
1594 }
1595
1596 int
1597 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1598 int *nchange, int flags)
1599 {
1600 struct pfr_ktable *p, *q;
1601 struct pfr_ktableworkq workq;
1602 struct pf_kruleset *rs;
1603 int xadd = 0, xchange = 0;
1604 long tzero = time_second;
1605
1606 PF_RULES_WASSERT();
1607
1608 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1609 rs = pf_find_kruleset(trs->pfrt_anchor);
1610 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1611 return (EBUSY);
1612
1613 SLIST_INIT(&workq);
1614 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1615 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1616 pfr_skip_table(trs, p, 0))
1617 continue;
1618 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1619 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1620 xchange++;
1621 else
1622 xadd++;
1623 }
1624
1625 if (!(flags & PFR_FLAG_DUMMY)) {
1626 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1627 q = SLIST_NEXT(p, pfrkt_workq);
1628 pfr_commit_ktable(p, tzero);
1629 }
1630 rs->topen = 0;
1631 pf_remove_if_empty_kruleset(rs);
1632 }
1633 if (nadd != NULL)
1634 *nadd = xadd;
1635 if (nchange != NULL)
1636 *nchange = xchange;
1637
1638 return (0);
1639 }
1640
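/*
 * Swap a shadow table into place.  Three cases: the shadow carries no
 * address list (only flags change), the target table is active (merge
 * the two address sets entry by entry), or it is not (swap the radix
 * trees wholesale).
 */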
1641 static void
1642 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1643 {
1644 counter_u64_t *pkc, *qkc;
1645 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1646 int nflags;
1647
1648 PF_RULES_WASSERT();
1649
1650 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1651 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1652 pfr_clstats_ktable(kt, tzero, 1);
1653 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1654 /* kt might contain addresses */
1655 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1656 struct pfr_kentry *p, *q, *next;
1657 struct pfr_addr ad;
1658
1659 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1660 pfr_mark_addrs(kt);
1661 SLIST_INIT(&addq);
1662 SLIST_INIT(&changeq);
1663 SLIST_INIT(&delq);
1664 SLIST_INIT(&garbageq);
1665 pfr_clean_node_mask(shadow, &addrq);
1666 SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) {
1667 pfr_copyout_addr(&ad, p);
1668 q = pfr_lookup_addr(kt, &ad, 1);
1669 if (q != NULL) {
1670 if (q->pfrke_not != p->pfrke_not)
1671 SLIST_INSERT_HEAD(&changeq, q,
1672 pfrke_workq);
1673 pkc = &p->pfrke_counters.pfrkc_counters;
1674 qkc = &q->pfrke_counters.pfrkc_counters;
1675 if ((*pkc == NULL) != (*qkc == NULL))
1676 SWAP(counter_u64_t, *pkc, *qkc);
1677 q->pfrke_mark = 1;
1678 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1679 } else {
1680 p->pfrke_counters.pfrkc_tzero = tzero;
1681 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1682 }
1683 }
1684 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1685 pfr_insert_kentries(kt, &addq, tzero);
1686 pfr_remove_kentries(kt, &delq);
1687 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
1688 pfr_destroy_kentries(&garbageq);
1689 } else {
1690 /* kt cannot contain addresses */
1691 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1692 shadow->pfrkt_ip4);
1693 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1694 shadow->pfrkt_ip6);
1695 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1696 pfr_clstats_ktable(kt, tzero, 1);
1697 }
1698 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1699 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1700 & ~PFR_TFLAG_INACTIVE;
1701 pfr_destroy_ktable(shadow, 0);
1702 kt->pfrkt_shadow = NULL;
1703 pfr_setflags_ktable(kt, nflags);
1704 }
1705
1706 static int
1707 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1708 {
1709 int i;
1710
1711 if (!tbl->pfrt_name[0])
1712 return (-1);
1713 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1714 return (-1);
1715 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1716 return (-1);
1717 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1718 if (tbl->pfrt_name[i])
1719 return (-1);
1720 if (pfr_fix_anchor(tbl->pfrt_anchor))
1721 return (-1);
1722 if (tbl->pfrt_flags & ~allowedflags)
1723 return (-1);
1724 return (0);
1725 }
1726
1727 /*
1728 * Rewrite anchors referenced by tables to remove slashes
1729 * and check for validity.
1730 */
1731 static int
1732 pfr_fix_anchor(char *anchor)
1733 {
1734 size_t siz = MAXPATHLEN;
1735 int i;
1736
1737 if (anchor[0] == '/') {
1738 char *path;
1739 int off;
1740
1741 path = anchor;
1742 off = 1;
1743 while (*++path == '/')
1744 off++;
1745 bcopy(path, anchor, siz - off);
1746 memset(anchor + siz - off, 0, off);
1747 }
1748 if (anchor[siz - 1])
1749 return (-1);
1750 for (i = strlen(anchor); i < siz; i++)
1751 if (anchor[i])
1752 return (-1);
1753 return (0);
1754 }
1755
1756 int
1757 pfr_table_count(struct pfr_table *filter, int flags)
1758 {
1759 struct pf_kruleset *rs;
1760
1761 PF_RULES_ASSERT();
1762
1763 if (flags & PFR_FLAG_ALLRSETS)
1764 return (V_pfr_ktable_cnt);
1765 if (filter->pfrt_anchor[0]) {
1766 rs = pf_find_kruleset(filter->pfrt_anchor);
1767 return ((rs != NULL) ? rs->tables : -1);
1768 }
1769 return (pf_main_ruleset.tables);
1770 }
1771
1772 static int
1773 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1774 {
1775 if (flags & PFR_FLAG_ALLRSETS)
1776 return (0);
1777 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1778 return (1);
1779 return (0);
1780 }
1781
1782 static void
1783 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1784 {
1785 struct pfr_ktable *p;
1786
1787 SLIST_FOREACH(p, workq, pfrkt_workq)
1788 pfr_insert_ktable(p);
1789 }
1790
1791 static void
1792 pfr_insert_ktable(struct pfr_ktable *kt)
1793 {
1794
1795 PF_RULES_WASSERT();
1796
1797 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1798 V_pfr_ktable_cnt++;
1799 if (kt->pfrkt_root != NULL)
1800 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1801 pfr_setflags_ktable(kt->pfrkt_root,
1802 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1803 }
1804
1805 static void
1806 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1807 {
1808 struct pfr_ktable *p, *q;
1809
1810 for (p = SLIST_FIRST(workq); p; p = q) {
1811 q = SLIST_NEXT(p, pfrkt_workq);
1812 pfr_setflags_ktable(p, p->pfrkt_nflags);
1813 }
1814 }
1815
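/*
 * Transition a table to a new flag set.  A table that is no longer
 * referenced, referenced as an anchor, or persistent loses ACTIVE;
 * once no SETMASK flag is left the table is removed from the tree and
 * destroyed outright.
 */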
1816 static void
1817 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1818 {
1819 struct pfr_kentryworkq addrq;
1820
1821 PF_RULES_WASSERT();
1822
1823 if (!(newf & PFR_TFLAG_REFERENCED) &&
1824 !(newf & PFR_TFLAG_REFDANCHOR) &&
1825 !(newf & PFR_TFLAG_PERSIST))
1826 newf &= ~PFR_TFLAG_ACTIVE;
1827 if (!(newf & PFR_TFLAG_ACTIVE))
1828 newf &= ~PFR_TFLAG_USRMASK;
1829 if (!(newf & PFR_TFLAG_SETMASK)) {
1830 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1831 if (kt->pfrkt_root != NULL)
1832 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1833 pfr_setflags_ktable(kt->pfrkt_root,
1834 kt->pfrkt_root->pfrkt_flags &
1835 ~PFR_TFLAG_REFDANCHOR);
1836 pfr_destroy_ktable(kt, 1);
1837 V_pfr_ktable_cnt--;
1838 return;
1839 }
1840 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1841 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1842 pfr_remove_kentries(kt, &addrq);
1843 }
1844 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1845 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1846 kt->pfrkt_shadow = NULL;
1847 }
1848 kt->pfrkt_flags = newf;
1849 }
1850
1851 static void
1852 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1853 {
1854 struct pfr_ktable *p;
1855
1856 SLIST_FOREACH(p, workq, pfrkt_workq)
1857 pfr_clstats_ktable(p, tzero, recurse);
1858 }
1859
1860 static void
1861 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1862 {
1863 struct pfr_kentryworkq addrq;
1864 int pfr_dir, pfr_op;
1865
1866 if (recurse) {
1867 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1868 pfr_clstats_kentries(kt, &addrq, tzero, 0);
1869 }
1870 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1871 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1872 counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]);
1873 counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]);
1874 }
1875 }
1876 counter_u64_zero(kt->pfrkt_match);
1877 counter_u64_zero(kt->pfrkt_nomatch);
1878 kt->pfrkt_tzero = tzero;
1879 }
1880
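/*
 * Allocate and initialize a kernel table: copy in the user-visible
 * table header, optionally attach it to its ruleset, allocate all
 * per-table counters and set up one radix head per address family.
 * Returns NULL if any allocation fails.
 */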
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable *kt;
	struct pf_kruleset *rs;
	int pfr_dir, pfr_op;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			kt->pfrkt_packets[pfr_dir][pfr_op] =
			    counter_u64_alloc(M_NOWAIT);
			if (!kt->pfrkt_packets[pfr_dir][pfr_op]) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
			kt->pfrkt_bytes[pfr_dir][pfr_op] =
			    counter_u64_alloc(M_NOWAIT);
			if (!kt->pfrkt_bytes[pfr_dir][pfr_op]) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
		}
	}
	kt->pfrkt_match = counter_u64_alloc(M_NOWAIT);
	if (!kt->pfrkt_match) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	kt->pfrkt_nomatch = counter_u64_alloc(M_NOWAIT);
	if (!kt->pfrkt_nomatch) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable *p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

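/*
 * Tear down a table: optionally flush its addresses, detach the radix
 * heads, recurse into an attached shadow table, drop the ruleset
 * reference and release all counters.
 */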
static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;
	int pfr_dir, pfr_op;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_kruleset(kt->pfrkt_rs);
	}
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]);
			counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]);
		}
	}
	counter_u64_free(kt->pfrkt_match);
	counter_u64_free(kt->pfrkt_nomatch);

	free(kt, M_PFTABLE);
}

static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

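/*
 * Look up an address in a table and report whether it matches,
 * honoring negated entries and updating the match/nomatch counters.
 * Inactive tables defer to their root table if one exists.
 */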
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry *ke = NULL;
	int match;

	PF_RULES_RASSERT();

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		counter_u64_add(kt->pfrkt_match, 1);
	else
		counter_u64_add(kt->pfrkt_nomatch, 1);
	return (match);
}

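/*
 * Account a packet against a table and, if the table keeps per-entry
 * counters, against the matching entry.  A mismatch between the
 * lookup result and the rule's expectation is counted as XPASS.
 */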
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry *ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pfr_update_stats: assertion failed.\n"));
		op_pass = PFR_OP_XPASS;
	}
	counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1);
	counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len);
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_BYTES), len);
	}
}

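/*
 * Find or create the table a rule refers to and take a rule reference
 * on it.  For tables inside an anchor, the corresponding root table in
 * the main ruleset is created on demand and linked via pfrkt_root.
 */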
struct pfr_ktable *
pfr_attach_table(struct pf_kruleset *rs, char *name)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_kanchor *ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();
	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

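/*
 * Pick an address from a table for round-robin style pools: *pidx and
 * counter carry the iteration state across calls, and nested blocks
 * are stepped over rather than used.  Returns 0 on success, 1 when the
 * table is exhausted and -1 if the table is unusable.
 */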
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr *addr, *cur, *mask;
	union sockaddr_union uaddr, umask;
	struct pfr_kentry *ke, *ke2 = NULL;
	int idx = -1, use_counter = 0;

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		counter_u64_add(kt->pfrkt_nomatch, 1);
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		counter_u64_add(kt->pfrkt_match, 1);
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			counter_u64_add(kt->pfrkt_match, 1);
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

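/*
 * Return the idx'th entry of the given address family, found by
 * walking the radix tree with a PFRW_POOL_GET walker.
 */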
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

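/*
 * Recompute the per-family address counts of a dynamic address by
 * walking the table's radix trees with a PFRW_DYNADDR_UPDATE walker.
 */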
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}