1 /* $FreeBSD$ */
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if defined(KERNEL) || defined(_KERNEL)
9 # undef KERNEL
10 # undef _KERNEL
11 # define KERNEL 1
12 # define _KERNEL 1
13 #endif
14 #include <sys/errno.h>
15 #include <sys/types.h>
16 #include <sys/param.h>
17 #include <sys/time.h>
18 #include <sys/file.h>
19 #if !defined(_KERNEL)
20 # include <stdio.h>
21 # include <string.h>
22 # include <stdlib.h>
23 # define _KERNEL
24 # include <sys/uio.h>
25 # undef _KERNEL
26 #endif
27 #if defined(_KERNEL) && defined(__FreeBSD_version)
28 # include <sys/filio.h>
29 # include <sys/fcntl.h>
30 #else
31 # include <sys/ioctl.h>
32 #endif
33 # include <sys/protosw.h>
34 #include <sys/socket.h>
35 #if defined(_KERNEL)
36 # include <sys/systm.h>
37 # if !defined(__SVR4)
38 # include <sys/mbuf.h>
39 # endif
40 #endif
41 #if !defined(__SVR4)
42 # if defined(_KERNEL)
43 # include <sys/kernel.h>
44 # endif
45 #else
46 # include <sys/byteorder.h>
47 # ifdef _KERNEL
48 # include <sys/dditypes.h>
49 # endif
50 # include <sys/stream.h>
51 # include <sys/kmem.h>
52 #endif
53 #include <net/if.h>
54 #ifdef sun
55 # include <net/af.h>
56 #endif
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/ip.h>
60 # include <netinet/ip_var.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 #include <netinet/ip_icmp.h>
64 #include "netinet/ip_compat.h"
65 #include <netinet/tcpip.h>
66 #include "netinet/ip_fil.h"
67 #include "netinet/ip_nat.h"
68 #include "netinet/ip_frag.h"
69 #include "netinet/ip_state.h"
70 #include "netinet/ip_auth.h"
71 #include "netinet/ip_lookup.h"
72 #include "netinet/ip_proxy.h"
73 #include "netinet/ip_sync.h"
74 /* END OF INCLUDES */
75
76 #if !defined(lint)
77 static const char sccsid[] = "@(#)ip_frag.c 1.11 3/24/96 (C) 1993-2000 Darren Reed";
78 static const char rcsid[] = "@(#)$FreeBSD$";
79 /* static const char rcsid[] = "@(#)$Id: ip_frag.c,v 2.77.2.12 2007/09/20 12:51:51 darrenr Exp $"; */
80 #endif
81
82
83 #ifdef USE_MUTEXES
84 static ipfr_t *ipfr_frag_new __P((ipf_main_softc_t *, ipf_frag_softc_t *,
85 fr_info_t *, u_32_t, ipfr_t **,
86 ipfrwlock_t *));
87 static ipfr_t *ipf_frag_lookup __P((ipf_main_softc_t *, ipf_frag_softc_t *, fr_info_t *, ipfr_t **, ipfrwlock_t *));
88 static void ipf_frag_deref __P((void *, ipfr_t **, ipfrwlock_t *));
89 static int ipf_frag_next __P((ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
90 ipfr_t **, ipfrwlock_t *));
91 #else
92 static ipfr_t *ipfr_frag_new __P((ipf_main_softc_t *, ipf_frag_softc_t *,
93 fr_info_t *, u_32_t, ipfr_t **));
94 static ipfr_t *ipf_frag_lookup __P((ipf_main_softc_t *, ipf_frag_softc_t *, fr_info_t *, ipfr_t **));
95 static void ipf_frag_deref __P((void *, ipfr_t **));
96 static int ipf_frag_next __P((ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
97 ipfr_t **));
98 #endif
99 static void ipf_frag_delete __P((ipf_main_softc_t *, ipfr_t *, ipfr_t ***));
100 static void ipf_frag_free __P((ipf_frag_softc_t *, ipfr_t *));
101
102 static frentry_t ipfr_block;
103
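/*
 * Tuneables exported by this module: "frag_size" is the size of the hash
 * tables (write-disabled once the module is running) and "frag_ttl" is the
 * lifetime, in ipf ticks, of a fragment cache entry.
 */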
104 static ipftuneable_t ipf_frag_tuneables[] = {
105 { { (void *)offsetof(ipf_frag_softc_t, ipfr_size) },
106 "frag_size", 1, 0x7fffffff,
107 stsizeof(ipf_frag_softc_t, ipfr_size),
108 IPFT_WRDISABLED, NULL, NULL },
109 { { (void *)offsetof(ipf_frag_softc_t, ipfr_ttl) },
110 "frag_ttl", 1, 0x7fffffff,
111 stsizeof(ipf_frag_softc_t, ipfr_ttl),
112 0, NULL, NULL },
113 { { NULL },
114 NULL, 0, 0,
115 0,
116 0, NULL, NULL }
117 };
118
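/*
 * FBUMP increments a counter in this context's fragment statistics;
 * FBUMPD does the same and also records the event with the DT() trace macro.
 */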
119 #define FBUMP(x) softf->ipfr_stats.x++
120 #define FBUMPD(x) do { softf->ipfr_stats.x++; DT(x); } while (0)
121
122
123 /* ------------------------------------------------------------------------ */
124 /* Function: ipf_frag_main_load */
125 /* Returns: int - 0 == success, -1 == error */
126 /* Parameters: Nil */
127 /* */
128 /* Initialise the filter rule associated with blocked packets - everyone can */
129 /* use it. */
130 /* ------------------------------------------------------------------------ */
131 int
132 ipf_frag_main_load()
133 {
134 bzero((char *)&ipfr_block, sizeof(ipfr_block));
135 ipfr_block.fr_flags = FR_BLOCK|FR_QUICK;
136 ipfr_block.fr_ref = 1;
137
138 return 0;
139 }
140
141
142 /* ------------------------------------------------------------------------ */
143 /* Function: ipf_frag_main_unload */
144 /* Returns: int - 0 == success, -1 == error */
145 /* Parameters: Nil */
146 /* */
147 /* A no-op function that exists as a placeholder so that the flow in */
148 /* other functions is obvious. */
149 /* ------------------------------------------------------------------------ */
150 int
151 ipf_frag_main_unload()
152 {
153 return 0;
154 }
155
156
157 /* ------------------------------------------------------------------------ */
158 /* Function: ipf_frag_soft_create */
159 /* Returns: void * - NULL = failure, else pointer to local context */
160 /* Parameters: softc(I) - pointer to soft context main structure */
161 /* */
162 /* Allocate a new soft context structure to track fragment related info. */
163 /* ------------------------------------------------------------------------ */
164 /*ARGSUSED*/
165 void *
166 ipf_frag_soft_create(softc)
167 ipf_main_softc_t *softc;
168 {
169 ipf_frag_softc_t *softf;
170
171 KMALLOC(softf, ipf_frag_softc_t *);
172 if (softf == NULL)
173 return NULL;
174
175 bzero((char *)softf, sizeof(*softf));
176
177 RWLOCK_INIT(&softf->ipfr_ipidfrag, "frag ipid lock");
178 RWLOCK_INIT(&softf->ipfr_frag, "ipf fragment rwlock");
179 RWLOCK_INIT(&softf->ipfr_natfrag, "ipf NAT fragment rwlock");
180
181 softf->ipf_frag_tune = ipf_tune_array_copy(softf,
182 sizeof(ipf_frag_tuneables),
183 ipf_frag_tuneables);
184 if (softf->ipf_frag_tune == NULL) {
185 ipf_frag_soft_destroy(softc, softf);
186 return NULL;
187 }
188 if (ipf_tune_array_link(softc, softf->ipf_frag_tune) == -1) {
189 ipf_frag_soft_destroy(softc, softf);
190 return NULL;
191 }
192
193 softf->ipfr_size = IPFT_SIZE;
194 softf->ipfr_ttl = IPF_TTLVAL(60);
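/* Keep the fragment cache locked until ipf_frag_soft_init() has run. */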
195 softf->ipfr_lock = 1;
196 softf->ipfr_tail = &softf->ipfr_list;
197 softf->ipfr_nattail = &softf->ipfr_natlist;
198 softf->ipfr_ipidtail = &softf->ipfr_ipidlist;
199
200 return softf;
201 }
202
203
204 /* ------------------------------------------------------------------------ */
205 /* Function: ipf_frag_soft_destroy */
206 /* Returns: Nil */
207 /* Parameters: softc(I) - pointer to soft context main structure */
208 /* arg(I) - pointer to local context to use */
209 /* */
210 /* Tear down the locks and tuneables for this context and free its memory. */
211 /* ------------------------------------------------------------------------ */
212 void
213 ipf_frag_soft_destroy(softc, arg)
214 ipf_main_softc_t *softc;
215 void *arg;
216 {
217 ipf_frag_softc_t *softf = arg;
218
219 RW_DESTROY(&softf->ipfr_ipidfrag);
220 RW_DESTROY(&softf->ipfr_frag);
221 RW_DESTROY(&softf->ipfr_natfrag);
222
223 if (softf->ipf_frag_tune != NULL) {
224 ipf_tune_array_unlink(softc, softf->ipf_frag_tune);
225 KFREES(softf->ipf_frag_tune, sizeof(ipf_frag_tuneables));
226 softf->ipf_frag_tune = NULL;
227 }
228
229 KFREE(softf);
230 }
231
232
233 /* ------------------------------------------------------------------------ */
234 /* Function: ipf_frag_soft_init */
235 /* Returns: int - 0 == success, -1 == error */
236 /* Parameters: softc(I) - pointer to soft context main structure */
237 /* arg(I) - pointer to local context to use */
238 /* */
239 /* Initialise the hash tables for the fragment cache lookups. */
240 /* ------------------------------------------------------------------------ */
241 /*ARGSUSED*/
242 int
243 ipf_frag_soft_init(softc, arg)
244 ipf_main_softc_t *softc;
245 void *arg;
246 {
247 ipf_frag_softc_t *softf = arg;
248
249 KMALLOCS(softf->ipfr_heads, ipfr_t **,
250 softf->ipfr_size * sizeof(ipfr_t *));
251 if (softf->ipfr_heads == NULL)
252 return -1;
253
254 bzero((char *)softf->ipfr_heads, softf->ipfr_size * sizeof(ipfr_t *));
255
256 KMALLOCS(softf->ipfr_nattab, ipfr_t **,
257 softf->ipfr_size * sizeof(ipfr_t *));
258 if (softf->ipfr_nattab == NULL)
259 return -2;
260
261 bzero((char *)softf->ipfr_nattab, softf->ipfr_size * sizeof(ipfr_t *));
262
263 KMALLOCS(softf->ipfr_ipidtab, ipfr_t **,
264 softf->ipfr_size * sizeof(ipfr_t *));
265 if (softf->ipfr_ipidtab == NULL)
266 return -3;
267
268 bzero((char *)softf->ipfr_ipidtab,
269 softf->ipfr_size * sizeof(ipfr_t *));
270
271 softf->ipfr_lock = 0;
272 softf->ipfr_inited = 1;
273
274 return 0;
275 }
276
277
278 /* ------------------------------------------------------------------------ */
279 /* Function: ipf_frag_soft_fini */
280 /* Returns: int - 0 == success, -1 == error */
281 /* Parameters: softc(I) - pointer to soft context main structure */
282 /* arg(I) - pointer to local context to use */
283 /* */
284 /* Free all memory allocated whilst running and from initialisation. */
285 /* ------------------------------------------------------------------------ */
286 int
287 ipf_frag_soft_fini(softc, arg)
288 ipf_main_softc_t *softc;
289 void *arg;
290 {
291 ipf_frag_softc_t *softf = arg;
292
293 softf->ipfr_lock = 1;
294
295 if (softf->ipfr_inited == 1) {
296 ipf_frag_clear(softc);
297
298 softf->ipfr_inited = 0;
299 }
300
301 if (softf->ipfr_heads != NULL)
302 KFREES(softf->ipfr_heads,
303 softf->ipfr_size * sizeof(ipfr_t *));
304 softf->ipfr_heads = NULL;
305
306 if (softf->ipfr_nattab != NULL)
307 KFREES(softf->ipfr_nattab,
308 softf->ipfr_size * sizeof(ipfr_t *));
309 softf->ipfr_nattab = NULL;
310
311 if (softf->ipfr_ipidtab != NULL)
312 KFREES(softf->ipfr_ipidtab,
313 softf->ipfr_size * sizeof(ipfr_t *));
314 softf->ipfr_ipidtab = NULL;
315
316 return 0;
317 }
318
319
320 /* ------------------------------------------------------------------------ */
321 /* Function: ipf_frag_setlock */
322 /* Returns: Nil */
323 /* Parameters: arg(I) - pointer to local context to use */
324 /* tmp(I) - new value for lock */
325 /* */
326 /* Stub function that allows for external manipulation of ipfr_lock */
327 /* ------------------------------------------------------------------------ */
328 void
329 ipf_frag_setlock(arg, tmp)
330 void *arg;
331 int tmp;
332 {
333 ipf_frag_softc_t *softf = arg;
334
335 softf->ipfr_lock = tmp;
336 }
337
338
339 /* ------------------------------------------------------------------------ */
340 /* Function: ipf_frag_stats */
341 /* Returns: ipfrstat_t* - pointer to struct with current frag stats */
342 /* Parameters: arg(I) - pointer to local context to use */
343 /* */
344 /* Updates ipfr_stats with current information and returns a pointer to it */
345 /* ------------------------------------------------------------------------ */
346 ipfrstat_t *
347 ipf_frag_stats(arg)
348 void *arg;
349 {
350 ipf_frag_softc_t *softf = arg;
351
352 softf->ipfr_stats.ifs_table = softf->ipfr_heads;
353 softf->ipfr_stats.ifs_nattab = softf->ipfr_nattab;
354 return &softf->ipfr_stats;
355 }
356
357
358 /* ------------------------------------------------------------------------ */
359 /* Function: ipfr_frag_new */
360 /* Returns: ipfr_t * - pointer to fragment cache state info or NULL */
361 /* Parameters: fin(I) - pointer to packet information */
362 /* table(I) - pointer to frag table to add to */
363 /* lock(I) - pointer to lock to get a write hold of */
364 /* */
365 /* Add a new entry to the fragment cache, registering it as having come */
366 /* through this box, with the result of the filter operation. */
367 /* */
368 /* If this function succeeds, it returns with a write lock held on "lock". */
369 /* If it fails, no lock is held on return. */
370 /* ------------------------------------------------------------------------ */
371 static ipfr_t *
372 ipfr_frag_new(softc, softf, fin, pass, table
373 #ifdef USE_MUTEXES
374 , lock
375 #endif
376 )
377 ipf_main_softc_t *softc;
378 ipf_frag_softc_t *softf;
379 fr_info_t *fin;
380 u_32_t pass;
381 ipfr_t *table[];
382 #ifdef USE_MUTEXES
383 ipfrwlock_t *lock;
384 #endif
385 {
386 ipfr_t *fra, frag, *fran;
387 u_int idx, off;
388 frentry_t *fr;
389
390 if (softf->ipfr_stats.ifs_inuse >= softf->ipfr_size) {
391 FBUMPD(ifs_maximum);
392 return NULL;
393 }
394
395 if ((fin->fin_flx & (FI_FRAG|FI_BAD)) != FI_FRAG) {
396 FBUMPD(ifs_newbad);
397 return NULL;
398 }
399
400 if (pass & FR_FRSTRICT) {
401 if (fin->fin_off != 0) {
402 FBUMPD(ifs_newrestrictnot0);
403 return NULL;
404 }
405 }
406
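/*
 * Build the comparison key in "frag" and derive the hash bucket index
 * from the IP version, protocol, fragment id and both addresses, in the
 * same way that ipf_frag_lookup() does, so later fragments find this entry.
 */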
407 memset(&frag, 0, sizeof(frag));
408 frag.ipfr_v = fin->fin_v;
409 idx = fin->fin_v;
410 frag.ipfr_p = fin->fin_p;
411 idx += fin->fin_p;
412 frag.ipfr_id = fin->fin_id;
413 idx += fin->fin_id;
414 frag.ipfr_source = fin->fin_fi.fi_src;
415 idx += frag.ipfr_src.s_addr;
416 frag.ipfr_dest = fin->fin_fi.fi_dst;
417 idx += frag.ipfr_dst.s_addr;
418 frag.ipfr_ifp = fin->fin_ifp;
419 idx *= 127;
420 idx %= softf->ipfr_size;
421
422 frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
423 frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
424 frag.ipfr_auth = fin->fin_fi.fi_auth;
425
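/*
 * For the first fragment, record where its data ends (in 8 byte units)
 * so that a later fragment overlapping that region can be flagged as
 * bad by ipf_frag_lookup().
 */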
426 off = fin->fin_off >> 3;
427 if (off == 0) {
428 char *ptr;
429 int end;
430
431 #ifdef USE_INET6
432 if (fin->fin_v == 6) {
433
434 ptr = (char *)fin->fin_fraghdr +
435 sizeof(struct ip6_frag);
436 } else
437 #endif
438 {
439 ptr = fin->fin_dp;
440 }
441 end = fin->fin_plen - (ptr - (char *)fin->fin_ip);
442 frag.ipfr_firstend = end >> 3;
443 } else {
444 frag.ipfr_firstend = 0;
445 }
446
447 /*
448 * Allocate some memory; if we cannot, just record that we
449 * failed to do so.
450 */
451 KMALLOC(fran, ipfr_t *);
452 if (fran == NULL) {
453 FBUMPD(ifs_nomem);
454 return NULL;
455 }
456 memset(fran, 0, sizeof(*fran));
457
458 WRITE_ENTER(lock);
459
460 /*
461 * first, make sure it isn't already there...
462 */
463 for (fra = table[idx]; (fra != NULL); fra = fra->ipfr_hnext)
464 if (!bcmp((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp,
465 IPFR_CMPSZ)) {
466 RWLOCK_EXIT(lock);
467 FBUMPD(ifs_exists);
468 KFREE(fran);
469 return NULL;
470 }
471
472 fra = fran;
473 fran = NULL;
474 fr = fin->fin_fr;
475 fra->ipfr_rule = fr;
476 if (fr != NULL) {
477 MUTEX_ENTER(&fr->fr_lock);
478 fr->fr_ref++;
479 MUTEX_EXIT(&fr->fr_lock);
480 }
481
482 /*
483 * Insert the fragment into the fragment table, copying the struct used
484 * in the search with bcopy rather than reassigning each field.
485 * Set the ttl to the default.
486 */
487 if ((fra->ipfr_hnext = table[idx]) != NULL)
488 table[idx]->ipfr_hprev = &fra->ipfr_hnext;
489 fra->ipfr_hprev = table + idx;
490 fra->ipfr_data = NULL;
491 table[idx] = fra;
492 bcopy((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp, IPFR_CMPSZ);
493 fra->ipfr_v = fin->fin_v;
494 fra->ipfr_p = fin->fin_p;
495 fra->ipfr_ttl = softc->ipf_ticks + softf->ipfr_ttl;
496 fra->ipfr_firstend = frag.ipfr_firstend;
497
498 /*
499 * Compute the offset of the expected start of the next packet.
500 */
501 if (off == 0)
502 fra->ipfr_seen0 = 1;
503 fra->ipfr_off = off + (fin->fin_dlen >> 3);
504 fra->ipfr_pass = pass;
505 fra->ipfr_ref = 1;
506 fra->ipfr_pkts = 1;
507 fra->ipfr_bytes = fin->fin_plen;
508 FBUMP(ifs_inuse);
509 FBUMP(ifs_new);
510 return fra;
511 }
512
513
514 /* ------------------------------------------------------------------------ */
515 /* Function: ipf_frag_new */
516 /* Returns: int - 0 == success, -1 == error */
517 /* Parameters: fin(I) - pointer to packet information */
518 /* */
519 /* Add a new entry to the fragment cache table based on the current packet */
520 /* ------------------------------------------------------------------------ */
521 int
522 ipf_frag_new(softc, fin, pass)
523 ipf_main_softc_t *softc;
524 u_32_t pass;
525 fr_info_t *fin;
526 {
527 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
528 ipfr_t *fra;
529
530 if (softf->ipfr_lock != 0)
531 return -1;
532
533 #ifdef USE_MUTEXES
534 fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads, &softc->ipf_frag);
535 #else
536 fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads);
537 #endif
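/*
 * On success ipfr_frag_new() returns with the write lock still held:
 * append the new entry to the tail of the expiry list and then drop
 * the lock.
 */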
538 if (fra != NULL) {
539 *softf->ipfr_tail = fra;
540 fra->ipfr_prev = softf->ipfr_tail;
541 softf->ipfr_tail = &fra->ipfr_next;
542 fra->ipfr_next = NULL;
543 RWLOCK_EXIT(&softc->ipf_frag);
544 }
545 return fra ? 0 : -1;
546 }
547
548
549 /* ------------------------------------------------------------------------ */
550 /* Function: ipf_frag_natnew */
551 /* Returns: int - 0 == success, -1 == error */
552 /* Parameters: fin(I) - pointer to packet information */
553 /* nat(I) - pointer to NAT structure */
554 /* */
555 /* Create a new NAT fragment cache entry based on the current packet and */
556 /* the NAT structure for this "session". */
557 /* ------------------------------------------------------------------------ */
558 int
559 ipf_frag_natnew(softc, fin, pass, nat)
560 ipf_main_softc_t *softc;
561 fr_info_t *fin;
562 u_32_t pass;
563 nat_t *nat;
564 {
565 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
566 ipfr_t *fra;
567
568 if (softf->ipfr_lock != 0)
569 return 0;
570
571 #ifdef USE_MUTEXES
572 fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab,
573 &softf->ipfr_natfrag);
574 #else
575 fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab);
576 #endif
577 if (fra != NULL) {
578 fra->ipfr_data = nat;
579 nat->nat_data = fra;
580 *softf->ipfr_nattail = fra;
581 fra->ipfr_prev = softf->ipfr_nattail;
582 softf->ipfr_nattail = &fra->ipfr_next;
583 fra->ipfr_next = NULL;
584 RWLOCK_EXIT(&softf->ipfr_natfrag);
585 return 0;
586 }
587 return -1;
588 }
589
590
591 /* ------------------------------------------------------------------------ */
592 /* Function: ipf_frag_ipidnew */
593 /* Returns: int - 0 == success, -1 == error */
594 /* Parameters: fin(I) - pointer to packet information */
595 /* ipid(I) - new IP ID for this fragmented packet */
596 /* */
597 /* Create a new fragment cache entry for this packet and store, as a data */
598 /* pointer, the new IP ID value. */
599 /* ------------------------------------------------------------------------ */
600 int
601 ipf_frag_ipidnew(fin, ipid)
602 fr_info_t *fin;
603 u_32_t ipid;
604 {
605 ipf_main_softc_t *softc = fin->fin_main_soft;
606 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
607 ipfr_t *fra;
608
609 if (softf->ipfr_lock)
610 return 0;
611
612 #ifdef USE_MUTEXES
613 fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab, &softf->ipfr_ipidfrag);
614 #else
615 fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab);
616 #endif
617 if (fra != NULL) {
618 fra->ipfr_data = (void *)(intptr_t)ipid;
619 *softf->ipfr_ipidtail = fra;
620 fra->ipfr_prev = softf->ipfr_ipidtail;
621 softf->ipfr_ipidtail = &fra->ipfr_next;
622 fra->ipfr_next = NULL;
623 RWLOCK_EXIT(&softf->ipfr_ipidfrag);
624 }
625 return fra ? 0 : -1;
626 }
627
628
629 /* ------------------------------------------------------------------------ */
630 /* Function: ipf_frag_lookup */
631 /* Returns: ipfr_t * - pointer to ipfr_t structure if there's a */
632 /* matching entry in the frag table, else NULL */
633 /* Parameters: fin(I) - pointer to packet information */
634 /* table(I) - pointer to fragment cache table to search */
635 /* */
636 /* Check the fragment cache to see if there is already a record of this */
637 /* packet with its filter result known. */
638 /* */
639 /* If this function succeeds, it returns with a read lock held on "lock". */
640 /* If it fails, no lock is held on return. */
641 /* ------------------------------------------------------------------------ */
642 static ipfr_t *
643 ipf_frag_lookup(softc, softf, fin, table
644 #ifdef USE_MUTEXES
645 , lock
646 #endif
647 )
648 ipf_main_softc_t *softc;
649 ipf_frag_softc_t *softf;
650 fr_info_t *fin;
651 ipfr_t *table[];
652 #ifdef USE_MUTEXES
653 ipfrwlock_t *lock;
654 #endif
655 {
656 ipfr_t *f, frag;
657 u_int idx;
658
659 /*
660 * We don't want to let short packets match because they could be
661 * compromising the security of other rules that want to match on
662 * layer 4 fields (and can't because they have been fragmented off.)
663 * Why do this check here? The counter acts as an indicator of this
664 * kind of attack, whereas if it was elsewhere, it wouldn't know if
665 * other matching packets had been seen.
666 */
667 if (fin->fin_flx & FI_SHORT) {
668 FBUMPD(ifs_short);
669 return NULL;
670 }
671
672 if ((fin->fin_flx & FI_BAD) != 0) {
673 FBUMPD(ifs_bad);
674 return NULL;
675 }
676
677 /*
678 * For fragments, we record protocol, packet id, TOS and both IP#'s
679 * (these should all be the same for all fragments of a packet).
680 *
681 * build up a hash value to index the table with.
682 */
683 memset(&frag, 0, sizeof(frag));
684 frag.ipfr_v = fin->fin_v;
685 idx = fin->fin_v;
686 frag.ipfr_p = fin->fin_p;
687 idx += fin->fin_p;
688 frag.ipfr_id = fin->fin_id;
689 idx += fin->fin_id;
690 frag.ipfr_source = fin->fin_fi.fi_src;
691 idx += frag.ipfr_src.s_addr;
692 frag.ipfr_dest = fin->fin_fi.fi_dst;
693 idx += frag.ipfr_dst.s_addr;
694 frag.ipfr_ifp = fin->fin_ifp;
695 idx *= 127;
696 idx %= softf->ipfr_size;
697
698 frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
699 frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
700 frag.ipfr_auth = fin->fin_fi.fi_auth;
701
702 READ_ENTER(lock);
703
704 /*
705 * check the table, being careful to compare only the right amount of data
706 */
707 for (f = table[idx]; f; f = f->ipfr_hnext) {
708 if (!bcmp((char *)&frag.ipfr_ifp, (char *)&f->ipfr_ifp,
709 IPFR_CMPSZ)) {
710 u_short off;
711
712 /*
713 * XXX - We really need to be guarding against the
714 * retransmission of (src,dst,id,offset-range) here
715 * because a fragmented packet is never resent with
716 * the same IP ID# (or shouldn't).
717 */
718 off = fin->fin_off >> 3;
719 if (f->ipfr_seen0) {
720 if (off == 0) {
721 FBUMPD(ifs_retrans0);
722 continue;
723 }
724
725 /*
726 * Case 3. See comment for frpr_fragment6.
727 */
728 if ((f->ipfr_firstend != 0) &&
729 (off < f->ipfr_firstend)) {
730 FBUMP(ifs_overlap);
731 DT2(ifs_overlap, u_short, off,
732 ipfr_t *, f);
733 DT3(ipf_fi_bad_ifs_overlap, fr_info_t *, fin, u_short, off,
734 ipfr_t *, f);
735 fin->fin_flx |= FI_BAD;
736 break;
737 }
738 } else if (off == 0)
739 f->ipfr_seen0 = 1;
740
741 if (f != table[idx] && MUTEX_TRY_UPGRADE(lock)) {
742 ipfr_t **fp;
743
744 /*
745 * Move fragment info. to the top of the list
746 * to speed up searches. First, delink...
747 */
748 fp = f->ipfr_hprev;
749 (*fp) = f->ipfr_hnext;
750 if (f->ipfr_hnext != NULL)
751 f->ipfr_hnext->ipfr_hprev = fp;
752 /*
753 * Then put back at the top of the chain.
754 */
755 f->ipfr_hnext = table[idx];
756 table[idx]->ipfr_hprev = &f->ipfr_hnext;
757 f->ipfr_hprev = table + idx;
758 table[idx] = f;
759 MUTEX_DOWNGRADE(lock);
760 }
761
762 /*
763 * If we've followed the fragments, and this is the
764 * last (in order), shrink expiration time.
765 */
766 if (off == f->ipfr_off) {
767 f->ipfr_off = (fin->fin_dlen >> 3) + off;
768
769 /*
770 * Well, we could shrink the expiration time
771 * but only if every fragment has been seen
772 * in order up to this, the last. ipfr_badorder
773 * is used here to count those out of order
774 * and if it equals 0 when we get to the last
775 * fragment then we can assume all of the
776 * fragments have been seen and in order.
777 */
778 #if 0
779 /*
780 * Doing this properly requires moving it to
781 * the head of the list which is infeasible.
782 */
783 if ((more == 0) && (f->ipfr_badorder == 0))
784 f->ipfr_ttl = softc->ipf_ticks + 1;
785 #endif
786 } else {
787 f->ipfr_badorder++;
788 FBUMPD(ifs_unordered);
789 if (f->ipfr_pass & FR_FRSTRICT) {
790 FBUMPD(ifs_strict);
791 continue;
792 }
793 }
794 f->ipfr_pkts++;
795 f->ipfr_bytes += fin->fin_plen;
796 FBUMP(ifs_hits);
797 return f;
798 }
799 }
800
801 RWLOCK_EXIT(lock);
802 FBUMP(ifs_miss);
803 return NULL;
804 }
805
806
807 /* ------------------------------------------------------------------------ */
808 /* Function: ipf_frag_natknown */
809 /* Returns: nat_t* - pointer to 'parent' NAT structure if frag table */
810 /* match found, else NULL */
811 /* Parameters: fin(I) - pointer to packet information */
812 /* */
813 /* Functional interface for NAT lookups of the NAT fragment cache */
814 /* ------------------------------------------------------------------------ */
815 nat_t *
816 ipf_frag_natknown(fin)
817 fr_info_t *fin;
818 {
819 ipf_main_softc_t *softc = fin->fin_main_soft;
820 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
821 nat_t *nat;
822 ipfr_t *ipf;
823
824 if ((softf->ipfr_lock) || !softf->ipfr_natlist)
825 return NULL;
826 #ifdef USE_MUTEXES
827 ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab,
828 &softf->ipfr_natfrag);
829 #else
830 ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab);
831 #endif
832 if (ipf != NULL) {
833 nat = ipf->ipfr_data;
834 /*
835 * This is the last fragment for this packet.
836 */
837 if ((ipf->ipfr_ttl == softc->ipf_ticks + 1) && (nat != NULL)) {
838 nat->nat_data = NULL;
839 ipf->ipfr_data = NULL;
840 }
841 RWLOCK_EXIT(&softf->ipfr_natfrag);
842 } else
843 nat = NULL;
844 return nat;
845 }
846
847
848 /* ------------------------------------------------------------------------ */
849 /* Function: ipf_frag_ipidknown */
850 /* Returns: u_32_t - IPv4 ID for this packet if match found, else */
851 /* return 0xffffffff to indicate no match. */
852 /* Parameters: fin(I) - pointer to packet information */
853 /* */
854 /* Functional interface for IP ID lookups of the IP ID fragment cache */
855 /* ------------------------------------------------------------------------ */
856 u_32_t
857 ipf_frag_ipidknown(fin)
858 fr_info_t *fin;
859 {
860 ipf_main_softc_t *softc = fin->fin_main_soft;
861 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
862 ipfr_t *ipf;
863 u_32_t id;
864
865 if (softf->ipfr_lock || !softf->ipfr_ipidlist)
866 return 0xffffffff;
867
868 #ifdef USE_MUTEXES
869 ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab,
870 &softf->ipfr_ipidfrag);
871 #else
872 ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab);
873 #endif
874 if (ipf != NULL) {
875 id = (u_32_t)(intptr_t)ipf->ipfr_data;
876 RWLOCK_EXIT(&softf->ipfr_ipidfrag);
877 } else
878 id = 0xffffffff;
879 return id;
880 }
881
882
883 /* ------------------------------------------------------------------------ */
884 /* Function: ipf_frag_known */
885 /* Returns: frentry_t* - pointer to filter rule if a match is found in */
886 /* the frag cache table, else NULL. */
887 /* Parameters: fin(I) - pointer to packet information */
888 /* passp(O) - pointer to where to store rule flags returned */
889 /* */
890 /* Functional interface for normal lookups of the fragment cache. If a */
891 /* match is found, return the rule pointer and flags from the rule, except */
892 /* that if FR_LOGFIRST is set, reset FR_LOG. */
893 /* ------------------------------------------------------------------------ */
894 frentry_t *
895 ipf_frag_known(fin, passp)
896 fr_info_t *fin;
897 u_32_t *passp;
898 {
899 ipf_main_softc_t *softc = fin->fin_main_soft;
900 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
901 frentry_t *fr = NULL;
902 ipfr_t *fra;
903 u_32_t pass;
904
905 if ((softf->ipfr_lock) || (softf->ipfr_list == NULL))
906 return NULL;
907
908 #ifdef USE_MUTEXES
909 fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads,
910 &softc->ipf_frag);
911 #else
912 fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads);
913 #endif
914 if (fra != NULL) {
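/*
 * A fragment already marked bad is matched against the static block
 * rule rather than the rule that matched the first fragment.
 */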
915 if (fin->fin_flx & FI_BAD) {
916 fr = &ipfr_block;
917 fin->fin_reason = FRB_BADFRAG;
918 DT2(ipf_frb_badfrag, fr_info_t *, fin, uint, fra);
919 } else {
920 fr = fra->ipfr_rule;
921 }
922 fin->fin_fr = fr;
923 if (fr != NULL) {
924 pass = fr->fr_flags;
925 if ((pass & FR_KEEPSTATE) != 0) {
926 fin->fin_flx |= FI_STATE;
927 /*
928 * Reset the keep state flag here so that we
929 * don't try and add a new state entry because
930 * of a match here. That leads to blocking of
931 * the packet later because the add fails.
932 */
933 pass &= ~FR_KEEPSTATE;
934 }
935 if ((pass & FR_LOGFIRST) != 0)
936 pass &= ~(FR_LOGFIRST|FR_LOG);
937 *passp = pass;
938 }
939 RWLOCK_EXIT(&softc->ipf_frag);
940 }
941 return fr;
942 }
943
944
945 /* ------------------------------------------------------------------------ */
946 /* Function: ipf_frag_natforget */
947 /* Returns: Nil */
948 /* Parameters: softc(I) - pointer to soft context main structure */
949 /* ptr(I) - pointer to data structure */
950 /* */
951 /* Search through all of the fragment cache entries for NAT and wherever a */
952 /* pointer is found to match ptr, reset it to NULL. */
953 /* ------------------------------------------------------------------------ */
954 void
955 ipf_frag_natforget(softc, ptr)
956 ipf_main_softc_t *softc;
957 void *ptr;
958 {
959 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
960 ipfr_t *fr;
961
962 WRITE_ENTER(&softf->ipfr_natfrag);
963 for (fr = softf->ipfr_natlist; fr; fr = fr->ipfr_next)
964 if (fr->ipfr_data == ptr)
965 fr->ipfr_data = NULL;
966 RWLOCK_EXIT(&softf->ipfr_natfrag);
967 }
968
969
970 /* ------------------------------------------------------------------------ */
971 /* Function: ipf_frag_delete */
972 /* Returns: Nil */
973 /* Parameters: softc(I) - pointer to soft context main structure */
974 /* fra(I) - pointer to fragment structure to delete */
975 /* tail(IO) - pointer to the pointer to the tail of the frag */
976 /* list */
977 /* */
978 /* Remove a fragment cache table entry from the table & list. Also free */
979 /* the filter rule it is associated with it if it is no longer used as a */
980 /* result of decreasing the reference count. */
981 /* ------------------------------------------------------------------------ */
982 static void
983 ipf_frag_delete(softc, fra, tail)
984 ipf_main_softc_t *softc;
985 ipfr_t *fra, ***tail;
986 {
987 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
988
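/* Unlink from the expiry list first, then from the hash chain. */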
989 if (fra->ipfr_next)
990 fra->ipfr_next->ipfr_prev = fra->ipfr_prev;
991 *fra->ipfr_prev = fra->ipfr_next;
992 if (*tail == &fra->ipfr_next)
993 *tail = fra->ipfr_prev;
994
995 if (fra->ipfr_hnext)
996 fra->ipfr_hnext->ipfr_hprev = fra->ipfr_hprev;
997 *fra->ipfr_hprev = fra->ipfr_hnext;
998
999 if (fra->ipfr_rule != NULL) {
1000 (void) ipf_derefrule(softc, &fra->ipfr_rule);
1001 }
1002
1003 if (fra->ipfr_ref <= 0)
1004 ipf_frag_free(softf, fra);
1005 }
1006
1007
1008 /* ------------------------------------------------------------------------ */
1009 /* Function: ipf_frag_free */
1010 /* Returns: Nil */
1011 /* Parameters: softf(I) - pointer to fragment context information */
1012 /* fra(I) - pointer to fragment structure to free */
1013 /* */
1014 /* Free up a fragment cache entry and bump relevant statistics. */
1015 /* ------------------------------------------------------------------------ */
1016 static void
1017 ipf_frag_free(softf, fra)
1018 ipf_frag_softc_t *softf;
1019 ipfr_t *fra;
1020 {
1021 KFREE(fra);
1022 FBUMP(ifs_expire);
1023 softf->ipfr_stats.ifs_inuse--;
1024 }
1025
1026
1027 /* ------------------------------------------------------------------------ */
1028 /* Function: ipf_frag_clear */
1029 /* Returns: Nil */
1030 /* Parameters: softc(I) - pointer to soft context main structure */
1031 /* */
1032 /* Free memory in use by fragment state information kept. Do the normal */
1033 /* fragment state stuff first and then the NAT-fragment table. */
1034 /* ------------------------------------------------------------------------ */
1035 void
1036 ipf_frag_clear(softc)
1037 ipf_main_softc_t *softc;
1038 {
1039 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1040 ipfr_t *fra;
1041 nat_t *nat;
1042
1043 WRITE_ENTER(&softc->ipf_frag);
1044 while ((fra = softf->ipfr_list) != NULL) {
1045 fra->ipfr_ref--;
1046 ipf_frag_delete(softc, fra, &softf->ipfr_tail);
1047 }
1048 softf->ipfr_tail = &softf->ipfr_list;
1049 RWLOCK_EXIT(&softc->ipf_frag);
1050
1051 WRITE_ENTER(&softc->ipf_nat);
1052 WRITE_ENTER(&softf->ipfr_natfrag);
1053 while ((fra = softf->ipfr_natlist) != NULL) {
1054 nat = fra->ipfr_data;
1055 if (nat != NULL) {
1056 if (nat->nat_data == fra)
1057 nat->nat_data = NULL;
1058 }
1059 fra->ipfr_ref--;
1060 ipf_frag_delete(softc, fra, &softf->ipfr_nattail);
1061 }
1062 softf->ipfr_nattail = &softf->ipfr_natlist;
1063 RWLOCK_EXIT(&softf->ipfr_natfrag);
1064 RWLOCK_EXIT(&softc->ipf_nat);
1065 }
1066
1067
1068 /* ------------------------------------------------------------------------ */
1069 /* Function: ipf_frag_expire */
1070 /* Returns: Nil */
1071 /* Parameters: softc(I) - pointer to soft context main structure */
1072 /* */
1073 /* Expire entries in the fragment cache table that have been there too long */
1074 /* ------------------------------------------------------------------------ */
1075 void
1076 ipf_frag_expire(softc)
1077 ipf_main_softc_t *softc;
1078 {
1079 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1080 ipfr_t **fp, *fra;
1081 nat_t *nat;
1082 SPL_INT(s);
1083
1084 if (softf->ipfr_lock)
1085 return;
1086
1087 SPL_NET(s);
1088 WRITE_ENTER(&softc->ipf_frag);
1089 /*
1090 * Go through the entire table, looking for entries to expire,
1091 * which is indicated by the ttl being less than or equal to ipf_ticks.
1092 */
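/*
 * Entries are appended to the tail as they are created and (normally)
 * all use the same TTL delta, so the list is ordered by expiry time
 * and the scan can stop at the first entry that has not yet expired.
 */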
1093 for (fp = &softf->ipfr_list; ((fra = *fp) != NULL); ) {
1094 if (fra->ipfr_ttl > softc->ipf_ticks)
1095 break;
1096 fra->ipfr_ref--;
1097 ipf_frag_delete(softc, fra, &softf->ipfr_tail);
1098 }
1099 RWLOCK_EXIT(&softc->ipf_frag);
1100
1101 WRITE_ENTER(&softf->ipfr_ipidfrag);
1102 for (fp = &softf->ipfr_ipidlist; ((fra = *fp) != NULL); ) {
1103 if (fra->ipfr_ttl > softc->ipf_ticks)
1104 break;
1105 fra->ipfr_ref--;
1106 ipf_frag_delete(softc, fra, &softf->ipfr_ipidtail);
1107 }
1108 RWLOCK_EXIT(&softf->ipfr_ipidfrag);
1109
1110 /*
1111 * Same again for the NAT table, except that if the structure also
1112 * still points to a NAT structure, and the NAT structure points back
1113 * at the one to be free'd, NULL the reference from the NAT struct.
1114 * NOTE: We need to grab both mutexes early, and in this order so as
1115 * to prevent a deadlock if both try to expire at the same time.
1116 * The extra if() statement here is because it locks out all NAT
1117 * operations - no need to do that if there are no entries in this
1118 * list, right?
1119 */
1120 if (softf->ipfr_natlist != NULL) {
1121 WRITE_ENTER(&softc->ipf_nat);
1122 WRITE_ENTER(&softf->ipfr_natfrag);
1123 for (fp = &softf->ipfr_natlist; ((fra = *fp) != NULL); ) {
1124 if (fra->ipfr_ttl > softc->ipf_ticks)
1125 break;
1126 nat = fra->ipfr_data;
1127 if (nat != NULL) {
1128 if (nat->nat_data == fra)
1129 nat->nat_data = NULL;
1130 }
1131 fra->ipfr_ref--;
1132 ipf_frag_delete(softc, fra, &softf->ipfr_nattail);
1133 }
1134 RWLOCK_EXIT(&softf->ipfr_natfrag);
1135 RWLOCK_EXIT(&softc->ipf_nat);
1136 }
1137 SPL_X(s);
1138 }
1139
1140
1141 /* ------------------------------------------------------------------------ */
1142 /* Function: ipf_frag_pkt_next */
1143 /* Returns: int - 0 == success, else error */
1144 /* Parameters: softc(I) - pointer to soft context main structure */
1145 /* token(I) - pointer to token information for this caller */
1146 /* itp(I) - pointer to generic iterator from caller */
1147 /* */
1148 /* This function is used to step through the fragment cache list used for */
1149 /* filter rules. The hard work is done by the more generic ipf_frag_next. */
1150 /* ------------------------------------------------------------------------ */
1151 int
1152 ipf_frag_pkt_next(softc, token, itp)
1153 ipf_main_softc_t *softc;
1154 ipftoken_t *token;
1155 ipfgeniter_t *itp;
1156 {
1157 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1158
1159 #ifdef USE_MUTEXES
1160 return ipf_frag_next(softc, token, itp, &softf->ipfr_list,
1161 &softf->ipfr_frag);
1162 #else
1163 return ipf_frag_next(softc, token, itp, &softf->ipfr_list);
1164 #endif
1165 }
1166
1167
1168 /* ------------------------------------------------------------------------ */
1169 /* Function: ipf_frag_nat_next */
1170 /* Returns: int - 0 == success, else error */
1171 /* Parameters: softc(I) - pointer to soft context main structure */
1172 /* token(I) - pointer to token information for this caller */
1173 /* itp(I) - pointer to generic iterator from caller */
1174 /* */
1175 /* This function is used to step through the fragment cache list used for */
1176 /* NAT. The hard work is done by the more generic ipf_frag_next. */
1177 /* ------------------------------------------------------------------------ */
1178 int
1179 ipf_frag_nat_next(softc, token, itp)
1180 ipf_main_softc_t *softc;
1181 ipftoken_t *token;
1182 ipfgeniter_t *itp;
1183 {
1184 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1185
1186 #ifdef USE_MUTEXES
1187 return ipf_frag_next(softc, token, itp, &softf->ipfr_natlist,
1188 &softf->ipfr_natfrag);
1189 #else
1190 return ipf_frag_next(softc, token, itp, &softf->ipfr_natlist);
1191 #endif
1192 }
1193
1194 /* ------------------------------------------------------------------------ */
1195 /* Function: ipf_frag_next */
1196 /* Returns: int - 0 == success, else error */
1197 /* Parameters: softc(I) - pointer to soft context main structure */
1198 /* token(I) - pointer to token information for this caller */
1199 /* itp(I) - pointer to generic iterator from caller */
1200 /* top(I) - top of the fragment list */
1201 /* lock(I) - fragment cache lock */
1202 /* */
1203 /* This function is used to iterate through the list of entries in the */
1204 /* fragment cache. It increases the reference count on the one currently */
1205 /* being returned so that the caller can come back and resume from it later.*/
1206 /* */
1207 /* This function is used for both the NAT fragment cache as well as the ipf */
1208 /* fragment cache - hence the reason for passing in top and lock. */
1209 /* ------------------------------------------------------------------------ */
1210 static int
1211 ipf_frag_next(softc, token, itp, top
1212 #ifdef USE_MUTEXES
1213 , lock
1214 #endif
1215 )
1216 ipf_main_softc_t *softc;
1217 ipftoken_t *token;
1218 ipfgeniter_t *itp;
1219 ipfr_t **top;
1220 #ifdef USE_MUTEXES
1221 ipfrwlock_t *lock;
1222 #endif
1223 {
1224 ipfr_t *frag, *next, zero;
1225 int error = 0;
1226
1227 if (itp->igi_data == NULL) {
1228 IPFERROR(20001);
1229 return EFAULT;
1230 }
1231
1232 if (itp->igi_nitems != 1) {
1233 IPFERROR(20003);
1234 return EFAULT;
1235 }
1236
1237 frag = token->ipt_data;
1238
1239 READ_ENTER(lock);
1240
1241 if (frag == NULL)
1242 next = *top;
1243 else
1244 next = frag->ipfr_next;
1245
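/*
 * If the end of the list has been reached, copy out a zeroed entry so
 * the caller can tell that the iteration is complete.
 */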
1246 if (next != NULL) {
1247 ATOMIC_INC(next->ipfr_ref);
1248 token->ipt_data = next;
1249 } else {
1250 bzero(&zero, sizeof(zero));
1251 next = &zero;
1252 token->ipt_data = NULL;
1253 }
1254 if (next->ipfr_next == NULL)
1255 ipf_token_mark_complete(token);
1256
1257 RWLOCK_EXIT(lock);
1258
1259 error = COPYOUT(next, itp->igi_data, sizeof(*next));
1260 if (error != 0)
1261 IPFERROR(20002);
1262
1263 if (frag != NULL) {
1264 #ifdef USE_MUTEXES
1265 ipf_frag_deref(softc, &frag, lock);
1266 #else
1267 ipf_frag_deref(softc, &frag);
1268 #endif
1269 }
1270 return error;
1271 }
1272
1273
1274 /* ------------------------------------------------------------------------ */
1275 /* Function: ipf_frag_pkt_deref */
1276 /* Returns: Nil */
1277 /* Parameters: softc(I) - pointer to soft context main structure */
1278 /* data(I) - pointer to frag cache pointer */
1279 /* */
1280 /* This function is the external interface for dropping a reference to a */
1281 /* fragment cache entry used by filter rules. */
1282 /* ------------------------------------------------------------------------ */
1283 void
1284 ipf_frag_pkt_deref(softc, data)
1285 ipf_main_softc_t *softc;
1286 void *data;
1287 {
1288 ipfr_t **frp = data;
1289
1290 #ifdef USE_MUTEXES
1291 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1292
1293 ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_frag);
1294 #else
1295 ipf_frag_deref(softc->ipf_frag_soft, frp);
1296 #endif
1297 }
1298
1299
1300 /* ------------------------------------------------------------------------ */
1301 /* Function: ipf_frag_nat_deref */
1302 /* Returns: Nil */
1303 /* Parameters: softc(I) - pointer to soft context main structure */
1304 /* data(I) - pointer to frag cache pointer */
1305 /* */
1306 /* This function is the external interface for dropping a reference to a */
1307 /* fragment cache entry used by NAT table entries. */
1308 /* ------------------------------------------------------------------------ */
1309 void
1310 ipf_frag_nat_deref(softc, data)
1311 ipf_main_softc_t *softc;
1312 void *data;
1313 {
1314 ipfr_t **frp = data;
1315
1316 #ifdef USE_MUTEXES
1317 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1318
1319 ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_natfrag);
1320 #else
1321 ipf_frag_deref(softc->ipf_frag_soft, frp);
1322 #endif
1323 }
1324
1325
1326 /* ------------------------------------------------------------------------ */
1327 /* Function: ipf_frag_deref */
1328 /* Returns: Nil */
1329 /* Parameters: frp(IO) - pointer to fragment structure to dereference */
1330 /* lock(I) - lock associated with the fragment */
1331 /* */
1332 /* This function dereferences a fragment structure (ipfr_t). The pointer */
1333 /* passed in will always be reset back to NULL, even if the structure is */
1334 /* not freed, to enforce the notion that the caller is no longer entitled */
1335 /* to use the pointer it is dropping the reference to. */
1336 /* ------------------------------------------------------------------------ */
1337 static void
1338 ipf_frag_deref(arg, frp
1339 #ifdef USE_MUTEXES
1340 , lock
1341 #endif
1342 )
1343 void *arg;
1344 ipfr_t **frp;
1345 #ifdef USE_MUTEXES
1346 ipfrwlock_t *lock;
1347 #endif
1348 {
1349 ipf_frag_softc_t *softf = arg;
1350 ipfr_t *fra;
1351
1352 fra = *frp;
1353 *frp = NULL;
1354
1355 WRITE_ENTER(lock);
1356 fra->ipfr_ref--;
1357 if (fra->ipfr_ref <= 0)
1358 ipf_frag_free(softf, fra);
1359 RWLOCK_EXIT(lock);
1360 }
1361