/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <[email protected]>
 * Copyright 2011 Alexander Bluhm <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};
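
/*
 * Note: fe_off and fe_len are byte counts within the reassembled payload.
 * The IPv4 wire format encodes the offset in 8-byte units; pf_reassemble()
 * converts it on entry ((ip_off & IP_OFFMASK) << 3).  For example, three
 * 8-byte-aligned fragments of a 3000-byte payload could arrive as
 * (fe_off 0, fe_len 1240), (1240, 1240) and (2480, 520), the last one
 * with fe_mff clear.
 */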

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	uint16_t	fr_entries;	/* Total number of pf_frent entries */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};
#define PF_MAX_FRENT_PER_FRAGMENT	64

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};
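
/*
 * The pf_fragment_tag is prepended to the reassembled mbuf as an m_tag
 * (PF_REASSEMBLED) by pf_reassemble6() and later consumed by
 * pf_refragment6(), which uses ft_maxlen and ft_id to cut the packet
 * back into fragments on output.
 */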

static struct mtx pf_frag_mtx;
MTX_SYSINIT(pf_frag_mtx, &pf_frag_mtx, "pf fragments", MTX_DEF);
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)
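
/*
 * A single global mutex serializes all fragment reassembly state; the
 * zones and queues below are per-vnet, but the lock itself is shared
 * across vnets.
 */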

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
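
/*
 * Each fragment queue is kept in two structures at once: the RB tree
 * (keyed by pf_frag_compare) for address/id lookup, and the TAILQ in
 * least-recently-used order for cheap expiration and flushing.
 */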

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);
static int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static int	pf_isfull_fragment(struct pf_fragment *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void	pf_scrub_ip6(struct mbuf **, uint8_t);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while (0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
			    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

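/*
 * Free every fragment queue whose fr_timeout is at or before 'expire'.
 * The queue is kept in LRU order (lookups and new entries move to the
 * head), so scanning from the tail can stop at the first entry that is
 * still young enough.
 */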
void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

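/*
 * Allocate a fragment entry.  Under memory pressure, flush old
 * fragment queues once and retry before giving up with PFRES_MEMORY.
 */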
static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

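/*
 * Insert a fragment into its (possibly new) reassembly queue, sorted
 * by offset.  Overlaps are resolved in favor of data already queued
 * ahead of the new fragment and in favor of the new fragment for
 * anything behind it: if a previous entry covers the head of the new
 * fragment, the new fragment is trimmed (precut); if the new fragment
 * covers the head (or all) of following entries, those are trimmed or
 * dropped (aftercut).  E.g. queueing (fe_off 0, fe_len 16) followed by
 * (8, 16) keeps bytes 0-15 from the first fragment and 16-23 from the
 * second.
 */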
static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		frag->fr_entries = 0;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment. */
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);

		return (frag);
	}

	if (frag->fr_entries >= PF_MAX_FRENT_PER_FRAGMENT)
		goto bad_fragment;

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find a fragment after the current one. */
	prev = NULL;
	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
		if (after->fe_off > frent->fe_off)
			break;
		prev = after;
	}

	KASSERT(prev != NULL || after != NULL,
	    ("prev != NULL || after != NULL"));

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		m_freem(after->fe_m);
		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
		uma_zfree(V_pf_frent_z, after);
	}

	if (prev == NULL)
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	else
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);

	frag->fr_entries++;

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

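/*
 * A queue is complete once the last entry has the more-fragments flag
 * clear and the queued payload is gap-free from offset 0 up to that
 * entry's end.
 */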
static int
pf_isfull_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent, *next;
	uint16_t	 off, total;

	/* Check if we are completely reassembled */
	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
		return (0);

	/* Maximum data we have seen already */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Check if we have all the data */
	off = 0;
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		off += frent->fe_len;
		if (off < total && (next == NULL || next->fe_off != off)) {
			DPFPRINTF(("missing fragment at %d, next %d, total %d",
			    off, next == NULL ? -1 : next->fe_off, total));
			return (0);
		}
	}
	DPFPRINTF(("%d < %d?", off, total));
	if (off < total)
		return (0);
	KASSERT(off == total, ("off == total"));

	return (1);
}

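/*
 * Concatenate the fragment payloads into a single mbuf chain.  Note
 * the negative m_adj() on each follow-up fragment: a negative length
 * trims from the tail of the chain, dropping anything beyond fe_len.
 */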
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf	*m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
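/*
 * Return convention: PF_DROP on error; PF_PASS with *m0 set to NULL
 * when the fragment was consumed into a still-incomplete queue;
 * PF_PASS with *m0 pointing at the fully reassembled packet otherwise.
 * The caller holds PF_FRAG_LOCK().
 */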
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag))
		return (PF_PASS);	/* drop because *m0 is NULL, no error */

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */

#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag)) {
		PF_FRAG_UNLOCK();
		return (PF_PASS);	/* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */

#ifdef INET6
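/*
 * Cut a reassembled IPv6 packet back into fragments before it leaves
 * the box, using the metadata saved in the PF_REASSEMBLED m_tag: the
 * fragments are sized to the largest original fragment (rounded down
 * to a multiple of 8) and reuse the original fragment id, so the
 * refragmented stream roughly resembles what the sender emitted.
 */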
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
	struct mbuf		*m = *m0, *t;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		struct ip6_hdr *hdr;

		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/*
	 * The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong.
	 */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d", error));
		action = PF_DROP;
	}
	for (t = m; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			ip6_forward(m, 0);
		else
			m_freem(m);
	}

	return (action);
}
#endif	/* INET6 */

#ifdef INET
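/*
 * Walk the scrub ruleset for IPv4 packets, validate header fields and,
 * for fragments, feed them to the reassembly code.  The rule walk uses
 * pf's precomputed skip steps, which jump over runs of rules that
 * cannot match once one criterion (interface, direction, af, ...) has
 * failed.
 */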
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf	*m = *m0;
	struct pf_rule	*r;
	struct ip	*h = mtod(m, struct ip *);
	int		 mff = (ntohs(h->ip_off) & IP_MF);
	int		 hlen = h->ip_hl << 2;
	u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 max;
	int		 ip_len;
	int		 ip_off;
	int		 tag = -1;
	int		 verdict;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/*
	 * We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	/*
	 * Fully buffer all of the fragments.
	 * Might return a completely reassembled mbuf, or NULL.
	 */
	PF_FRAG_LOCK();
	DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
	verdict = pf_reassemble(m0, h, dir, reason);
	PF_FRAG_UNLOCK();

	if (verdict != PF_PASS)
		return (PF_DROP);

	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	h = mtod(m, struct ip *);

no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
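			/*
			 * Extension header lengths use different units:
			 * AH counts 32-bit words minus two, all others
			 * count 8-byte units minus one.
			 */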
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

fragment:
	/* Jumbo payload packets cannot be fragmented. */
	plen = ntohs(h->ip6_plen);
	if (plen == 0 || jumbolen)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif	/* INET6 */

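/*
 * TCP normalization: drop impossible flag combinations (SYN with RST
 * or FIN, FIN/PSH/URG without ACK), clear the reserved header bits and
 * a stale urgent pointer, and optionally clamp the MSS option; the
 * checksum is patched incrementally for every change.
 */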
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}

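/*
 * Set up the per-peer scrub state for a new TCP connection.  On the
 * initial SYN this records the peer's timestamp (tsval0) and draws a
 * random per-connection offset (pfss_ts_mod) that is later added to
 * every timestamp the peer sends, hiding its real timestamp clock from
 * the other end.
 */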
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub == NULL && dst->scrub == NULL", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/*
				 * Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_proto_a(m, &opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_proto_a(m, &opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/*
		 * Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoints timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tescr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tescr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/*
			 * Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/*
		 * Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 * - connection opening or closing (often not even sent).
		 *   but we must not let an attacker put a FIN on a
		 *   data packet to sneak it through our ESTABLISHED check.
		 * - on a TCP reset.  RFC suggests not even looking at TS.
		 * - on an empty ACK.  The TS will not be echoed so it will
		 *   probably not help keep the RTT calculation in sync and
		 *   there isn't as much danger when the sequence numbers
		 *   got wrapped.  So some stacks don't include TS on empty
		 *   ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends its data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But I think there are middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

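/*
 * Walk the TCP options and clamp the MSS option to the rule's max-mss,
 * patching the checksum incrementally.  E.g. with "scrub max-mss 1440",
 * an incoming MSS of 1460 is rewritten to 1440 and the checksum is
 * adjusted by the 16-bit difference instead of being recomputed.
 */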
static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_proto_cksum_fixup(m,
				    th->th_sum, *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}

#ifdef INET
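/*
 * Apply the per-rule IPv4 scrub actions (no-df, min-ttl, set-tos,
 * random-id).  Every header change goes through pf_cksum_fixup(), the
 * RFC 1624 incremental checksum update, so the header checksum never
 * has to be recomputed from scratch.
 */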
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf	*m = *m0;
	struct ip	*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */

#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf	*m = *m0;
	struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
#endif