/*	$NetBSD: pf_norm.c,v 1.1.1.4 2009/12/01 07:03:14 martti Exp $	*/
/*	$OpenBSD: pf_norm.c,v 1.109 2007/05/28 17:16:39 henning Exp $ */

/*
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "pflog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>

#include <dev/rndvar.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#include <net/pfvar.h>

struct pf_frent {
	LIST_ENTRY(pf_frent) fr_next;
	struct ip *fr_ip;
	struct mbuf *fr_m;
};

struct pf_frcache {
	LIST_ENTRY(pf_frcache) fr_next;
	uint16_t fr_off;
	uint16_t fr_end;
};

#define PFFRAG_SEENLAST	0x0001	/* Seen the last fragment for this packet */
#define PFFRAG_NOBUFFER	0x0002	/* Non-buffering fragment cache */
#define PFFRAG_DROP	0x0004	/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
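
/*
 * pf tracks fragments in one of two modes.  In the default buffering
 * mode, pf_reassemble() holds every fragment mbuf until the datagram
 * is complete.  When a scrub rule uses the crop or drop-ovl fragment
 * options instead, PFFRAG_NOBUFFER is set and pf_fragcache() merely
 * records which byte ranges have already passed, letting the
 * fragments themselves flow through unbuffered.
 */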

struct pf_fragment {
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	struct in_addr	fr_src;
	struct in_addr	fr_dst;
	u_int8_t	fr_p;		/* protocol of this fragment */
	u_int8_t	fr_flags;	/* status flags */
	u_int16_t	fr_id;		/* fragment id for reassemble */
	u_int16_t	fr_max;		/* fragment data max */
	u_int32_t	fr_timeout;
#define fr_queue	fr_u.fru_queue
#define fr_cache	fr_u.fru_cache
	union {
		LIST_HEAD(pf_fragq, pf_frent) fru_queue;	/* buffering */
		LIST_HEAD(pf_cacheq, pf_frcache) fru_cache;	/* non-buf */
	} fr_u;
};

TAILQ_HEAD(pf_fragqueue, pf_fragment) pf_fragqueue;
TAILQ_HEAD(pf_cachequeue, pf_fragment) pf_cachequeue;

static __inline int	pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
RB_HEAD(pf_frag_tree, pf_fragment) pf_frag_tree, pf_cache_tree;
RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

/* Private prototypes */
void		 pf_ip2key(struct pf_fragment *, struct ip *);
void		 pf_remove_fragment(struct pf_fragment *);
void		 pf_flush_fragments(void);
void		 pf_free_fragment(struct pf_fragment *);
struct pf_fragment *pf_find_fragment(struct ip *, struct pf_frag_tree *);
struct mbuf	*pf_reassemble(struct mbuf **, struct pf_fragment **,
		    struct pf_frent *, int);
struct mbuf	*pf_fragcache(struct mbuf **, struct ip *,
		    struct pf_fragment **, int, int, int *);
int		 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int);

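/*
 * DPFPRINTF takes its argument list wrapped in an extra set of
 * parentheses, e.g. DPFPRINTF(("expiring %d\n", id)), so a variable
 * printf-style argument list can travel through the single macro
 * parameter x below.
 */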
#define DPFPRINTF(x) do {				\
	if (pf_status.debug >= PF_DEBUG_MISC) {		\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

/* Globals */
struct pool	 pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
struct pool	 pf_state_scrub_pl;
int		 pf_nfrents, pf_ncache;

void
pf_normalize_init(void)
{
	pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
	    NULL);
	pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
	    NULL);
	pool_init(&pf_cache_pl, sizeof(struct pf_fragment), 0, 0, 0,
	    "pffrcache", NULL);
	pool_init(&pf_cent_pl, sizeof(struct pf_frcache), 0, 0, 0, "pffrcent",
	    NULL);
	pool_init(&pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0, 0, 0,
	    "pfstscr", NULL);

	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);

	TAILQ_INIT(&pf_fragqueue);
	TAILQ_INIT(&pf_cachequeue);
}

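/*
 * The red-black trees are keyed by the classic IPv4 reassembly tuple
 * from RFC 791: (ip_id, ip_p, ip_src, ip_dst).  Two fragments compare
 * equal exactly when they belong to the same original datagram.
 */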
static __inline int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	struct pf_fragment *frag;
	u_int32_t expire = time_second -
	    pf_default_rule.timeout[PFTM_FRAG];

	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT(BUFFER_FRAGMENTS(frag));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT(!BUFFER_FRAGMENTS(frag));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT(TAILQ_EMPTY(&pf_cachequeue) ||
		    TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag);
	}
}

/*
 * Try to flush old fragments to make space for new ones
 */
void
pf_flush_fragments(void)
{
	struct pf_fragment *frag;
	int goal;

	goal = pf_nfrents * 9 / 10;
	DPFPRINTF(("trying to free > %d frents\n",
	    pf_nfrents - goal));
	while (goal < pf_nfrents) {
		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}

	goal = pf_ncache * 9 / 10;
	DPFPRINTF(("trying to free > %d cache entries\n",
	    pf_ncache - goal));
	while (goal < pf_ncache) {
		frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}
}

/* Frees the fragments and all associated entries */
void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent *frent;
	struct pf_frcache *frcache;

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			pool_put(&pf_frent_pl, frent);
			pf_nfrents--;
		}
	} else {
		for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
		    frcache = LIST_FIRST(&frag->fr_cache)) {
			LIST_REMOVE(frcache, fr_next);

			KASSERT(LIST_EMPTY(&frag->fr_cache) ||
			    LIST_FIRST(&frag->fr_cache)->fr_off >
			    frcache->fr_end);

			pool_put(&pf_cent_pl, frcache);
			pf_ncache--;
		}
	}

	pf_remove_fragment(frag);
}

void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}

struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment key;
	struct pf_fragment *frag;

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_second;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
void
pf_remove_fragment(struct pf_fragment *frag)
{
	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
		pool_put(&pf_frag_pl, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
		TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
		pool_put(&pf_cache_pl, frag);
	}
}

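/*
 * FR_IP_OFF yields a fragment's payload offset in bytes: ip_off
 * stores the offset in 8-byte units with the RF/DF/MF flags in the
 * top three bits, so mask with IP_OFFMASK and shift left by 3.  For
 * example, a raw ip_off of 0x2005 (MF set, offset field 5) masks to
 * 5 and yields byte offset 40.
 */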
#define FR_IP_OFF(fr)	((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf *m = *m0, *m2;
	struct pf_frent *frea, *next;
	struct pf_frent *frep = NULL;
	struct ip *ip = frent->fr_ip;
	int hlen = ip->ip_hl << 2;
	u_int16_t off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	u_int16_t ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
	u_int16_t max = ip_len + off;

	KASSERT(*frag == NULL || BUFFER_FRAGMENTS(*frag));

	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto drop_fragment;
		}

		(*frag)->fr_flags = 0;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = frent->fr_ip->ip_src;
		(*frag)->fr_dst = frent->fr_ip->ip_dst;
		(*frag)->fr_p = frent->fr_ip->ip_p;
		(*frag)->fr_id = frent->fr_ip->ip_id;
		(*frag)->fr_timeout = time_second;
		LIST_INIT(&(*frag)->fr_queue);

		RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);

		/* We do not have a previous fragment */
		frep = NULL;
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

	KASSERT(frep != NULL || frea != NULL);

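	/*
	 * Resolve overlaps so that every queued byte range stays
	 * disjoint: trim the head of the new fragment where it
	 * overlaps frep, the entry before it (the data already queued
	 * wins there), then trim the heads of, or drop entirely, any
	 * following entries that the new fragment covers (the new
	 * data wins there).
	 */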
	if (frep != NULL &&
	    FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *
	    4 > off)
	{
		u_int16_t precut;

		precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
		off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = htons(ip_len);
	}

	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    htons(ntohs(frea->fr_ip->ip_len) - aftercut);
			frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
			    (aftercut >> 3));
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		pool_put(&pf_frent_pl, frea);
		pf_nfrents--;
	}

insert:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;
	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);

	/* Check if we are completely reassembled */
	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
		if (off < (*frag)->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    (*frag)->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
	if (off < (*frag)->fr_max)
		return (NULL);

	/* We have all the data */
	frent = LIST_FIRST(&(*frag)->fr_queue);
	KASSERT(frent != NULL);
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(*frag);
		*frag = NULL;
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		pool_put(&pf_frent_pl, frent);
		pf_nfrents--;
		m_cat(m, m2);
	}

	ip->ip_src = (*frag)->fr_src;
	ip->ip_dst = (*frag)->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(*frag);
	*frag = NULL;

	hlen = ip->ip_hl << 2;
	ip->ip_len = htons(off + hlen);
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (m);

drop_fragment:
	/* Oops - fail safe - drop packet */
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	m_freem(m);
	return (NULL);
}

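/*
 * Non-buffering mode: rather than holding on to mbufs, track only
 * the byte ranges (fr_off, fr_end) that have already been let
 * through, merging adjacent cache entries as the ranges close up.
 * Overlapping data is either trimmed from the packet (crop) or, when
 * drop is set, condemns the whole datagram via PFFRAG_DROP.  On
 * allocation failure *nomem is set so the caller can report
 * PFRES_MEMORY.
 */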
struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf *m = *m0;
	struct pf_frcache *frp, *fra, *cur = NULL;
	int ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
	u_int16_t off = ntohs(h->ip_off) << 3;
	u_int16_t max = ip_len + off;
	int hosed = 0;

	KASSERT(*frag == NULL || !BUFFER_FRAGMENTS(*frag));

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = pool_get(&pf_cent_pl, PR_NOWAIT);
		if (cur == NULL) {
			pool_put(&pf_cache_pl, *frag);
			*frag = NULL;
			goto no_mem;
		}
		pf_ncache++;

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = h->ip_src;
		(*frag)->fr_dst = h->ip_dst;
		(*frag)->fr_p = h->ip_p;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_second;

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&(*frag)->fr_cache);
		LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);

		RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

	KASSERT(frp != NULL || fra != NULL);

	if (frp != NULL) {
		int precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * we could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_copym2(m, 0, h->ip_hl << 2, M_NOWAIT);
				if (*m0 == NULL)
					goto no_mem;
				KASSERT((*m0)->m_next == NULL);
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT((int)m->m_len ==
				    ntohs(h->ip_len) - precut);
				h->ip_off = htons(ntohs(h->ip_off) +
				    (precut >> 3));
				h->ip_len = htons(ntohs(h->ip_len) - precut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}

	if (fra != NULL) {
		int aftercut;
		int merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT((int)m->m_len ==
				    ntohs(h->ip_len) - aftercut);
				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
			} else {
				hosed++;
			}
		} else if (frp == NULL) {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				pool_put(&pf_cent_pl, cur);
				pf_ncache--;
				cur = NULL;
			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
				KASSERT(cur == NULL);
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				pool_put(&pf_cent_pl, frp);
				pf_ncache--;
				frp = NULL;
			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reass */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}

int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf *m = *m0;
	struct pf_rule *r;
	struct pf_frent *frent;
	struct pf_fragment *frag = NULL;
	struct ip *h = mtod(m, struct ip *);
	int mff = (ntohs(h->ip_off) & IP_MF);
	int hlen = h->ip_hl << 2;
	u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t max;
	int ip_len;
	int ip_off;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > ntohs(h->ip_len))
		goto drop;

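	/*
	 * Header rewrites below use pf_cksum_fixup(), which updates
	 * the checksum incrementally from the old and new 16-bit
	 * values in the style of RFC 1624, so a one-field change
	 * never requires recomputing the checksum over the entire
	 * header.
	 */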
	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
		/* Fully buffer all of the fragments */

		frag = pf_find_fragment(h, &pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		pf_nfrents++;
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, &frag, frent, mff);

		if (m == NULL)
			return (PF_DROP);

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int nomem = 0;

		if (dir == PF_OUT && m->m_pkthdr.pf.flags & PF_TAG_FRAGCACHE) {
			/*
			 * Already passed the fragment cache in the
			 * input direction.  If we continued, it would
			 * appear to be a dup and would be dropped.
			 */
			goto fragment_pass;
		}

		frag = pf_find_fragment(h, &pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		if (dir == PF_IN)
			m->m_pkthdr.pf.flags |= PF_TAG_FRAGCACHE;

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;
		goto fragment_pass;
	}

no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = r->min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	if (r->rule_flag & PFRULE_RANDOMID) {
		u_int16_t ip_id = h->ip_id;

		h->ip_id = ip_randomid();
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
		pd->flags |= PFDESC_IP_REAS;

	return (PF_PASS);

fragment_pass:
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = r->min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}
	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
		pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);

bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);

	return (PF_DROP);
}

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf *m = *m0;
	struct pf_rule *r;
	struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
	int off;
	struct ip6_ext ext;
	struct ip6_opt opt;
	struct ip6_opt_jumbo jumbo;
	struct ip6_frag frag;
	u_int32_t jumbolen = 0, plen;
	u_int16_t fragoff = 0;
	int optend;
	int ooff;
	u_int8_t proto;
	int terminal;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip6_hlim < r->min_ttl)
		h->ip6_hlim = r->min_ttl;

	return (PF_PASS);

fragment:
	if (ntohs(h->ip6_plen) == 0 || jumbolen)
		goto drop;
	plen = ntohs(h->ip6_plen);

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;
	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
		goto badfrag;

	/* do something about it */
	/* remember to set pd->flags |= PFDESC_IP_REAS */
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);

badfrag:
	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
	return (PF_DROP);
}
#endif /* INET6 */

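/*
 * Sanity-check and scrub the TCP header against the matching scrub
 * rule: drop impossible flag combinations (SYN+RST, or FIN/PSH/URG
 * without ACK), clear the reserved bits, zero a stray urgent
 * pointer, and clamp the MSS option when the rule requests it,
 * fixing up the checksum for every change made.
 */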
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule *r, *rm = NULL;
	struct tcphdr *th = pd->hdr.tcp;
	int rewrite = 0;
	u_short reason;
	u_int8_t flags;
	sa_family_t af = pd->af;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

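	/*
	 * th_x2 (the reserved bits) and th_flags live in the 16-bit
	 * word that immediately follows th_ack, which is what the
	 * (u_int16_t *)(&th->th_ack + 1) expression below points at.
	 * Snapshotting that word before and after the change lets the
	 * checksum be fixed up incrementally.
	 */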
	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL, pd);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT(src->scrub == NULL);

	src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
	if (src->scrub == NULL)
		return (1);
	bzero(src->scrub, sizeof(*src->scrub));

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags.
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		pool_put(&pf_state_scrub_pl, state->src.scrub);
	if (state->dst.scrub)
		pool_put(&pf_state_scrub_pl, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;

	KASSERT(src->scrub || dst->scrub);

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_a(&opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_a(&opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_second - state->creation > TS_MAX_CONN)) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		timersub(&uptime, &src->scrub->pfss_last, &delta_ts);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
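
		/*
		 * Worked example: after 10 seconds of idle time with a
		 * 5 second fudge factor, a 1kHz clock running 10% fast
		 * may have advanced by at most (10 + 5) * 1100 = 16500
		 * ticks, plus the microsecond remainder converted at
		 * 1100 ticks per second.
		 */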

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %lu  tsecr: %lu  +ticks: %lu  "
			    "idle: %lus %lums\n",
			    tsval, tsecr, tsval_from_last, delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %lu  tsecr: %lu\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %lu  tsecr: %lu  tsval0: %lu"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends his data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets be
	 * timestamped.  But I think there are middle-man devices that hijack
	 * TCP streams immediately after the 3whs and don't timestamp their
	 * packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

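/*
 * Walk the TCP options and clamp any MSS option that exceeds the
 * rule's max-mss down to that value, fixing up the checksum in
 * place.  Returns nonzero when the header was modified so the caller
 * knows to copy it back into the mbuf.
 */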
int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off)
{
	u_int16_t *mss;
	int thoff;
	int opt, cnt, optlen = 0;
	int rewrite = 0;
	u_char *optp;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);
	optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	return (rewrite);
}