      1 /*	$NetBSD: pf_norm.c,v 1.2 2004/06/22 14:17:08 itojun Exp $	*/
      2 /*	$OpenBSD: pf_norm.c,v 1.80 2004/03/09 21:44:41 mcbride Exp $ */
      3 
      4 /*
      5  * Copyright 2001 Niels Provos <provos@citi.umich.edu>
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  *
     17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27  */
     28 
     29 #ifdef _KERNEL_OPT
     30 #include "opt_inet.h"
     31 #endif
     32 
     33 #include "pflog.h"
     34 
     35 #include <sys/param.h>
     36 #include <sys/systm.h>
     37 #include <sys/mbuf.h>
     38 #include <sys/filio.h>
     39 #include <sys/fcntl.h>
     40 #include <sys/socket.h>
     41 #include <sys/kernel.h>
     42 #include <sys/time.h>
     43 #include <sys/pool.h>
     44 
     45 #ifdef __OpenBSD__
     46 #include <dev/rndvar.h>
     47 #else
     48 #include <sys/rnd.h>
     49 #endif
     50 #include <net/if.h>
     51 #include <net/if_types.h>
     52 #include <net/bpf.h>
     53 #include <net/route.h>
     54 #include <net/if_pflog.h>
     55 
     56 #include <netinet/in.h>
     57 #include <netinet/in_var.h>
     58 #include <netinet/in_systm.h>
     59 #include <netinet/ip.h>
     60 #include <netinet/ip_var.h>
     61 #include <netinet/tcp.h>
     62 #include <netinet/tcp_seq.h>
     63 #include <netinet/udp.h>
     64 #include <netinet/ip_icmp.h>
     65 
     66 #ifdef INET6
     67 #include <netinet/ip6.h>
     68 #endif /* INET6 */
     69 
     70 #include <net/pfvar.h>
     71 
     72 struct pf_frent {
     73 	LIST_ENTRY(pf_frent) fr_next;
     74 	struct ip *fr_ip;
     75 	struct mbuf *fr_m;
     76 };
     77 
     78 struct pf_frcache {
     79 	LIST_ENTRY(pf_frcache) fr_next;
     80 	uint16_t	fr_off;
     81 	uint16_t	fr_end;
     82 };
     83 
     84 #define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this packet */
     85 #define PFFRAG_NOBUFFER	0x0002		/* Non-buffering fragment cache */
     86 #define PFFRAG_DROP	0x0004		/* Drop all fragments */
     87 #define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
     88 
     89 struct pf_fragment {
     90 	RB_ENTRY(pf_fragment) fr_entry;
     91 	TAILQ_ENTRY(pf_fragment) frag_next;
     92 	struct in_addr	fr_src;
     93 	struct in_addr	fr_dst;
     94 	u_int8_t	fr_p;		/* protocol of this fragment */
     95 	u_int8_t	fr_flags;	/* status flags */
     96 	u_int16_t	fr_id;		/* fragment id for reassembly */
     97 	u_int16_t	fr_max;		/* fragment data max */
     98 	u_int32_t	fr_timeout;
     99 #define fr_queue	fr_u.fru_queue
    100 #define fr_cache	fr_u.fru_cache
    101 	union {
    102 		LIST_HEAD(pf_fragq, pf_frent) fru_queue;	/* buffering */
    103 		LIST_HEAD(pf_cacheq, pf_frcache) fru_cache;	/* non-buf */
    104 	} fr_u;
    105 };
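
/*
 * A pf_fragment tracks one IPv4 datagram in one of two modes, selected by
 * PFFRAG_NOBUFFER: either the fragments themselves are held on fru_queue
 * until the datagram can be reassembled by pf_reassemble(), or only the
 * byte ranges already seen are recorded on fru_cache by pf_fragcache()
 * while the fragments themselves are passed through unbuffered.
 */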
    106 
    107 TAILQ_HEAD(pf_fragqueue, pf_fragment)	pf_fragqueue;
    108 TAILQ_HEAD(pf_cachequeue, pf_fragment)	pf_cachequeue;
    109 
    110 static __inline int	 pf_frag_compare(struct pf_fragment *,
    111 			    struct pf_fragment *);
    112 RB_HEAD(pf_frag_tree, pf_fragment)	pf_frag_tree, pf_cache_tree;
    113 RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
    114 RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
    115 
    116 /* Private prototypes */
    117 void			 pf_ip2key(struct pf_fragment *, struct ip *);
    118 void			 pf_remove_fragment(struct pf_fragment *);
    119 void			 pf_flush_fragments(void);
    120 void			 pf_free_fragment(struct pf_fragment *);
    121 struct pf_fragment	*pf_find_fragment(struct ip *, struct pf_frag_tree *);
    122 struct mbuf		*pf_reassemble(struct mbuf **, struct pf_fragment **,
    123 			    struct pf_frent *, int);
    124 struct mbuf		*pf_fragcache(struct mbuf **, struct ip*,
    125 			    struct pf_fragment **, int, int, int *);
    126 u_int16_t		 pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t);
    127 int			 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
    128 			    struct tcphdr *, int);
    129 
    130 #define	DPFPRINTF(x)	if (pf_status.debug >= PF_DEBUG_MISC) \
    131 			    { printf("%s: ", __func__); printf x ;}
    132 
    133 /* Globals */
    134 struct pool		 pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
    135 struct pool		 pf_state_scrub_pl;
    136 int			 pf_nfrents, pf_ncache;
    137 
    138 #ifdef __NetBSD__
    139 POOL_INIT(pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent", NULL);
    140 POOL_INIT(pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag", NULL);
    141 POOL_INIT(pf_cache_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrcache", NULL);
    142 POOL_INIT(pf_cent_pl, sizeof(struct pf_frcache), 0, 0, 0, "pffrcent", NULL);
    143 POOL_INIT(pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0, 0, 0,
    144     "pfstscr", NULL);
    145 #endif
    146 
    147 void
    148 pf_normalize_init(void)
    149 {
    150 #ifdef __OpenBSD__
    151 	pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
    152 	    NULL);
    153 	pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
    154 	    NULL);
    155 	pool_init(&pf_cache_pl, sizeof(struct pf_fragment), 0, 0, 0,
    156 	    "pffrcache", NULL);
    157 	pool_init(&pf_cent_pl, sizeof(struct pf_frcache), 0, 0, 0, "pffrcent",
    158 	    NULL);
    159 	pool_init(&pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0, 0, 0,
    160 	    "pfstscr", NULL);
    161 #endif
    162 
    163 	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
    164 	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
    165 	pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
    166 	pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);
    167 
    168 	TAILQ_INIT(&pf_fragqueue);
    169 	TAILQ_INIT(&pf_cachequeue);
    170 }
    171 
    172 static __inline int
    173 pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
    174 {
    175 	int	diff;
    176 
    177 	if ((diff = a->fr_id - b->fr_id))
    178 		return (diff);
    179 	else if ((diff = a->fr_p - b->fr_p))
    180 		return (diff);
    181 	else if (a->fr_src.s_addr < b->fr_src.s_addr)
    182 		return (-1);
    183 	else if (a->fr_src.s_addr > b->fr_src.s_addr)
    184 		return (1);
    185 	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
    186 		return (-1);
    187 	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
    188 		return (1);
    189 	return (0);
    190 }
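
/*
 * pf_frag_compare() imposes a total order on (fr_id, fr_p, fr_src, fr_dst),
 * the same fields that pf_ip2key() copies out of an incoming header, so
 * RB_FIND() on either tree locates the pf_fragment (if any) that already
 * tracks the datagram a new fragment belongs to.
 */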
    191 
    192 void
    193 pf_purge_expired_fragments(void)
    194 {
    195 	struct pf_fragment	*frag;
    196 	u_int32_t		 expire = time.tv_sec -
    197 				    pf_default_rule.timeout[PFTM_FRAG];
    198 
    199 	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
    200 		KASSERT(BUFFER_FRAGMENTS(frag));
    201 		if (frag->fr_timeout > expire)
    202 			break;
    203 
    204 		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
    205 		pf_free_fragment(frag);
    206 	}
    207 
    208 	while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
    209 		KASSERT(!BUFFER_FRAGMENTS(frag));
    210 		if (frag->fr_timeout > expire)
    211 			break;
    212 
    213 		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
    214 		pf_free_fragment(frag);
    215 		KASSERT(TAILQ_EMPTY(&pf_cachequeue) ||
    216 		    TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag);
    217 	}
    218 }
    219 
    220 /*
    221  * Try to flush old fragments to make space for new ones
    222  */
    223 
    224 void
    225 pf_flush_fragments(void)
    226 {
    227 	struct pf_fragment	*frag;
    228 	int			 goal;
    229 
    230 	goal = pf_nfrents * 9 / 10;
    231 	DPFPRINTF(("trying to free > %d frents\n",
    232 	    pf_nfrents - goal));
    233 	while (goal < pf_nfrents) {
    234 		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
    235 		if (frag == NULL)
    236 			break;
    237 		pf_free_fragment(frag);
    238 	}
    239 
    240 
    241 	goal = pf_ncache * 9 / 10;
    242 	DPFPRINTF(("trying to free > %d cache entries\n",
    243 	    pf_ncache - goal));
    244 	while (goal < pf_ncache) {
    245 		frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
    246 		if (frag == NULL)
    247 			break;
    248 		pf_free_fragment(frag);
    249 	}
    250 }
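
/*
 * Worked example: with pf_nfrents == 100 the goal above becomes
 * 100 * 9 / 10 == 90, so whole fragment queues are freed, oldest first,
 * until at most 90 frents remain; each pf_free_fragment() call may release
 * several frents at once, so the loop can overshoot the goal slightly.
 */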
    251 
    252 /* Frees the fragments and all associated entries */
    253 
    254 void
    255 pf_free_fragment(struct pf_fragment *frag)
    256 {
    257 	struct pf_frent		*frent;
    258 	struct pf_frcache	*frcache;
    259 
    260 	/* Free all fragments */
    261 	if (BUFFER_FRAGMENTS(frag)) {
    262 		for (frent = LIST_FIRST(&frag->fr_queue); frent;
    263 		    frent = LIST_FIRST(&frag->fr_queue)) {
    264 			LIST_REMOVE(frent, fr_next);
    265 
    266 			m_freem(frent->fr_m);
    267 			pool_put(&pf_frent_pl, frent);
    268 			pf_nfrents--;
    269 		}
    270 	} else {
    271 		for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
    272 		    frcache = LIST_FIRST(&frag->fr_cache)) {
    273 			LIST_REMOVE(frcache, fr_next);
    274 
    275 			KASSERT(LIST_EMPTY(&frag->fr_cache) ||
    276 			    LIST_FIRST(&frag->fr_cache)->fr_off >
    277 			    frcache->fr_end);
    278 
    279 			pool_put(&pf_cent_pl, frcache);
    280 			pf_ncache--;
    281 		}
    282 	}
    283 
    284 	pf_remove_fragment(frag);
    285 }
    286 
    287 void
    288 pf_ip2key(struct pf_fragment *key, struct ip *ip)
    289 {
    290 	key->fr_p = ip->ip_p;
    291 	key->fr_id = ip->ip_id;
    292 	key->fr_src.s_addr = ip->ip_src.s_addr;
    293 	key->fr_dst.s_addr = ip->ip_dst.s_addr;
    294 }
    295 
    296 struct pf_fragment *
    297 pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
    298 {
    299 	struct pf_fragment	 key;
    300 	struct pf_fragment	*frag;
    301 
    302 	pf_ip2key(&key, ip);
    303 
    304 	frag = RB_FIND(pf_frag_tree, tree, &key);
    305 	if (frag != NULL) {
    306 		/* XXX Are we sure we want to update the timeout? */
    307 		frag->fr_timeout = time.tv_sec;
    308 		if (BUFFER_FRAGMENTS(frag)) {
    309 			TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
    310 			TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
    311 		} else {
    312 			TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
    313 			TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
    314 		}
    315 	}
    316 
    317 	return (frag);
    318 }
    319 
    320 /* Removes a fragment from the fragment queue and frees the fragment */
    321 
    322 void
    323 pf_remove_fragment(struct pf_fragment *frag)
    324 {
    325 	if (BUFFER_FRAGMENTS(frag)) {
    326 		RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
    327 		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
    328 		pool_put(&pf_frag_pl, frag);
    329 	} else {
    330 		RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
    331 		TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
    332 		pool_put(&pf_cache_pl, frag);
    333 	}
    334 }
    335 
    336 #define FR_IP_OFF(fr)	((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
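
/*
 * The IP header stores the fragment offset in units of 8 bytes, so
 * FR_IP_OFF() above masks off the flag bits and shifts left by 3 to get a
 * byte offset.  For example, an offset field of 185 means byte offset
 * 185 * 8 == 1480, i.e. the second piece of a datagram split on a
 * 1500-byte MTU (20-byte header plus 1480 bytes of payload per piece).
 */
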
    337 struct mbuf *
    338 pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    339     struct pf_frent *frent, int mff)
    340 {
    341 	struct mbuf	*m = *m0, *m2;
    342 	struct pf_frent	*frea, *next;
    343 	struct pf_frent	*frep = NULL;
    344 	struct ip	*ip = frent->fr_ip;
    345 	int		 hlen = ip->ip_hl << 2;
    346 	u_int16_t	 off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
    347 	u_int16_t	 ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
    348 	u_int16_t	 max = ip_len + off;
    349 
    350 	KASSERT(*frag == NULL || BUFFER_FRAGMENTS(*frag));
    351 
    352 	/* Strip off ip header */
    353 	m->m_data += hlen;
    354 	m->m_len -= hlen;
    355 
    356 	/* Create a new reassembly queue for this packet */
    357 	if (*frag == NULL) {
    358 		*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
    359 		if (*frag == NULL) {
    360 			pf_flush_fragments();
    361 			*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
    362 			if (*frag == NULL)
    363 				goto drop_fragment;
    364 		}
    365 
    366 		(*frag)->fr_flags = 0;
    367 		(*frag)->fr_max = 0;
    368 		(*frag)->fr_src = frent->fr_ip->ip_src;
    369 		(*frag)->fr_dst = frent->fr_ip->ip_dst;
    370 		(*frag)->fr_p = frent->fr_ip->ip_p;
    371 		(*frag)->fr_id = frent->fr_ip->ip_id;
    372 		(*frag)->fr_timeout = time.tv_sec;
    373 		LIST_INIT(&(*frag)->fr_queue);
    374 
    375 		RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
    376 		TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);
    377 
    378 		/* We do not have a previous fragment */
    379 		frep = NULL;
    380 		goto insert;
    381 	}
    382 
    383 	/*
    384 	 * Find a fragment after the current one:
    385 	 *  - off contains the real shifted offset.
    386 	 */
    387 	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
    388 		if (FR_IP_OFF(frea) > off)
    389 			break;
    390 		frep = frea;
    391 	}
    392 
    393 	KASSERT(frep != NULL || frea != NULL);
    394 
    395 	if (frep != NULL &&
    396 	    FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *
    397 	    4 > off)
    398 	{
    399 		u_int16_t	precut;
    400 
    401 		precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
    402 		    frep->fr_ip->ip_hl * 4 - off;
    403 		if (precut >= ip_len)
    404 			goto drop_fragment;
    405 		m_adj(frent->fr_m, precut);
    406 		DPFPRINTF(("overlap -%d\n", precut));
    407 		/* Enforce 8 byte boundaries */
    408 		ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
    409 		off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
    410 		ip_len -= precut;
    411 		ip->ip_len = htons(ip_len);
    412 	}
    413 
    414 	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
    415 	    frea = next)
    416 	{
    417 		u_int16_t	aftercut;
    418 
    419 		aftercut = ip_len + off - FR_IP_OFF(frea);
    420 		DPFPRINTF(("adjust overlap %d\n", aftercut));
    421 		if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl
    422 		    * 4)
    423 		{
    424 			frea->fr_ip->ip_len =
    425 			    htons(ntohs(frea->fr_ip->ip_len) - aftercut);
    426 			frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
    427 			    (aftercut >> 3));
    428 			m_adj(frea->fr_m, aftercut);
    429 			break;
    430 		}
    431 
    432 		/* This fragment is completely overlapped, lose it */
    433 		next = LIST_NEXT(frea, fr_next);
    434 		m_freem(frea->fr_m);
    435 		LIST_REMOVE(frea, fr_next);
    436 		pool_put(&pf_frent_pl, frea);
    437 		pf_nfrents--;
    438 	}
    439 
    440  insert:
    441 	/* Update maximum data size */
    442 	if ((*frag)->fr_max < max)
    443 		(*frag)->fr_max = max;
    444 	/* This is the last segment */
    445 	if (!mff)
    446 		(*frag)->fr_flags |= PFFRAG_SEENLAST;
    447 
    448 	if (frep == NULL)
    449 		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
    450 	else
    451 		LIST_INSERT_AFTER(frep, frent, fr_next);
    452 
    453 	/* Check if we are completely reassembled */
    454 	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
    455 		return (NULL);
    456 
    457 	/* Check if we have all the data */
    458 	off = 0;
    459 	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
    460 		next = LIST_NEXT(frep, fr_next);
    461 
    462 		off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
    463 		if (off < (*frag)->fr_max &&
    464 		    (next == NULL || FR_IP_OFF(next) != off))
    465 		{
    466 			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
    467 			    off, next == NULL ? -1 : FR_IP_OFF(next),
    468 			    (*frag)->fr_max));
    469 			return (NULL);
    470 		}
    471 	}
    472 	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
    473 	if (off < (*frag)->fr_max)
    474 		return (NULL);
    475 
    476 	/* We have all the data */
    477 	frent = LIST_FIRST(&(*frag)->fr_queue);
    478 	KASSERT(frent != NULL);
    479 	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
    480 		DPFPRINTF(("drop: too big: %d\n", off));
    481 		pf_free_fragment(*frag);
    482 		*frag = NULL;
    483 		return (NULL);
    484 	}
    485 	next = LIST_NEXT(frent, fr_next);
    486 
    487 	/* Magic from ip_input */
    488 	ip = frent->fr_ip;
    489 	m = frent->fr_m;
    490 	m2 = m->m_next;
    491 	m->m_next = NULL;
    492 	m_cat(m, m2);
    493 	pool_put(&pf_frent_pl, frent);
    494 	pf_nfrents--;
    495 	for (frent = next; frent != NULL; frent = next) {
    496 		next = LIST_NEXT(frent, fr_next);
    497 
    498 		m2 = frent->fr_m;
    499 		pool_put(&pf_frent_pl, frent);
    500 		pf_nfrents--;
    501 		m_cat(m, m2);
    502 	}
    503 
    504 	ip->ip_src = (*frag)->fr_src;
    505 	ip->ip_dst = (*frag)->fr_dst;
    506 
    507 	/* Remove from fragment queue */
    508 	pf_remove_fragment(*frag);
    509 	*frag = NULL;
    510 
    511 	hlen = ip->ip_hl << 2;
    512 	ip->ip_len = htons(off + hlen);
    513 	m->m_len += hlen;
    514 	m->m_data -= hlen;
    515 
    516 	/* some debugging cruft by sklower, below, will go away soon */
    517 	/* XXX this should be done elsewhere */
    518 	if (m->m_flags & M_PKTHDR) {
    519 		int plen = 0;
    520 		for (m2 = m; m2; m2 = m2->m_next)
    521 			plen += m2->m_len;
    522 		m->m_pkthdr.len = plen;
    523 	}
    524 
    525 	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
    526 	return (m);
    527 
    528  drop_fragment:
    529 	/* Oops - fail safe - drop packet */
    530 	pool_put(&pf_frent_pl, frent);
    531 	pf_nfrents--;
    532 	m_freem(m);
    533 	return (NULL);
    534 }
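
/*
 * Summary of pf_reassemble(): the front of the new fragment is trimmed
 * where it overlaps the previous queue entry (precut), and queued entries
 * that the new data overlaps are trimmed or dropped (aftercut), so the
 * queue never holds overlapping data.  Only when the last fragment has
 * been seen (PFFRAG_SEENLAST) and the queue is gap-free up to fr_max are
 * the mbufs concatenated with m_cat() and returned as a single packet;
 * until then the fragment stays buffered on the queue, NULL is returned
 * and pf_normalize_ip() reports PF_DROP for the individual fragment.
 */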
    535 
    536 struct mbuf *
    537 pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    538     int drop, int *nomem)
    539 {
    540 	struct mbuf		*m = *m0;
    541 	struct pf_frcache	*frp, *fra, *cur = NULL;
    542 	int			 ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
    543 	u_int16_t		 off = ntohs(h->ip_off) << 3;
    544 	u_int16_t		 max = ip_len + off;
    545 	int			 hosed = 0;
    546 
    547 	KASSERT(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
    548 
    549 	/* Create a new range queue for this packet */
    550 	if (*frag == NULL) {
    551 		*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
    552 		if (*frag == NULL) {
    553 			pf_flush_fragments();
    554 			*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
    555 			if (*frag == NULL)
    556 				goto no_mem;
    557 		}
    558 
    559 		/* Get an entry for the queue */
    560 		cur = pool_get(&pf_cent_pl, PR_NOWAIT);
    561 		if (cur == NULL) {
    562 			pool_put(&pf_cache_pl, *frag);
    563 			*frag = NULL;
    564 			goto no_mem;
    565 		}
    566 		pf_ncache++;
    567 
    568 		(*frag)->fr_flags = PFFRAG_NOBUFFER;
    569 		(*frag)->fr_max = 0;
    570 		(*frag)->fr_src = h->ip_src;
    571 		(*frag)->fr_dst = h->ip_dst;
    572 		(*frag)->fr_p = h->ip_p;
    573 		(*frag)->fr_id = h->ip_id;
    574 		(*frag)->fr_timeout = time.tv_sec;
    575 
    576 		cur->fr_off = off;
    577 		cur->fr_end = max;
    578 		LIST_INIT(&(*frag)->fr_cache);
    579 		LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);
    580 
    581 		RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
    582 		TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);
    583 
    584 		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));
    585 
    586 		goto pass;
    587 	}
    588 
    589 	/*
    590 	 * Find a fragment after the current one:
    591 	 *  - off contains the real shifted offset.
    592 	 */
    593 	frp = NULL;
    594 	LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
    595 		if (fra->fr_off > off)
    596 			break;
    597 		frp = fra;
    598 	}
    599 
    600 	KASSERT(frp != NULL || fra != NULL);
    601 
    602 	if (frp != NULL) {
    603 		int	precut;
    604 
    605 		precut = frp->fr_end - off;
    606 		if (precut >= ip_len) {
    607 			/* Fragment is entirely a duplicate */
    608 			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
    609 			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
    610 			goto drop_fragment;
    611 		}
    612 		if (precut == 0) {
    613 			/* They are adjacent.  Fixup cache entry */
    614 			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
    615 			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
    616 			frp->fr_end = max;
    617 		} else if (precut > 0) {
    618 			/* The first part of this payload overlaps with a
    619 			 * fragment that has already been passed.
    620 			 * Need to trim off the first part of the payload.
    621 			 * But to do so easily, we need to create another
    622 			 * mbuf to throw the original header into.
    623 			 */
    624 
    625 			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
    626 			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
    627 			    max));
    628 
    629 			off += precut;
    630 			max -= precut;
    631 			/* Update the previous frag to encompass this one */
    632 			frp->fr_end = max;
    633 
    634 			if (!drop) {
    635 				/* XXX Optimization opportunity
    636 				 * This is a very heavy way to trim the payload.
    637 				 * we could do it much faster by diddling mbuf
    638 				 * internals but that would be even less legible
    639 				 * than this mbuf magic.  For my next trick,
    640 				 * I'll pull a rabbit out of my laptop.
    641 				 */
    642 #ifdef __OpenBSD__
    643 				*m0 = m_copym2(m, 0, h->ip_hl << 2, M_NOWAIT);
    644 #else
    645 				*m0 = m_dup(m, 0, h->ip_hl << 2, M_NOWAIT);
    646 #endif
    647 				if (*m0 == NULL)
    648 					goto no_mem;
    649 				KASSERT((*m0)->m_next == NULL);
    650 				m_adj(m, precut + (h->ip_hl << 2));
    651 				m_cat(*m0, m);
    652 				m = *m0;
    653 				if (m->m_flags & M_PKTHDR) {
    654 					int plen = 0;
    655 					struct mbuf *t;
    656 					for (t = m; t; t = t->m_next)
    657 						plen += t->m_len;
    658 					m->m_pkthdr.len = plen;
    659 				}
    660 
    661 
    662 				h = mtod(m, struct ip *);
    663 
    664 
    665 				KASSERT((int)m->m_len ==
    666 				    ntohs(h->ip_len) - precut);
    667 				h->ip_off = htons(ntohs(h->ip_off) +
    668 				    (precut >> 3));
    669 				h->ip_len = htons(ntohs(h->ip_len) - precut);
    670 			} else {
    671 				hosed++;
    672 			}
    673 		} else {
    674 			/* There is a gap between fragments */
    675 
    676 			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
    677 			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
    678 			    max));
    679 
    680 			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
    681 			if (cur == NULL)
    682 				goto no_mem;
    683 			pf_ncache++;
    684 
    685 			cur->fr_off = off;
    686 			cur->fr_end = max;
    687 			LIST_INSERT_AFTER(frp, cur, fr_next);
    688 		}
    689 	}
    690 
    691 	if (fra != NULL) {
    692 		int	aftercut;
    693 		int	merge = 0;
    694 
    695 		aftercut = max - fra->fr_off;
    696 		if (aftercut == 0) {
    697 			/* Adjacent fragments */
    698 			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
    699 			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
    700 			fra->fr_off = off;
    701 			merge = 1;
    702 		} else if (aftercut > 0) {
    703 			/* Need to chop off the tail of this fragment */
    704 			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
    705 			    h->ip_id, aftercut, off, max, fra->fr_off,
    706 			    fra->fr_end));
    707 			fra->fr_off = off;
    708 			max -= aftercut;
    709 
    710 			merge = 1;
    711 
    712 			if (!drop) {
    713 				m_adj(m, -aftercut);
    714 				if (m->m_flags & M_PKTHDR) {
    715 					int plen = 0;
    716 					struct mbuf *t;
    717 					for (t = m; t; t = t->m_next)
    718 						plen += t->m_len;
    719 					m->m_pkthdr.len = plen;
    720 				}
    721 				h = mtod(m, struct ip *);
    722 				KASSERT((int)m->m_len ==
    723 				    ntohs(h->ip_len) - aftercut);
    724 				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
    725 			} else {
    726 				hosed++;
    727 			}
    728 		} else {
    729 			/* There is a gap between fragments */
    730 			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
    731 			    h->ip_id, -aftercut, off, max, fra->fr_off,
    732 			    fra->fr_end));
    733 
    734 			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
    735 			if (cur == NULL)
    736 				goto no_mem;
    737 			pf_ncache++;
    738 
    739 			cur->fr_off = off;
    740 			cur->fr_end = max;
    741 			LIST_INSERT_BEFORE(fra, cur, fr_next);
    742 		}
    743 
    744 
    745 		/* Need to glue together two separate fragment descriptors */
    746 		if (merge) {
    747 			if (cur && fra->fr_off <= cur->fr_end) {
    748 				/* Need to merge in a previous 'cur' */
    749 				DPFPRINTF(("fragcache[%d]: adjacent(merge "
    750 				    "%d-%d) %d-%d (%d-%d)\n",
    751 				    h->ip_id, cur->fr_off, cur->fr_end, off,
    752 				    max, fra->fr_off, fra->fr_end));
    753 				fra->fr_off = cur->fr_off;
    754 				LIST_REMOVE(cur, fr_next);
    755 				pool_put(&pf_cent_pl, cur);
    756 				pf_ncache--;
    757 				cur = NULL;
    758 
    759 			} else if (frp && fra->fr_off <= frp->fr_end) {
    760 				/* Need to merge in a modified 'frp' */
    761 				KASSERT(cur == NULL);
    762 				DPFPRINTF(("fragcache[%d]: adjacent(merge "
    763 				    "%d-%d) %d-%d (%d-%d)\n",
    764 				    h->ip_id, frp->fr_off, frp->fr_end, off,
    765 				    max, fra->fr_off, fra->fr_end));
    766 				fra->fr_off = frp->fr_off;
    767 				LIST_REMOVE(frp, fr_next);
    768 				pool_put(&pf_cent_pl, frp);
    769 				pf_ncache--;
    770 				frp = NULL;
    771 
    772 			}
    773 		}
    774 	}
    775 
    776 	if (hosed) {
    777 		/*
    778 		 * We must keep tracking the overall fragment even when
    779 		 * we're going to drop it anyway so that we know when to
    780 		 * free the overall descriptor.  Thus we drop the frag late.
    781 		 */
    782 		goto drop_fragment;
    783 	}
    784 
    785 
    786  pass:
    787 	/* Update maximum data size */
    788 	if ((*frag)->fr_max < max)
    789 		(*frag)->fr_max = max;
    790 
    791 	/* This is the last segment */
    792 	if (!mff)
    793 		(*frag)->fr_flags |= PFFRAG_SEENLAST;
    794 
    795 	/* Check if we are completely reassembled */
    796 	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
    797 	    LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
    798 	    LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
    799 		/* Remove from fragment queue */
    800 		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
    801 		    (*frag)->fr_max));
    802 		pf_free_fragment(*frag);
    803 		*frag = NULL;
    804 	}
    805 
    806 	return (m);
    807 
    808  no_mem:
    809 	*nomem = 1;
    810 
    811 	/* Still need to pay attention to !IP_MF */
    812 	if (!mff && *frag != NULL)
    813 		(*frag)->fr_flags |= PFFRAG_SEENLAST;
    814 
    815 	m_freem(m);
    816 	return (NULL);
    817 
    818  drop_fragment:
    819 
    820 	/* Still need to pay attention to !IP_MF */
    821 	if (!mff && *frag != NULL)
    822 		(*frag)->fr_flags |= PFFRAG_SEENLAST;
    823 
    824 	if (drop) {
    825 		/* This fragment has been deemed bad.  Don't reassemble it */
    826 		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
    827 			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
    828 			    h->ip_id));
    829 		(*frag)->fr_flags |= PFFRAG_DROP;
    830 	}
    831 
    832 	m_freem(m);
    833 	return (NULL);
    834 }
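
/*
 * Summary of pf_fragcache(): rather than buffering mbufs, only the byte
 * ranges that have already been forwarded are remembered as pf_frcache
 * entries.  A fragment overlapping data already passed is trimmed in place
 * when drop is 0 (the PFRULE_FRAGCROP mode) or discarded when drop is set
 * (PFRULE_FRAGDROP), in which case PFFRAG_DROP makes the caller drop the
 * datagram's remaining fragments as well.  Once the merged ranges cover
 * 0..fr_max and the last fragment has been seen, the descriptor is freed.
 */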
    835 
    836 int
    837 pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason)
    838 {
    839 	struct mbuf		*m = *m0;
    840 	struct pf_rule		*r;
    841 	struct pf_frent		*frent;
    842 	struct pf_fragment	*frag = NULL;
    843 	struct ip		*h = mtod(m, struct ip *);
    844 	int			 mff = (ntohs(h->ip_off) & IP_MF);
    845 	int			 hlen = h->ip_hl << 2;
    846 	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
    847 	u_int16_t		 max;
    848 	int			 ip_len;
    849 	int			 ip_off;
    850 
    851 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
    852 	while (r != NULL) {
    853 		r->evaluations++;
    854 		if (r->kif != NULL &&
    855 		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
    856 			r = r->skip[PF_SKIP_IFP].ptr;
    857 		else if (r->direction && r->direction != dir)
    858 			r = r->skip[PF_SKIP_DIR].ptr;
    859 		else if (r->af && r->af != AF_INET)
    860 			r = r->skip[PF_SKIP_AF].ptr;
    861 		else if (r->proto && r->proto != h->ip_p)
    862 			r = r->skip[PF_SKIP_PROTO].ptr;
    863 		else if (PF_MISMATCHAW(&r->src.addr,
    864 		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET, r->src.not))
    865 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
    866 		else if (PF_MISMATCHAW(&r->dst.addr,
    867 		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, r->dst.not))
    868 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
    869 		else
    870 			break;
    871 	}
    872 
    873 	if (r == NULL)
    874 		return (PF_PASS);
    875 	else
    876 		r->packets++;
    877 
    878 	/* Check for illegal packets */
    879 	if (hlen < (int)sizeof(struct ip))
    880 		goto drop;
    881 
    882 	if (hlen > ntohs(h->ip_len))
    883 		goto drop;
    884 
    885 	/* Clear IP_DF if the rule uses the no-df option */
    886 	if (r->rule_flag & PFRULE_NODF)
    887 		h->ip_off &= htons(~IP_DF);
    888 
    889 	/* We will need other tests here */
    890 	if (!fragoff && !mff)
    891 		goto no_fragment;
    892 
    893 	/* We're dealing with a fragment now. Don't allow fragments
    894 	 * with IP_DF to enter the cache. If the flag was cleared by
    895 	 * no-df above, fine. Otherwise drop it.
    896 	 */
    897 	if (h->ip_off & htons(IP_DF)) {
    898 		DPFPRINTF(("IP_DF\n"));
    899 		goto bad;
    900 	}
    901 
    902 	ip_len = ntohs(h->ip_len) - hlen;
    903 	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
    904 
    905 	/* All fragments are 8 byte aligned */
    906 	if (mff && (ip_len & 0x7)) {
    907 		DPFPRINTF(("mff and %d\n", ip_len));
    908 		goto bad;
    909 	}
    910 
    911 	/* Respect maximum length */
    912 	if (fragoff + ip_len > IP_MAXPACKET) {
    913 		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
    914 		goto bad;
    915 	}
    916 	max = fragoff + ip_len;
    917 
    918 	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
    919 		/* Fully buffer all of the fragments */
    920 
    921 		frag = pf_find_fragment(h, &pf_frag_tree);
    922 
    923 		/* Check if we saw the last fragment already */
    924 		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
    925 		    max > frag->fr_max)
    926 			goto bad;
    927 
    928 		/* Get an entry for the fragment queue */
    929 		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
    930 		if (frent == NULL) {
    931 			REASON_SET(reason, PFRES_MEMORY);
    932 			return (PF_DROP);
    933 		}
    934 		pf_nfrents++;
    935 		frent->fr_ip = h;
    936 		frent->fr_m = m;
    937 
    938 		/* Might return a completely reassembled mbuf, or NULL */
    939 		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
    940 		*m0 = m = pf_reassemble(m0, &frag, frent, mff);
    941 
    942 		if (m == NULL)
    943 			return (PF_DROP);
    944 
    945 		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
    946 			goto drop;
    947 
    948 		h = mtod(m, struct ip *);
    949 	} else {
    950 		/* non-buffering fragment cache (drops or masks overlaps) */
    951 		int	nomem = 0;
    952 
    953 		if (dir == PF_OUT) {
    954 			if (m_tag_find(m, PACKET_TAG_PF_FRAGCACHE, NULL) !=
    955 			    NULL) {
    956 				/* Already passed the fragment cache in the
    957 				 * input direction.  If we continued, it would
    958 				 * appear to be a dup and would be dropped.
    959 				 */
    960 				goto fragment_pass;
    961 			}
    962 		}
    963 
    964 		frag = pf_find_fragment(h, &pf_cache_tree);
    965 
    966 		/* Check if we saw the last fragment already */
    967 		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
    968 		    max > frag->fr_max) {
    969 			if (r->rule_flag & PFRULE_FRAGDROP)
    970 				frag->fr_flags |= PFFRAG_DROP;
    971 			goto bad;
    972 		}
    973 
    974 		*m0 = m = pf_fragcache(m0, h, &frag, mff,
    975 		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
    976 		if (m == NULL) {
    977 			if (nomem)
    978 				goto no_mem;
    979 			goto drop;
    980 		}
    981 
    982 		if (dir == PF_IN) {
    983 			struct m_tag	*mtag;
    984 
    985 			mtag = m_tag_get(PACKET_TAG_PF_FRAGCACHE, 0, M_NOWAIT);
    986 			if (mtag == NULL)
    987 				goto no_mem;
    988 			m_tag_prepend(m, mtag);
    989 		}
    990 		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
    991 			goto drop;
    992 		goto fragment_pass;
    993 	}
    994 
    995  no_fragment:
    996 	/* At this point, only IP_DF is allowed in ip_off */
    997 	h->ip_off &= htons(IP_DF);
    998 
    999 	/* Enforce a minimum ttl, may cause endless packet loops */
   1000 	if (r->min_ttl && h->ip_ttl < r->min_ttl)
   1001 		h->ip_ttl = r->min_ttl;
   1002 
   1003 	if (r->rule_flag & PFRULE_RANDOMID)
   1004 		h->ip_id = ip_randomid();
   1005 
   1006 	return (PF_PASS);
   1007 
   1008  fragment_pass:
   1009 	/* Enforce a minimum ttl, may cause endless packet loops */
   1010 	if (r->min_ttl && h->ip_ttl < r->min_ttl)
   1011 		h->ip_ttl = r->min_ttl;
   1012 
   1013 	return (PF_PASS);
   1014 
   1015  no_mem:
   1016 	REASON_SET(reason, PFRES_MEMORY);
   1017 	if (r != NULL && r->log)
   1018 		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
   1019 	return (PF_DROP);
   1020 
   1021  drop:
   1022 	REASON_SET(reason, PFRES_NORM);
   1023 	if (r != NULL && r->log)
   1024 		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
   1025 	return (PF_DROP);
   1026 
   1027  bad:
   1028 	DPFPRINTF(("dropping bad fragment\n"));
   1029 
   1030 	/* Free associated fragments */
   1031 	if (frag != NULL)
   1032 		pf_free_fragment(frag);
   1033 
   1034 	REASON_SET(reason, PFRES_FRAG);
   1035 	if (r != NULL && r->log)
   1036 		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
   1037 
   1038 	return (PF_DROP);
   1039 }
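
/*
 * The rule flags tested above map onto the scrub options of pf.conf from
 * this era; an illustrative ruleset line along the lines of
 *
 *	scrub in all no-df random-id min-ttl 64 fragment reassemble
 *
 * would set PFRULE_NODF and PFRULE_RANDOMID, load r->min_ttl, and select
 * the buffering pf_reassemble() path, since neither PFRULE_FRAGCROP nor
 * PFRULE_FRAGDROP is set.  The exact keyword parsing lives in pfctl, not
 * in this file.
 */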
   1040 
   1041 #ifdef INET6
   1042 int
   1043 pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
   1044     u_short *reason)
   1045 {
   1046 	struct mbuf		*m = *m0;
   1047 	struct pf_rule		*r;
   1048 	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
   1049 	int			 off;
   1050 	struct ip6_ext		 ext;
   1051 	struct ip6_opt		 opt;
   1052 	struct ip6_opt_jumbo	 jumbo;
   1053 	struct ip6_frag		 frag;
   1054 	u_int32_t		 jumbolen = 0, plen;
   1055 	u_int16_t		 fragoff = 0;
   1056 	int			 optend;
   1057 	int			 ooff;
   1058 	u_int8_t		 proto;
   1059 	int			 terminal;
   1060 
   1061 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
   1062 	while (r != NULL) {
   1063 		r->evaluations++;
   1064 		if (r->kif != NULL &&
   1065 		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
   1066 			r = r->skip[PF_SKIP_IFP].ptr;
   1067 		else if (r->direction && r->direction != dir)
   1068 			r = r->skip[PF_SKIP_DIR].ptr;
   1069 		else if (r->af && r->af != AF_INET6)
   1070 			r = r->skip[PF_SKIP_AF].ptr;
   1071 #if 0 /* header chain! */
   1072 		else if (r->proto && r->proto != h->ip6_nxt)
   1073 			r = r->skip[PF_SKIP_PROTO].ptr;
   1074 #endif
   1075 		else if (PF_MISMATCHAW(&r->src.addr,
   1076 		    (struct pf_addr *)&h->ip6_src, AF_INET6, r->src.not))
   1077 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
   1078 		else if (PF_MISMATCHAW(&r->dst.addr,
   1079 		    (struct pf_addr *)&h->ip6_dst, AF_INET6, r->dst.not))
   1080 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
   1081 		else
   1082 			break;
   1083 	}
   1084 
   1085 	if (r == NULL)
   1086 		return (PF_PASS);
   1087 	else
   1088 		r->packets++;
   1089 
   1090 	/* Check for illegal packets */
   1091 	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
   1092 		goto drop;
   1093 
   1094 	off = sizeof(struct ip6_hdr);
   1095 	proto = h->ip6_nxt;
   1096 	terminal = 0;
   1097 	do {
   1098 		switch (proto) {
   1099 		case IPPROTO_FRAGMENT:
   1100 			goto fragment;
   1101 			break;
   1102 		case IPPROTO_AH:
   1103 		case IPPROTO_ROUTING:
   1104 		case IPPROTO_DSTOPTS:
   1105 			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
   1106 			    NULL, AF_INET6))
   1107 				goto shortpkt;
   1108 			if (proto == IPPROTO_AH)
   1109 				off += (ext.ip6e_len + 2) * 4;
   1110 			else
   1111 				off += (ext.ip6e_len + 1) * 8;
   1112 			proto = ext.ip6e_nxt;
   1113 			break;
   1114 		case IPPROTO_HOPOPTS:
   1115 			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
   1116 			    NULL, AF_INET6))
   1117 				goto shortpkt;
   1118 			optend = off + (ext.ip6e_len + 1) * 8;
   1119 			ooff = off + sizeof(ext);
   1120 			do {
   1121 				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
   1122 				    sizeof(opt.ip6o_type), NULL, NULL,
   1123 				    AF_INET6))
   1124 					goto shortpkt;
   1125 				if (opt.ip6o_type == IP6OPT_PAD1) {
   1126 					ooff++;
   1127 					continue;
   1128 				}
   1129 				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
   1130 				    NULL, NULL, AF_INET6))
   1131 					goto shortpkt;
   1132 				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
   1133 					goto drop;
   1134 				switch (opt.ip6o_type) {
   1135 				case IP6OPT_JUMBO:
   1136 					if (h->ip6_plen != 0)
   1137 						goto drop;
   1138 					if (!pf_pull_hdr(m, ooff, &jumbo,
   1139 					    sizeof(jumbo), NULL, NULL,
   1140 					    AF_INET6))
   1141 						goto shortpkt;
   1142 					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
   1143 					    sizeof(jumbolen));
   1144 					jumbolen = ntohl(jumbolen);
   1145 					if (jumbolen <= IPV6_MAXPACKET)
   1146 						goto drop;
   1147 					if (sizeof(struct ip6_hdr) + jumbolen !=
   1148 					    m->m_pkthdr.len)
   1149 						goto drop;
   1150 					break;
   1151 				default:
   1152 					break;
   1153 				}
   1154 				ooff += sizeof(opt) + opt.ip6o_len;
   1155 			} while (ooff < optend);
   1156 
   1157 			off = optend;
   1158 			proto = ext.ip6e_nxt;
   1159 			break;
   1160 		default:
   1161 			terminal = 1;
   1162 			break;
   1163 		}
   1164 	} while (!terminal);
   1165 
   1166 	/* jumbo payload option must be present, or plen > 0 */
   1167 	if (ntohs(h->ip6_plen) == 0)
   1168 		plen = jumbolen;
   1169 	else
   1170 		plen = ntohs(h->ip6_plen);
   1171 	if (plen == 0)
   1172 		goto drop;
   1173 	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
   1174 		goto shortpkt;
   1175 
   1176 	/* Enforce a minimum ttl, may cause endless packet loops */
   1177 	if (r->min_ttl && h->ip6_hlim < r->min_ttl)
   1178 		h->ip6_hlim = r->min_ttl;
   1179 
   1180 	return (PF_PASS);
   1181 
   1182  fragment:
   1183 	if (ntohs(h->ip6_plen) == 0 || jumbolen)
   1184 		goto drop;
   1185 	plen = ntohs(h->ip6_plen);
   1186 
   1187 	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
   1188 		goto shortpkt;
   1189 	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
   1190 	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
   1191 		goto badfrag;
   1192 
   1193 	/* do something about it */
   1194 	return (PF_PASS);
   1195 
   1196  shortpkt:
   1197 	REASON_SET(reason, PFRES_SHORT);
   1198 	if (r != NULL && r->log)
   1199 		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
   1200 	return (PF_DROP);
   1201 
   1202  drop:
   1203 	REASON_SET(reason, PFRES_NORM);
   1204 	if (r != NULL && r->log)
   1205 		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
   1206 	return (PF_DROP);
   1207 
   1208  badfrag:
   1209 	REASON_SET(reason, PFRES_FRAG);
   1210 	if (r != NULL && r->log)
   1211 		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
   1212 	return (PF_DROP);
   1213 }
   1214 #endif
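
/*
 * Note that the IPv6 path above only sanity-checks the extension header
 * chain, the jumbo payload length and the fragment offset; unlike the
 * IPv4 path it performs neither reassembly nor fragment caching (see the
 * "do something about it" placeholder before PF_PASS in the fragment
 * case).
 */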
   1215 
   1216 int
   1217 pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
   1218     int off, void *h, struct pf_pdesc *pd)
   1219 {
   1220 	struct pf_rule	*r, *rm = NULL;
   1221 	struct tcphdr	*th = pd->hdr.tcp;
   1222 	int		 rewrite = 0;
   1223 	u_short		 reason;
   1224 	u_int8_t	 flags;
   1225 	sa_family_t	 af = pd->af;
   1226 
   1227 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
   1228 	while (r != NULL) {
   1229 		r->evaluations++;
   1230 		if (r->kif != NULL &&
   1231 		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
   1232 			r = r->skip[PF_SKIP_IFP].ptr;
   1233 		else if (r->direction && r->direction != dir)
   1234 			r = r->skip[PF_SKIP_DIR].ptr;
   1235 		else if (r->af && r->af != af)
   1236 			r = r->skip[PF_SKIP_AF].ptr;
   1237 		else if (r->proto && r->proto != pd->proto)
   1238 			r = r->skip[PF_SKIP_PROTO].ptr;
   1239 		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
   1240 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
   1241 		else if (r->src.port_op && !pf_match_port(r->src.port_op,
   1242 			    r->src.port[0], r->src.port[1], th->th_sport))
   1243 			r = r->skip[PF_SKIP_SRC_PORT].ptr;
   1244 		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
   1245 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
   1246 		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
   1247 			    r->dst.port[0], r->dst.port[1], th->th_dport))
   1248 			r = r->skip[PF_SKIP_DST_PORT].ptr;
   1249 		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
   1250 			    pf_osfp_fingerprint(pd, m, off, th),
   1251 			    r->os_fingerprint))
   1252 			r = TAILQ_NEXT(r, entries);
   1253 		else {
   1254 			rm = r;
   1255 			break;
   1256 		}
   1257 	}
   1258 
   1259 	if (rm == NULL)
   1260 		return (PF_PASS);
   1261 	else
   1262 		r->packets++;
   1263 
   1264 	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
   1265 		pd->flags |= PFDESC_TCP_NORM;
   1266 
   1267 	flags = th->th_flags;
   1268 	if (flags & TH_SYN) {
   1269 		/* Illegal packet */
   1270 		if (flags & TH_RST)
   1271 			goto tcp_drop;
   1272 
   1273 		if (flags & TH_FIN)
   1274 			flags &= ~TH_FIN;
   1275 	} else {
   1276 		/* Illegal packet */
   1277 		if (!(flags & (TH_ACK|TH_RST)))
   1278 			goto tcp_drop;
   1279 	}
   1280 
   1281 	if (!(flags & TH_ACK)) {
   1282 		/* These flags are only valid if ACK is set */
   1283 		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
   1284 			goto tcp_drop;
   1285 	}
   1286 
   1287 	/* Check for illegal header length */
   1288 	if (th->th_off < (sizeof(struct tcphdr) >> 2))
   1289 		goto tcp_drop;
   1290 
   1291 	/* If flags changed, or reserved data set, then adjust */
   1292 	if (flags != th->th_flags || th->th_x2 != 0) {
   1293 		u_int16_t	ov, nv;
   1294 
   1295 		ov = *(u_int16_t *)(&th->th_ack + 1);
   1296 		th->th_flags = flags;
   1297 		th->th_x2 = 0;
   1298 		nv = *(u_int16_t *)(&th->th_ack + 1);
   1299 
   1300 		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
   1301 		rewrite = 1;
   1302 	}
   1303 
   1304 	/* Remove urgent pointer, if TH_URG is not set */
   1305 	if (!(flags & TH_URG) && th->th_urp) {
   1306 		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
   1307 		th->th_urp = 0;
   1308 		rewrite = 1;
   1309 	}
   1310 
   1311 	/* Process options */
   1312 	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
   1313 		rewrite = 1;
   1314 
   1315 	/* copy back packet headers if we sanitized */
   1316 	if (rewrite)
   1317 		m_copyback(m, off, sizeof(*th), (caddr_t)th);
   1318 
   1319 	return (PF_PASS);
   1320 
   1321  tcp_drop:
   1322 	REASON_SET(&reason, PFRES_NORM);
   1323 	if (rm != NULL && r->log)
   1324 		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL);
   1325 	return (PF_DROP);
   1326 }
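
/*
 * pf_cksum_fixup() performs an incremental (RFC 1624 style) checksum
 * update: when one 16-bit word of the header changes from an old to a new
 * value, the ones-complement sum is adjusted rather than recomputed over
 * the whole segment.  That is why the flag sanitizing above reads the
 * 16-bit word covering th_off/th_x2/th_flags before and after the change,
 * and why clearing the urgent pointer folds th_urp out of th_sum.
 */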
   1327 
   1328 int
   1329 pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
   1330     struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
   1331 {
   1332 	u_int8_t hdr[60];
   1333 	u_int8_t *opt;
   1334 
   1335 	KASSERT(src->scrub == NULL);
   1336 
   1337 	src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
   1338 	if (src->scrub == NULL)
   1339 		return (1);
   1340 	bzero(src->scrub, sizeof(*src->scrub));
   1341 
   1342 	switch (pd->af) {
   1343 #ifdef INET
   1344 	case AF_INET: {
   1345 		struct ip *h = mtod(m, struct ip *);
   1346 		src->scrub->pfss_ttl = h->ip_ttl;
   1347 		break;
   1348 	}
   1349 #endif /* INET */
   1350 #ifdef INET6
   1351 	case AF_INET6: {
   1352 		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
   1353 		src->scrub->pfss_ttl = h->ip6_hlim;
   1354 		break;
   1355 	}
   1356 #endif /* INET6 */
   1357 	}
   1358 
   1359 
   1360 	/*
   1361 	 * All normalizations below are only begun if we see the start of
   1362 	 * All normalizations below are begun only if we see the start of
   1363 	 * the connection.  Each of them must set an enabled bit in pfss_flags.
   1364 	if ((th->th_flags & TH_SYN) == 0)
   1365 		return (0);
   1366 
   1367 
   1368 	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
   1369 	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
   1370 		/* Diddle with TCP options */
   1371 		int hlen;
   1372 		opt = hdr + sizeof(struct tcphdr);
   1373 		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
   1374 		while (hlen >= TCPOLEN_TIMESTAMP) {
   1375 			switch (*opt) {
   1376 			case TCPOPT_EOL:	/* FALLTHROUGH */
   1377 			case TCPOPT_NOP:
   1378 				opt++;
   1379 				hlen--;
   1380 				break;
   1381 			case TCPOPT_TIMESTAMP:
   1382 				if (opt[1] >= TCPOLEN_TIMESTAMP) {
   1383 					src->scrub->pfss_flags |=
   1384 					    PFSS_TIMESTAMP;
   1385 					src->scrub->pfss_ts_mod = arc4random();
   1386 				}
   1387 				/* FALLTHROUGH */
   1388 			default:
   1389 				hlen -= opt[1];
   1390 				opt += opt[1];
   1391 				break;
   1392 			}
   1393 		}
   1394 	}
   1395 
   1396 	return (0);
   1397 }
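
/*
 * pf_normalize_tcp_init() only arms timestamp modulation: when the SYN of
 * a connection carries a TCP timestamp option, PFSS_TIMESTAMP is set and a
 * random per-connection offset (pfss_ts_mod) is drawn with arc4random().
 * The offset is applied to every subsequent segment in
 * pf_normalize_tcp_stateful() below.
 */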
   1398 
   1399 void
   1400 pf_normalize_tcp_cleanup(struct pf_state *state)
   1401 {
   1402 	if (state->src.scrub)
   1403 		pool_put(&pf_state_scrub_pl, state->src.scrub);
   1404 	if (state->dst.scrub)
   1405 		pool_put(&pf_state_scrub_pl, state->dst.scrub);
   1406 
   1407 	/* Someday... flush the TCP segment reassembly descriptors. */
   1408 }
   1409 
   1410 int
   1411 pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
   1412     u_short *reason, struct tcphdr *th, struct pf_state_peer *src,
   1413     struct pf_state_peer *dst, int *writeback)
   1414 {
   1415 	u_int8_t hdr[60];
   1416 	u_int8_t *opt;
   1417 	int copyback = 0;
   1418 
   1419 	KASSERT(src->scrub || dst->scrub);
   1420 
   1421 	/*
   1422 	 * Enforce the minimum TTL seen for this connection.  Negate a common
   1423 	 * technique to evade an intrusion detection system and confuse
   1424 	 * firewall state code.
   1425 	 */
   1426 	switch (pd->af) {
   1427 #ifdef INET
   1428 	case AF_INET: {
   1429 		if (src->scrub) {
   1430 			struct ip *h = mtod(m, struct ip *);
   1431 			if (h->ip_ttl > src->scrub->pfss_ttl)
   1432 				src->scrub->pfss_ttl = h->ip_ttl;
   1433 			h->ip_ttl = src->scrub->pfss_ttl;
   1434 		}
   1435 		break;
   1436 	}
   1437 #endif /* INET */
   1438 #ifdef INET6
   1439 	case AF_INET6: {
   1440 		if (src->scrub) {
   1441 			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
   1442 			if (h->ip6_hlim > src->scrub->pfss_ttl)
   1443 				src->scrub->pfss_ttl = h->ip6_hlim;
   1444 			h->ip6_hlim = src->scrub->pfss_ttl;
   1445 		}
   1446 		break;
   1447 	}
   1448 #endif /* INET6 */
   1449 	}
   1450 
   1451 	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
   1452 	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
   1453 	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
   1454 	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
   1455 		/* Diddle with TCP options */
   1456 		int hlen;
   1457 		opt = hdr + sizeof(struct tcphdr);
   1458 		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
   1459 		while (hlen >= TCPOLEN_TIMESTAMP) {
   1460 			switch (*opt) {
   1461 			case TCPOPT_EOL:	/* FALLTHROUGH */
   1462 			case TCPOPT_NOP:
   1463 				opt++;
   1464 				hlen--;
   1465 				break;
   1466 			case TCPOPT_TIMESTAMP:
   1467 				/* Modulate the timestamps.  Can be used for
   1468 				 * NAT detection, OS uptime determination or
   1469 				 * reboot detection.
   1470 				 */
   1471 				if (opt[1] >= TCPOLEN_TIMESTAMP) {
   1472 					u_int32_t ts_value;
   1473 					if (src->scrub &&
   1474 					    (src->scrub->pfss_flags &
   1475 					    PFSS_TIMESTAMP)) {
   1476 						memcpy(&ts_value, &opt[2],
   1477 						    sizeof(u_int32_t));
   1478 						ts_value = htonl(ntohl(ts_value)
   1479 						    + src->scrub->pfss_ts_mod);
   1480 						pf_change_a(&opt[2],
   1481 						    &th->th_sum, ts_value, 0);
   1482 						copyback = 1;
   1483 					}
   1484 
   1485 					/* Modulate TS reply iff valid (!0) */
   1486 					memcpy(&ts_value, &opt[6],
   1487 					    sizeof(u_int32_t));
   1488 					if (ts_value && dst->scrub &&
   1489 					    (dst->scrub->pfss_flags &
   1490 					    PFSS_TIMESTAMP)) {
   1491 						ts_value = htonl(ntohl(ts_value)
   1492 						    - dst->scrub->pfss_ts_mod);
   1493 						pf_change_a(&opt[6],
   1494 						    &th->th_sum, ts_value, 0);
   1495 						copyback = 1;
   1496 					}
   1497 				}
   1498 				/* FALLTHROUGH */
   1499 			default:
   1500 				hlen -= opt[1];
   1501 				opt += opt[1];
   1502 				break;
   1503 			}
   1504 		}
   1505 		if (copyback) {
   1506 			/* Copy back the options; the caller copies back the header */
   1507 			*writeback = 1;
   1508 			m_copyback(m, off + sizeof(struct tcphdr),
   1509 			    (th->th_off << 2) - sizeof(struct tcphdr),
   1510 			    (caddr_t)hdr + sizeof(struct tcphdr));
   1511 		}
   1512 	}
   1513 
   1514 
   1515 	/* I have a dream....  TCP segment reassembly.... */
   1516 	return (0);
   1517 }
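
/*
 * In the stateful pass above, the TSval of a segment from the src peer is
 * advanced by src->scrub->pfss_ts_mod, while the TSecr it carries (an echo
 * of the dst peer's modulated TSval) has dst->scrub->pfss_ts_mod removed
 * again.  Each host therefore sees only modulated timestamps from its peer
 * on the wire, yet gets its own clock values echoed back unchanged.
 */
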
   1518 int
   1519 pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
   1520     int off)
   1521 {
   1522 	u_int16_t	*mss;
   1523 	int		 thoff;
   1524 	int		 opt, cnt, optlen = 0;
   1525 	int		 rewrite = 0;
   1526 	u_char		*optp;
   1527 
   1528 	thoff = th->th_off << 2;
   1529 	cnt = thoff - sizeof(struct tcphdr);
   1530 	optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);
   1531 
   1532 	for (; cnt > 0; cnt -= optlen, optp += optlen) {
   1533 		opt = optp[0];
   1534 		if (opt == TCPOPT_EOL)
   1535 			break;
   1536 		if (opt == TCPOPT_NOP)
   1537 			optlen = 1;
   1538 		else {
   1539 			if (cnt < 2)
   1540 				break;
   1541 			optlen = optp[1];
   1542 			if (optlen < 2 || optlen > cnt)
   1543 				break;
   1544 		}
   1545 		switch (opt) {
   1546 		case TCPOPT_MAXSEG:
   1547 			mss = (u_int16_t *)(optp + 2);
   1548 			if ((ntohs(*mss)) > r->max_mss) {
   1549 				th->th_sum = pf_cksum_fixup(th->th_sum,
   1550 				    *mss, htons(r->max_mss));
   1551 				*mss = htons(r->max_mss);
   1552 				rewrite = 1;
   1553 			}
   1554 			break;
   1555 		default:
   1556 			break;
   1557 		}
   1558 	}
   1559 
   1560 	return (rewrite);
   1561 }
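
/*
 * pf_normalize_tcpopt() currently rewrites only the MSS option: when the
 * matching rule carries a max-mss value (r->max_mss) and a segment
 * advertises a larger MSS, the option is clamped in place and the TCP
 * checksum is fixed up incrementally.  An illustrative use, assuming an
 * "$ext_if" macro for the outside interface, is a rule such as
 *
 *	scrub out on $ext_if max-mss 1440
 *
 * e.g. to keep TCP segments small enough for a reduced-MTU uplink.
 */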
   1562