npf_inet.c revision 1.14
      1 /*	$NetBSD: npf_inet.c,v 1.14 2012/07/15 00:23:00 rmind Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This material is based upon work partially supported by The
      8  * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
      33  * Various protocol-related helper routines.
      34  *
      35  * This layer manipulates the npf_cache_t structure, i.e. it caches the
      36  * requested headers and records which of them were cached in the info
      37  * bit field.  It is also the responsibility of this layer to update or
      38  * invalidate the cache on rewrites (e.g. by the translation routines).
     39  */
     40 
     41 #include <sys/cdefs.h>
     42 __KERNEL_RCSID(0, "$NetBSD: npf_inet.c,v 1.14 2012/07/15 00:23:00 rmind Exp $");
     43 
     44 #include <sys/param.h>
     45 #include <sys/types.h>
     46 
     47 #include <net/pfil.h>
     48 #include <net/if.h>
     49 #include <net/ethertypes.h>
     50 #include <net/if_ether.h>
     51 
     52 #include <netinet/in_systm.h>
     53 #include <netinet/in.h>
     54 #include <netinet/in_var.h>
     55 #include <netinet/ip.h>
     56 #include <netinet/ip6.h>
     57 #include <netinet/tcp.h>
     58 #include <netinet/udp.h>
     59 #include <netinet/ip_icmp.h>
     60 
     61 #include "npf_impl.h"
     62 
     63 /*
      64  * npf_fixup{16,32}_cksum: incrementally update the Internet checksum per RFC 1624.
     65  */
     66 
     67 uint16_t
     68 npf_fixup16_cksum(uint16_t cksum, uint16_t odatum, uint16_t ndatum)
     69 {
     70 	uint32_t sum;
     71 
     72 	/*
     73 	 * RFC 1624:
     74 	 *	HC' = ~(~HC + ~m + m')
     75 	 */
     76 	sum = ~ntohs(cksum) & 0xffff;
     77 	sum += (~ntohs(odatum) & 0xffff) + ntohs(ndatum);
     78 	sum = (sum >> 16) + (sum & 0xffff);
     79 	sum += (sum >> 16);
     80 
     81 	return htons(~sum & 0xffff);
     82 }
     83 
     84 uint16_t
     85 npf_fixup32_cksum(uint16_t cksum, uint32_t odatum, uint32_t ndatum)
     86 {
     87 
     88 	cksum = npf_fixup16_cksum(cksum, odatum & 0xffff, ndatum & 0xffff);
     89 	cksum = npf_fixup16_cksum(cksum, odatum >> 16, ndatum >> 16);
     90 	return cksum;
     91 }
     92 
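         /*
          * Example (illustrative sketch, not part of the original source):
          * repairing ip_sum after rewriting the IPv4 ID and source address
          * with the RFC 1624 fixups above, assuming "ip" points to a writable
          * IPv4 header.  The new values are hypothetical; 0xc0000201 is
          * 192.0.2.1 in host byte-order.
          *
          *	uint16_t new_id = htons(12345);
          *	uint32_t new_src = htonl(0xc0000201);
          *
          *	ip->ip_sum = npf_fixup16_cksum(ip->ip_sum, ip->ip_id, new_id);
          *	ip->ip_id = new_id;
          *	ip->ip_sum = npf_fixup32_cksum(ip->ip_sum, ip->ip_src.s_addr, new_src);
          *	ip->ip_src.s_addr = new_src;
          */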
     93 /*
      94  * npf_addr_cksum: update the checksum for a rewritten address, either IPv4 or IPv6.
     95  */
     96 uint16_t
     97 npf_addr_cksum(uint16_t cksum, int sz, npf_addr_t *oaddr, npf_addr_t *naddr)
     98 {
     99 	uint32_t *oip32 = (uint32_t *)oaddr, *nip32 = (uint32_t *)naddr;
    100 
    101 	KASSERT(sz % sizeof(uint32_t) == 0);
    102 	do {
    103 		cksum = npf_fixup32_cksum(cksum, *oip32++, *nip32++);
    104 		sz -= sizeof(uint32_t);
    105 	} while (sz);
    106 
    107 	return cksum;
    108 }
    109 
    110 /*
     111  * npf_addr_sum: sum a pair of IP addresses (IPv4 or IPv6) into a 32-bit
     112  * integer.  Note: used by the hash functions.
    113  */
    114 uint32_t
    115 npf_addr_sum(const int sz, const npf_addr_t *a1, const npf_addr_t *a2)
    116 {
    117 	uint32_t mix = 0;
    118 	int i;
    119 
    120 	KASSERT(sz > 0 && a1 != NULL && a2 != NULL);
    121 
    122 	for (i = 0; i < (sz >> 2); i++) {
    123 		mix += a1->s6_addr32[i];
    124 		mix += a2->s6_addr32[i];
    125 	}
    126 	return mix;
    127 }
    128 
    129 /*
    130  * npf_addr_mask: apply the mask to a given address and store the result.
    131  */
    132 void
    133 npf_addr_mask(const npf_addr_t *addr, const npf_netmask_t mask,
    134     const int alen, npf_addr_t *out)
    135 {
    136 	const int nwords = alen >> 2;
    137 	uint_fast8_t length = mask;
    138 
    139 	/* Note: maximum length is 32 for IPv4 and 128 for IPv6. */
    140 	KASSERT(length <= NPF_MAX_NETMASK);
    141 
    142 	for (int i = 0; i < nwords; i++) {
    143 		uint32_t wordmask;
    144 
    145 		if (length >= 32) {
    146 			wordmask = htonl(0xffffffff);
    147 			length -= 32;
    148 		} else if (length) {
    149 			wordmask = htonl(0xffffffff << (32 - length));
    150 			length = 0;
    151 		} else {
    152 			wordmask = 0;
    153 		}
    154 		out->s6_addr32[i] = addr->s6_addr32[i] & wordmask;
    155 	}
    156 }
    157 
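         /*
          * Worked example (not in the original source): applying a /20 prefix
          * to an IPv4 address, "addr" being an assumed npf_addr_t pointer.
          * With alen of 4 only one word is processed; length is 20, so the
          * "else if (length)" branch computes htonl(0xffffffff << 12), i.e.
          * htonl(0xfffff000), as the word mask.
          *
          *	npf_addr_t out;
          *
          *	npf_addr_mask(addr, 20, sizeof(struct in_addr), &out);
          *	KASSERT((out.s6_addr32[0] & ~htonl(0xfffff000)) == 0);
          */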
    158 /*
    159  * npf_addr_cmp: compare two addresses, either IPv4 or IPv6.
    160  *
    161  * => Return 0 if equal and negative/positive if less/greater accordingly.
    162  * => Ignore the mask, if NPF_NO_NETMASK is specified.
    163  */
    164 int
    165 npf_addr_cmp(const npf_addr_t *addr1, const npf_netmask_t mask1,
    166     const npf_addr_t *addr2, const npf_netmask_t mask2, const int alen)
    167 {
    168 	npf_addr_t realaddr1, realaddr2;
    169 
    170 	if (mask1 != NPF_NO_NETMASK) {
    171 		npf_addr_mask(addr1, mask1, alen, &realaddr1);
    172 		addr1 = &realaddr1;
    173 	}
    174 	if (mask2 != NPF_NO_NETMASK) {
    175 		npf_addr_mask(addr2, mask2, alen, &realaddr2);
    176 		addr2 = &realaddr2;
    177 	}
    178 	return memcmp(addr1, addr2, alen);
    179 }
    180 
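         /*
          * Usage sketch (hypothetical caller): testing whether the cached
          * source address lies within 198.51.100.0/24.  Masking both sides
          * with the same prefix length turns the comparison into a prefix
          * match; "net" is assumed to hold the network address.
          *
          *	if (npf_addr_cmp(npc->npc_srcip, 24, &net, 24, npc->npc_alen) == 0) {
          *		(address is within 198.51.100.0/24)
          *	}
          */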
    181 /*
    182  * npf_tcpsaw: helper to fetch SEQ, ACK, WIN and return TCP data length.
    183  *
    184  * => Returns all values in host byte-order.
    185  */
    186 int
    187 npf_tcpsaw(const npf_cache_t *npc, tcp_seq *seq, tcp_seq *ack, uint32_t *win)
    188 {
    189 	const struct tcphdr *th = &npc->npc_l4.tcp;
    190 	u_int thlen;
    191 
    192 	KASSERT(npf_iscached(npc, NPC_TCP));
    193 
    194 	*seq = ntohl(th->th_seq);
    195 	*ack = ntohl(th->th_ack);
    196 	*win = (uint32_t)ntohs(th->th_win);
    197 	thlen = th->th_off << 2;
    198 
    199 	if (npf_iscached(npc, NPC_IP4)) {
    200 		const struct ip *ip = &npc->npc_ip.v4;
    201 		return ntohs(ip->ip_len) - npf_cache_hlen(npc) - thlen;
    202 	} else if (npf_iscached(npc, NPC_IP6)) {
    203 		const struct ip6_hdr *ip6 = &npc->npc_ip.v6;
    204 		return ntohs(ip6->ip6_plen) - thlen;
    205 	}
    206 	return 0;
    207 }
    208 
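         /*
          * Worked example of the IPv4 arithmetic above (illustrative): for a
          * datagram with ip_len of 1500, a 20-byte IPv4 header and th_off of
          * 8 (a 32-byte TCP header including options), npf_tcpsaw() returns
          * 1500 - 20 - 32 = 1448 bytes of TCP payload.  In the IPv6 case,
          * ip6_plen already excludes the fixed header, so only the TCP header
          * length is subtracted.
          */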
    209 /*
     210  * npf_fetch_tcpopts: parse TCP options, fetch (or store) the MSS and fetch the window scale.
    211  */
    212 bool
    213 npf_fetch_tcpopts(const npf_cache_t *npc, nbuf_t *nbuf,
    214     uint16_t *mss, int *wscale)
    215 {
    216 	void *n_ptr = nbuf_dataptr(nbuf);
    217 	const struct tcphdr *th = &npc->npc_l4.tcp;
    218 	int topts_len, step;
    219 	uint16_t val16;
    220 	uint8_t val;
    221 
    222 	KASSERT(npf_iscached(npc, NPC_IP46));
    223 	KASSERT(npf_iscached(npc, NPC_TCP));
    224 
    225 	/* Determine if there are any TCP options, get their length. */
    226 	topts_len = (th->th_off << 2) - sizeof(struct tcphdr);
    227 	if (topts_len <= 0) {
    228 		/* No options. */
    229 		return false;
    230 	}
    231 	KASSERT(topts_len <= MAX_TCPOPTLEN);
    232 
    233 	/* First step: IP and TCP header up to options. */
    234 	step = npf_cache_hlen(npc) + sizeof(struct tcphdr);
    235 next:
    236 	if (nbuf_advfetch(&nbuf, &n_ptr, step, sizeof(val), &val)) {
    237 		return false;
    238 	}
    239 
    240 	switch (val) {
    241 	case TCPOPT_EOL:
    242 		/* Done. */
    243 		return true;
    244 	case TCPOPT_NOP:
    245 		topts_len--;
    246 		step = 1;
    247 		break;
    248 	case TCPOPT_MAXSEG:
    249 		/*
    250 		 * XXX: clean this mess.
    251 		 */
    252 		if (mss && *mss) {
    253 			val16 = *mss;
    254 			if (nbuf_advstore(&nbuf, &n_ptr, 2,
    255 			    sizeof(val16), &val16))
    256 				return false;
    257 		} else if (nbuf_advfetch(&nbuf, &n_ptr, 2,
    258 		    sizeof(val16), &val16)) {
    259 			return false;
    260 		}
    261 		if (mss) {
    262 			*mss = val16;
    263 		}
    264 		topts_len -= TCPOLEN_MAXSEG;
    265 		step = sizeof(val16);
    266 		break;
    267 	case TCPOPT_WINDOW:
    268 		/* TCP Window Scaling (RFC 1323). */
    269 		if (nbuf_advfetch(&nbuf, &n_ptr, 2, sizeof(val), &val)) {
    270 			return false;
    271 		}
    272 		*wscale = (val > TCP_MAX_WINSHIFT) ? TCP_MAX_WINSHIFT : val;
    273 		topts_len -= TCPOLEN_WINDOW;
    274 		step = sizeof(val);
    275 		break;
    276 	default:
    277 		if (nbuf_advfetch(&nbuf, &n_ptr, 1, sizeof(val), &val)) {
    278 			return false;
    279 		}
    280 		if (val < 2 || val >= topts_len) {
    281 			return false;
    282 		}
    283 		topts_len -= val;
    284 		step = val - 1;
    285 	}
    286 
    287 	/* Any options left? */
    288 	if (__predict_true(topts_len > 0)) {
    289 		goto next;
    290 	}
    291 	return true;
    292 }
    293 
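         /*
          * Usage sketch (hypothetical caller): reading the MSS and window
          * scale of a cached SYN segment.  Passing a zero MSS fetches the
          * option; a non-zero, network byte-order MSS is stored back into
          * the packet instead (see the TCPOPT_MAXSEG case above).
          *
          *	uint16_t mss = 0;
          *	int wscale = 0;
          *
          *	if (npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
          *		(mss is in network byte-order, wscale is a shift count)
          *	}
          */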
    294 /*
    295  * npf_fetch_ip: fetch, check and cache IP header.
    296  */
    297 bool
    298 npf_fetch_ip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
    299 {
    300 	uint8_t ver;
    301 
    302 	if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(uint8_t), &ver)) {
    303 		return false;
    304 	}
    305 
    306 	switch (ver >> 4) {
    307 	case IPVERSION: {
    308 		struct ip *ip = &npc->npc_ip.v4;
    309 
    310 		/* Fetch IPv4 header. */
    311 		if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(struct ip), ip)) {
    312 			return false;
    313 		}
    314 
    315 		/* Check header length and fragment offset. */
    316 		if ((u_int)(ip->ip_hl << 2) < sizeof(struct ip)) {
    317 			return false;
    318 		}
    319 		if (ip->ip_off & ~htons(IP_DF | IP_RF)) {
    320 			/* Note fragmentation. */
    321 			npc->npc_info |= NPC_IPFRAG;
    322 		}
    323 
    324 		/* Cache: layer 3 - IPv4. */
    325 		npc->npc_alen = sizeof(struct in_addr);
    326 		npc->npc_srcip = (npf_addr_t *)&ip->ip_src;
    327 		npc->npc_dstip = (npf_addr_t *)&ip->ip_dst;
    328 		npc->npc_info |= NPC_IP4;
    329 		npc->npc_hlen = ip->ip_hl << 2;
    330 		npc->npc_next_proto = npc->npc_ip.v4.ip_p;
    331 		break;
    332 	}
    333 
    334 	case (IPV6_VERSION >> 4): {
    335 		struct ip6_hdr *ip6 = &npc->npc_ip.v6;
    336 		size_t hlen = sizeof(struct ip6_hdr);
    337 		struct ip6_ext ip6e;
    338 
    339 		/* Fetch IPv6 header and set initial next-protocol value. */
    340 		if (nbuf_fetch_datum(nbuf, n_ptr, hlen, ip6)) {
    341 			return false;
    342 		}
    343 		npc->npc_next_proto = ip6->ip6_nxt;
    344 		npc->npc_hlen = hlen;
    345 
    346 		/*
    347 		 * Advance by the length of the current header and
    348 		 * prefetch the extension header.
    349 		 */
    350 		while (nbuf_advfetch(&nbuf, &n_ptr, hlen,
    351 		    sizeof(struct ip6_ext), &ip6e) == 0) {
    352 			/*
    353 			 * Determine whether we are going to continue.
    354 			 */
    355 			switch (npc->npc_next_proto) {
    356 			case IPPROTO_HOPOPTS:
    357 			case IPPROTO_DSTOPTS:
    358 			case IPPROTO_ROUTING:
    359 				hlen = (ip6e.ip6e_len + 1) << 3;
    360 				break;
    361 			case IPPROTO_FRAGMENT:
    362 				npc->npc_info |= NPC_IPFRAG;
    363 				hlen = sizeof(struct ip6_frag);
    364 				break;
    365 			case IPPROTO_AH:
    366 				hlen = (ip6e.ip6e_len + 2) << 2;
    367 				break;
    368 			default:
    369 				hlen = 0;
    370 				break;
    371 			}
    372 
    373 			if (!hlen) {
    374 				break;
    375 			}
    376 			npc->npc_next_proto = ip6e.ip6e_nxt;
    377 			npc->npc_hlen += hlen;
    378 		}
    379 
    380 		/* Cache: layer 3 - IPv6. */
    381 		npc->npc_alen = sizeof(struct in6_addr);
    382 		npc->npc_srcip = (npf_addr_t *)&ip6->ip6_src;
    383 		npc->npc_dstip = (npf_addr_t *)&ip6->ip6_dst;
    384 		npc->npc_info |= NPC_IP6;
    385 		break;
    386 	}
    387 	default:
    388 		return false;
    389 	}
    390 
    391 	return true;
    392 }
    393 
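         /*
          * Example of the extension header arithmetic above (illustrative):
          * a routing header with ip6e_len of 2 spans (2 + 1) << 3 = 24 bytes,
          * while an AH header with ip6e_len of 4 spans (4 + 2) << 2 = 24
          * bytes, AH measuring its length in 32-bit rather than 64-bit units.
          * Each accepted header advances npc_hlen and npc_next_proto before
          * the next prefetch.
          */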
    394 /*
    395  * npf_fetch_tcp: fetch, check and cache TCP header.  If necessary,
    396  * fetch and cache layer 3 as well.
    397  */
    398 bool
    399 npf_fetch_tcp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
    400 {
    401 	struct tcphdr *th;
    402 
    403 	/* Must have IP header processed for its length and protocol. */
    404 	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
    405 		return false;
    406 	}
    407 	if (npf_cache_ipproto(npc) != IPPROTO_TCP) {
    408 		return false;
    409 	}
    410 	th = &npc->npc_l4.tcp;
    411 
    412 	/* Fetch TCP header. */
    413 	if (nbuf_advfetch(&nbuf, &n_ptr, npf_cache_hlen(npc),
    414 	    sizeof(struct tcphdr), th)) {
    415 		return false;
    416 	}
    417 
    418 	/* Cache: layer 4 - TCP. */
    419 	npc->npc_info |= (NPC_LAYER4 | NPC_TCP);
    420 	return true;
    421 }
    422 
    423 /*
    424  * npf_fetch_udp: fetch, check and cache UDP header.  If necessary,
    425  * fetch and cache layer 3 as well.
    426  */
    427 bool
    428 npf_fetch_udp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
    429 {
    430 	struct udphdr *uh;
    431 	u_int hlen;
    432 
    433 	/* Must have IP header processed for its length and protocol. */
    434 	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
    435 		return false;
    436 	}
    437 	if (npf_cache_ipproto(npc) != IPPROTO_UDP) {
    438 		return false;
    439 	}
    440 	uh = &npc->npc_l4.udp;
    441 	hlen = npf_cache_hlen(npc);
    442 
    443 	/* Fetch UDP header. */
    444 	if (nbuf_advfetch(&nbuf, &n_ptr, hlen, sizeof(struct udphdr), uh)) {
    445 		return false;
    446 	}
    447 
    448 	/* Cache: layer 4 - UDP. */
    449 	npc->npc_info |= (NPC_LAYER4 | NPC_UDP);
    450 	return true;
    451 }
    452 
    453 /*
    454  * npf_fetch_icmp: fetch ICMP code, type and possible query ID.
    455  */
    456 bool
    457 npf_fetch_icmp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
    458 {
    459 	struct icmp *ic;
    460 	u_int hlen, iclen;
    461 
    462 	/* Must have IP header processed for its length and protocol. */
    463 	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
    464 		return false;
    465 	}
    466 	if (npf_cache_ipproto(npc) != IPPROTO_ICMP) {
    467 		return false;
    468 	}
    469 	ic = &npc->npc_l4.icmp;
    470 	hlen = npf_cache_hlen(npc);
    471 
    472 	/* Fetch basic ICMP header, up to the "data" point. */
    473 	iclen = offsetof(struct icmp, icmp_data);
    474 	if (nbuf_advfetch(&nbuf, &n_ptr, hlen, iclen, ic)) {
    475 		return false;
    476 	}
    477 
    478 	/* Cache: layer 4 - ICMP. */
    479 	npc->npc_info |= (NPC_LAYER4 | NPC_ICMP);
    480 	return true;
    481 }
    482 
    483 /*
    484  * npf_cache_all: general routine to cache all relevant IP (v4 or v6)
    485  * and TCP, UDP or ICMP headers.
    486  */
    487 int
    488 npf_cache_all(npf_cache_t *npc, nbuf_t *nbuf)
    489 {
    490 	void *n_ptr = nbuf_dataptr(nbuf);
    491 
    492 	if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
    493 		return npc->npc_info;
    494 	}
    495 	if (npf_iscached(npc, NPC_IPFRAG)) {
    496 		return npc->npc_info;
    497 	}
    498 	switch (npf_cache_ipproto(npc)) {
    499 	case IPPROTO_TCP:
    500 		(void)npf_fetch_tcp(npc, nbuf, n_ptr);
    501 		break;
    502 	case IPPROTO_UDP:
    503 		(void)npf_fetch_udp(npc, nbuf, n_ptr);
    504 		break;
    505 	case IPPROTO_ICMP:
    506 		(void)npf_fetch_icmp(npc, nbuf, n_ptr);
    507 		break;
    508 	}
    509 	return npc->npc_info;
    510 }
    511 
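         /*
          * Typical caller pattern (sketch, not taken from this file): cache
          * everything for a packet and branch on the returned info bits.
          *
          *	const int flags = npf_cache_all(npc, nbuf);
          *
          *	if (flags & NPC_IPFRAG) {
          *		(fragment: layer 4 headers were not cached)
          *	} else if (flags & NPC_TCP) {
          *		(npc->npc_l4.tcp is valid)
          *	}
          */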
    512 /*
    513  * npf_rwrip: rewrite required IP address, update the cache.
    514  */
    515 bool
    516 npf_rwrip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    517     npf_addr_t *addr)
    518 {
    519 	npf_addr_t *oaddr;
    520 	u_int offby;
    521 
    522 	KASSERT(npf_iscached(npc, NPC_IP46));
    523 
    524 	if (di == PFIL_OUT) {
    525 		/* Rewrite source address, if outgoing. */
    526 		offby = offsetof(struct ip, ip_src);
    527 		oaddr = npc->npc_srcip;
    528 	} else {
    529 		/* Rewrite destination, if incoming. */
    530 		offby = offsetof(struct ip, ip_dst);
    531 		oaddr = npc->npc_dstip;
    532 	}
    533 
    534 	/* Advance to the address and rewrite it. */
    535 	if (nbuf_advstore(&nbuf, &n_ptr, offby, npc->npc_alen, addr))
    536 		return false;
    537 
    538 	/* Cache: IP address. */
    539 	memcpy(oaddr, addr, npc->npc_alen);
    540 	return true;
    541 }
    542 
    543 /*
    544  * npf_rwrport: rewrite required TCP/UDP port, update the cache.
    545  */
    546 bool
    547 npf_rwrport(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    548     in_port_t port)
    549 {
    550 	const int proto = npf_cache_ipproto(npc);
    551 	u_int offby = npf_cache_hlen(npc);
    552 	in_port_t *oport;
    553 
    554 	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
    555 	KASSERT(proto == IPPROTO_TCP || proto == IPPROTO_UDP);
    556 
    557 	/* Offset to the port and pointer in the cache. */
    558 	if (proto == IPPROTO_TCP) {
    559 		struct tcphdr *th = &npc->npc_l4.tcp;
    560 		if (di == PFIL_OUT) {
    561 			CTASSERT(offsetof(struct tcphdr, th_sport) == 0);
    562 			oport = &th->th_sport;
    563 		} else {
    564 			offby += offsetof(struct tcphdr, th_dport);
    565 			oport = &th->th_dport;
    566 		}
    567 	} else {
    568 		struct udphdr *uh = &npc->npc_l4.udp;
    569 		if (di == PFIL_OUT) {
    570 			CTASSERT(offsetof(struct udphdr, uh_sport) == 0);
    571 			oport = &uh->uh_sport;
    572 		} else {
    573 			offby += offsetof(struct udphdr, uh_dport);
    574 			oport = &uh->uh_dport;
    575 		}
    576 	}
    577 
    578 	/* Advance and rewrite the port. */
    579 	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(in_port_t), &port))
    580 		return false;
    581 
    582 	/* Cache: TCP/UDP port. */
    583 	*oport = port;
    584 	return true;
    585 }
    586 
    587 /*
    588  * npf_rwrcksum: rewrite IPv4 and/or TCP/UDP checksum, update the cache.
    589  */
    590 bool
    591 npf_rwrcksum(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    592     npf_addr_t *addr, in_port_t port)
    593 {
    594 	const int proto = npf_cache_ipproto(npc);
    595 	npf_addr_t *oaddr;
    596 	in_port_t *oport;
    597 	uint16_t *cksum;
    598 	u_int offby;
    599 
    600 	/* Checksum update for IPv4 header. */
    601 	if (npf_iscached(npc, NPC_IP4)) {
    602 		struct ip *ip = &npc->npc_ip.v4;
    603 		uint16_t ipsum;
    604 
    605 		oaddr = (di == PFIL_OUT) ? npc->npc_srcip : npc->npc_dstip;
    606 		ipsum = npf_addr_cksum(ip->ip_sum, npc->npc_alen, oaddr, addr);
    607 
    608 		/* Advance to the IPv4 checksum and rewrite it. */
    609 		offby = offsetof(struct ip, ip_sum);
    610 		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(ipsum), &ipsum))
    611 			return false;
    612 
    613 		ip->ip_sum = ipsum;
    614 		offby = npf_cache_hlen(npc) - offby;
    615 	} else {
    616 		/* No checksum for IPv6. */
    617 		KASSERT(npf_iscached(npc, NPC_IP6));
    618 		oaddr = NULL;
    619 		offby = 0;
    620 		return false;	/* XXX: Not yet supported. */
    621 	}
    622 
    623 	/* Determine whether TCP/UDP checksum update is needed. */
    624 	if (proto == IPPROTO_ICMP || port == 0) {
    625 		return true;
    626 	}
    627 	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
    628 
    629 	/* Calculate TCP/UDP checksum. */
    630 	if (proto == IPPROTO_TCP) {
    631 		struct tcphdr *th = &npc->npc_l4.tcp;
    632 
    633 		cksum = &th->th_sum;
    634 		offby += offsetof(struct tcphdr, th_sum);
    635 		oport = (di == PFIL_OUT) ? &th->th_sport : &th->th_dport;
    636 	} else {
    637 		struct udphdr *uh = &npc->npc_l4.udp;
    638 
    639 		KASSERT(proto == IPPROTO_UDP);
    640 		cksum = &uh->uh_sum;
    641 		if (*cksum == 0) {
    642 			/* No need to update. */
    643 			return true;
    644 		}
    645 		offby += offsetof(struct udphdr, uh_sum);
    646 		oport = (di == PFIL_OUT) ? &uh->uh_sport : &uh->uh_dport;
    647 	}
    648 	*cksum = npf_addr_cksum(*cksum, npc->npc_alen, oaddr, addr);
    649 	*cksum = npf_fixup16_cksum(*cksum, *oport, port);
    650 
    651 	/* Advance to TCP/UDP checksum and rewrite it. */
    652 	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(uint16_t), cksum)) {
    653 		return false;
    654 	}
    655 	return true;
    656 }
    657 
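         /*
          * Sketch of the rewrite ordering assumed by the routines above (the
          * actual callers live in the NAT code, not in this file): the
          * checksum fixups compare against the old cached address and port,
          * so npf_rwrcksum() must run before npf_rwrip() and npf_rwrport()
          * overwrite the cache.
          *
          *	if (!npf_rwrcksum(npc, nbuf, n_ptr, di, addr, port))
          *		return false;
          *	if (!npf_rwrip(npc, nbuf, n_ptr, di, addr))
          *		return false;
          *	if (port != 0 && !npf_rwrport(npc, nbuf, n_ptr, di, port))
          *		return false;
          */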
    658 static inline bool
    659 npf_normalize_ip4(npf_cache_t *npc, nbuf_t *nbuf,
    660     bool rnd, bool no_df, int minttl)
    661 {
    662 	void *n_ptr = nbuf_dataptr(nbuf);
    663 	struct ip *ip = &npc->npc_ip.v4;
    664 	uint16_t cksum = ip->ip_sum;
    665 	uint16_t ip_off = ip->ip_off;
    666 	uint8_t ttl = ip->ip_ttl;
    667 	u_int offby = 0;
    668 
    669 	KASSERT(rnd || minttl || no_df);
    670 
    671 	/* Randomize IPv4 ID. */
    672 	if (rnd) {
    673 		uint16_t oid = ip->ip_id, nid;
    674 
    675 		nid = htons(ip_randomid(ip_ids, 0));
    676 		offby = offsetof(struct ip, ip_id);
    677 		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(nid), &nid)) {
    678 			return false;
    679 		}
    680 		cksum = npf_fixup16_cksum(cksum, oid, nid);
    681 		ip->ip_id = nid;
    682 	}
    683 
    684 	/* IP_DF flag cleansing. */
    685 	if (no_df && (ip_off & htons(IP_DF)) != 0) {
    686 		uint16_t nip_off = ip_off & ~htons(IP_DF);
    687 
    688 		if (nbuf_advstore(&nbuf, &n_ptr,
    689 		    offsetof(struct ip, ip_off) - offby,
    690 		    sizeof(uint16_t), &nip_off)) {
    691 			return false;
    692 		}
    693 		cksum = npf_fixup16_cksum(cksum, ip_off, nip_off);
    694 		ip->ip_off = nip_off;
    695 		offby = offsetof(struct ip, ip_off);
    696 	}
    697 
    698 	/* Enforce minimum TTL. */
    699 	if (minttl && ttl < minttl) {
    700 		if (nbuf_advstore(&nbuf, &n_ptr,
    701 		    offsetof(struct ip, ip_ttl) - offby,
    702 		    sizeof(uint8_t), &minttl)) {
    703 			return false;
    704 		}
    705 		cksum = npf_fixup16_cksum(cksum, ttl, minttl);
    706 		ip->ip_ttl = minttl;
    707 		offby = offsetof(struct ip, ip_ttl);
    708 	}
    709 
    710 	/* Update IP checksum. */
    711 	offby = offsetof(struct ip, ip_sum) - offby;
    712 	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
    713 		return false;
    714 	}
    715 	ip->ip_sum = cksum;
    716 	return true;
    717 }
    718 
    719 bool
    720 npf_normalize(npf_cache_t *npc, nbuf_t *nbuf,
    721     bool no_df, bool rnd, u_int minttl, u_int maxmss)
    722 {
    723 	void *n_ptr = nbuf_dataptr(nbuf);
    724 	struct tcphdr *th = &npc->npc_l4.tcp;
    725 	uint16_t cksum, mss;
    726 	u_int offby;
    727 	int wscale;
    728 
    729 	/* Normalize IPv4. */
    730 	if (npf_iscached(npc, NPC_IP4) && (rnd || minttl)) {
    731 		if (!npf_normalize_ip4(npc, nbuf, rnd, no_df, minttl)) {
    732 			return false;
    733 		}
    734 	} else if (!npf_iscached(npc, NPC_IP4)) {
    735 		/* XXX: no IPv6 */
    736 		return false;
    737 	}
    738 
    739 	/*
    740 	 * TCP Maximum Segment Size (MSS) "clamping".  Only if SYN packet.
    741 	 * Fetch MSS and check whether rewrite to lower is needed.
    742 	 */
    743 	if (maxmss == 0 || !npf_iscached(npc, NPC_TCP) ||
    744 	    (th->th_flags & TH_SYN) == 0) {
    745 		/* Not required; done. */
    746 		return true;
    747 	}
    748 	mss = 0;
    749 	if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
    750 		return false;
    751 	}
    752 	if (ntohs(mss) <= maxmss) {
    753 		return true;
    754 	}
    755 
    756 	/* Calculate TCP checksum, then rewrite MSS and the checksum. */
    757 	maxmss = htons(maxmss);
    758 	cksum = npf_fixup16_cksum(th->th_sum, mss, maxmss);
    759 	th->th_sum = cksum;
    760 	mss = maxmss;
    761 	if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
    762 		return false;
    763 	}
    764 	offby = npf_cache_hlen(npc) + offsetof(struct tcphdr, th_sum);
    765 	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
    766 		return false;
    767 	}
    768 	return true;
    769 }
    770 
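         /*
          * Example invocation (hypothetical parameters): enforce a minimum
          * TTL of 64 and clamp the MSS of TCP SYN segments to 1440 bytes,
          * without ID randomization or IP_DF stripping.
          *
          *	(void)npf_normalize(npc, nbuf, false, false, 64, 1440);
          */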
    771 #if defined(DDB) || defined(_NPF_TESTING)
    772 
    773 void
    774 npf_addr_dump(const npf_addr_t *addr)
    775 {
    776 	printf("IP[%x:%x:%x:%x]\n",
    777 	    addr->s6_addr32[0], addr->s6_addr32[1],
    778 	    addr->s6_addr32[2], addr->s6_addr32[3]);
    779 }
    780 
    781 #endif
    782