npf_inet.c revision 1.15 1 /* $NetBSD: npf_inet.c,v 1.15 2012/07/19 21:52:29 spz Exp $ */
2
3 /*-
4 * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
 * Various protocol related helper routines.
34 *
35 * This layer manipulates npf_cache_t structure i.e. caches requested headers
36 * and stores which information was cached in the information bit field.
37 * It is also responsibility of this layer to update or invalidate the cache
38 * on rewrites (e.g. by translation routines).
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: npf_inet.c,v 1.15 2012/07/19 21:52:29 spz Exp $");
43
44 #include <sys/param.h>
45 #include <sys/types.h>
46
47 #include <net/pfil.h>
48 #include <net/if.h>
49 #include <net/ethertypes.h>
50 #include <net/if_ether.h>
51
52 #include <netinet/in_systm.h>
53 #include <netinet/in.h>
54 #include <netinet/in_var.h>
55 #include <netinet/ip.h>
56 #include <netinet/ip6.h>
57 #include <netinet/tcp.h>
58 #include <netinet/udp.h>
59 #include <netinet/ip_icmp.h>
60
61 #include "npf_impl.h"
62
63 /*
64 * npf_fixup{16,32}_cksum: update IPv4 checksum.
65 */
66
67 uint16_t
68 npf_fixup16_cksum(uint16_t cksum, uint16_t odatum, uint16_t ndatum)
69 {
70 uint32_t sum;
71
72 /*
73 * RFC 1624:
74 * HC' = ~(~HC + ~m + m')
75 */
76 sum = ~ntohs(cksum) & 0xffff;
77 sum += (~ntohs(odatum) & 0xffff) + ntohs(ndatum);
78 sum = (sum >> 16) + (sum & 0xffff);
79 sum += (sum >> 16);
80
81 return htons(~sum & 0xffff);
82 }
83
uint16_t
npf_fixup32_cksum(uint16_t cksum, uint32_t odatum, uint32_t ndatum)
{
	uint16_t sum = cksum;

	/* Apply the 16-bit fixup to the low and high halves in turn. */
	sum = npf_fixup16_cksum(sum, odatum & 0xffff, ndatum & 0xffff);
	sum = npf_fixup16_cksum(sum, odatum >> 16, ndatum >> 16);
	return sum;
}
92
93 /*
94 * npf_addr_cksum: calculate checksum of the address, either IPv4 or IPv6.
95 */
96 uint16_t
97 npf_addr_cksum(uint16_t cksum, int sz, npf_addr_t *oaddr, npf_addr_t *naddr)
98 {
99 uint32_t *oip32 = (uint32_t *)oaddr, *nip32 = (uint32_t *)naddr;
100
101 KASSERT(sz % sizeof(uint32_t) == 0);
102 do {
103 cksum = npf_fixup32_cksum(cksum, *oip32++, *nip32++);
104 sz -= sizeof(uint32_t);
105 } while (sz);
106
107 return cksum;
108 }
109
110 /*
111 * npf_addr_sum: provide IP address as a summed (if needed) 32-bit integer.
112 * Note: used for hash function.
113 */
114 uint32_t
115 npf_addr_sum(const int sz, const npf_addr_t *a1, const npf_addr_t *a2)
116 {
117 uint32_t mix = 0;
118 int i;
119
120 KASSERT(sz > 0 && a1 != NULL && a2 != NULL);
121
122 for (i = 0; i < (sz >> 2); i++) {
123 mix += a1->s6_addr32[i];
124 mix += a2->s6_addr32[i];
125 }
126 return mix;
127 }
128
129 /*
130 * npf_addr_mask: apply the mask to a given address and store the result.
131 */
132 void
133 npf_addr_mask(const npf_addr_t *addr, const npf_netmask_t mask,
134 const int alen, npf_addr_t *out)
135 {
136 const int nwords = alen >> 2;
137 uint_fast8_t length = mask;
138
139 /* Note: maximum length is 32 for IPv4 and 128 for IPv6. */
140 KASSERT(length <= NPF_MAX_NETMASK);
141
142 for (int i = 0; i < nwords; i++) {
143 uint32_t wordmask;
144
145 if (length >= 32) {
146 wordmask = htonl(0xffffffff);
147 length -= 32;
148 } else if (length) {
149 wordmask = htonl(0xffffffff << (32 - length));
150 length = 0;
151 } else {
152 wordmask = 0;
153 }
154 out->s6_addr32[i] = addr->s6_addr32[i] & wordmask;
155 }
156 }
157
158 /*
159 * npf_addr_cmp: compare two addresses, either IPv4 or IPv6.
160 *
161 * => Return 0 if equal and negative/positive if less/greater accordingly.
162 * => Ignore the mask, if NPF_NO_NETMASK is specified.
163 */
164 int
165 npf_addr_cmp(const npf_addr_t *addr1, const npf_netmask_t mask1,
166 const npf_addr_t *addr2, const npf_netmask_t mask2, const int alen)
167 {
168 npf_addr_t realaddr1, realaddr2;
169
170 if (mask1 != NPF_NO_NETMASK) {
171 npf_addr_mask(addr1, mask1, alen, &realaddr1);
172 addr1 = &realaddr1;
173 }
174 if (mask2 != NPF_NO_NETMASK) {
175 npf_addr_mask(addr2, mask2, alen, &realaddr2);
176 addr2 = &realaddr2;
177 }
178 return memcmp(addr1, addr2, alen);
179 }
180
181 /*
182 * npf_tcpsaw: helper to fetch SEQ, ACK, WIN and return TCP data length.
183 *
184 * => Returns all values in host byte-order.
185 */
186 int
187 npf_tcpsaw(const npf_cache_t *npc, tcp_seq *seq, tcp_seq *ack, uint32_t *win)
188 {
189 const struct tcphdr *th = &npc->npc_l4.tcp;
190 u_int thlen;
191
192 KASSERT(npf_iscached(npc, NPC_TCP));
193
194 *seq = ntohl(th->th_seq);
195 *ack = ntohl(th->th_ack);
196 *win = (uint32_t)ntohs(th->th_win);
197 thlen = th->th_off << 2;
198
199 if (npf_iscached(npc, NPC_IP4)) {
200 const struct ip *ip = &npc->npc_ip.v4;
201 return ntohs(ip->ip_len) - npf_cache_hlen(npc) - thlen;
202 } else if (npf_iscached(npc, NPC_IP6)) {
203 const struct ip6_hdr *ip6 = &npc->npc_ip.v6;
204 return ntohs(ip6->ip6_plen) - thlen;
205 }
206 return 0;
207 }
208
209 /*
210 * npf_fetch_tcpopts: parse and return TCP options.
211 */
212 bool
213 npf_fetch_tcpopts(const npf_cache_t *npc, nbuf_t *nbuf,
214 uint16_t *mss, int *wscale)
215 {
216 void *n_ptr = nbuf_dataptr(nbuf);
217 const struct tcphdr *th = &npc->npc_l4.tcp;
218 int topts_len, step;
219 uint16_t val16;
220 uint8_t val;
221
222 KASSERT(npf_iscached(npc, NPC_IP46));
223 KASSERT(npf_iscached(npc, NPC_TCP));
224
225 /* Determine if there are any TCP options, get their length. */
226 topts_len = (th->th_off << 2) - sizeof(struct tcphdr);
227 if (topts_len <= 0) {
228 /* No options. */
229 return false;
230 }
231 KASSERT(topts_len <= MAX_TCPOPTLEN);
232
233 /* First step: IP and TCP header up to options. */
234 step = npf_cache_hlen(npc) + sizeof(struct tcphdr);
235 next:
236 if (nbuf_advfetch(&nbuf, &n_ptr, step, sizeof(val), &val)) {
237 return false;
238 }
239
240 switch (val) {
241 case TCPOPT_EOL:
242 /* Done. */
243 return true;
244 case TCPOPT_NOP:
245 topts_len--;
246 step = 1;
247 break;
248 case TCPOPT_MAXSEG:
249 /*
250 * XXX: clean this mess.
251 */
252 if (mss && *mss) {
253 val16 = *mss;
254 if (nbuf_advstore(&nbuf, &n_ptr, 2,
255 sizeof(val16), &val16))
256 return false;
257 } else if (nbuf_advfetch(&nbuf, &n_ptr, 2,
258 sizeof(val16), &val16)) {
259 return false;
260 }
261 if (mss) {
262 *mss = val16;
263 }
264 topts_len -= TCPOLEN_MAXSEG;
265 step = sizeof(val16);
266 break;
267 case TCPOPT_WINDOW:
268 /* TCP Window Scaling (RFC 1323). */
269 if (nbuf_advfetch(&nbuf, &n_ptr, 2, sizeof(val), &val)) {
270 return false;
271 }
272 *wscale = (val > TCP_MAX_WINSHIFT) ? TCP_MAX_WINSHIFT : val;
273 topts_len -= TCPOLEN_WINDOW;
274 step = sizeof(val);
275 break;
276 default:
277 if (nbuf_advfetch(&nbuf, &n_ptr, 1, sizeof(val), &val)) {
278 return false;
279 }
280 if (val < 2 || val >= topts_len) {
281 return false;
282 }
283 topts_len -= val;
284 step = val - 1;
285 }
286
287 /* Any options left? */
288 if (__predict_true(topts_len > 0)) {
289 goto next;
290 }
291 return true;
292 }
293
/*
 * npf_fetch_ip: fetch, check and cache IP header.
 *
 * => On success, the cache holds: address length, src/dst address
 *    pointers, total header length (including IPv6 extension headers),
 *    next-protocol value, and the NPC_IP4/NPC_IP6/NPC_IPFRAG flags.
 * => Returns true on success; false on fetch failure, a malformed
 *    header, or an unknown IP version.
 */
bool
npf_fetch_ip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	uint8_t ver;

	/* The version field is in the first octet of both IPv4 and IPv6. */
	if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(uint8_t), &ver)) {
		return false;
	}

	switch (ver >> 4) {
	case IPVERSION: {
		struct ip *ip = &npc->npc_ip.v4;

		/* Fetch IPv4 header. */
		if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(struct ip), ip)) {
			return false;
		}

		/* Check header length and fragment offset. */
		if ((u_int)(ip->ip_hl << 2) < sizeof(struct ip)) {
			return false;
		}
		/* Any bit besides DF/RF set means offset != 0 or MF. */
		if (ip->ip_off & ~htons(IP_DF | IP_RF)) {
			/* Note fragmentation. */
			npc->npc_info |= NPC_IPFRAG;
		}

		/* Cache: layer 3 - IPv4. */
		npc->npc_alen = sizeof(struct in_addr);
		npc->npc_srcip = (npf_addr_t *)&ip->ip_src;
		npc->npc_dstip = (npf_addr_t *)&ip->ip_dst;
		npc->npc_info |= NPC_IP4;
		npc->npc_hlen = ip->ip_hl << 2;
		npc->npc_next_proto = npc->npc_ip.v4.ip_p;
		break;
	}

	case (IPV6_VERSION >> 4): {
		struct ip6_hdr *ip6 = &npc->npc_ip.v6;
		size_t hlen = sizeof(struct ip6_hdr);
		struct ip6_ext ip6e;

		/* Fetch IPv6 header and set initial next-protocol value. */
		if (nbuf_fetch_datum(nbuf, n_ptr, hlen, ip6)) {
			return false;
		}
		npc->npc_next_proto = ip6->ip6_nxt;
		npc->npc_hlen = hlen;

		/*
		 * Advance by the length of the current header and
		 * prefetch the extension header.
		 */
		while (nbuf_advfetch(&nbuf, &n_ptr, hlen,
		    sizeof(struct ip6_ext), &ip6e) == 0) {
			/*
			 * Determine whether we are going to continue.
			 */
			switch (npc->npc_next_proto) {
			case IPPROTO_HOPOPTS:
			case IPPROTO_DSTOPTS:
			case IPPROTO_ROUTING:
				/* ip6e_len is in 8-byte units, not
				 * counting the first 8 bytes. */
				hlen = (ip6e.ip6e_len + 1) << 3;
				break;
			case IPPROTO_FRAGMENT:
				npc->npc_info |= NPC_IPFRAG;
				/* Fragment header has a fixed size. */
				hlen = sizeof(struct ip6_frag);
				break;
			case IPPROTO_AH:
				/* AH length is in 4-byte units, not
				 * counting the first 8 bytes. */
				hlen = (ip6e.ip6e_len + 2) << 2;
				break;
			default:
				/* Not an extension header: stop here. */
				hlen = 0;
				break;
			}

			if (!hlen) {
				break;
			}
			/* Step to the next header in the chain. */
			npc->npc_next_proto = ip6e.ip6e_nxt;
			npc->npc_hlen += hlen;
		}

		/* Cache: layer 3 - IPv6. */
		npc->npc_alen = sizeof(struct in6_addr);
		npc->npc_srcip = (npf_addr_t *)&ip6->ip6_src;
		npc->npc_dstip = (npf_addr_t *)&ip6->ip6_dst;
		npc->npc_info |= NPC_IP6;
		break;
	}
	default:
		return false;
	}

	return true;
}
393
394 /*
395 * npf_fetch_tcp: fetch, check and cache TCP header. If necessary,
396 * fetch and cache layer 3 as well.
397 */
398 bool
399 npf_fetch_tcp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
400 {
401 struct tcphdr *th;
402
403 /* Must have IP header processed for its length and protocol. */
404 if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
405 return false;
406 }
407 if (npf_cache_ipproto(npc) != IPPROTO_TCP) {
408 return false;
409 }
410 th = &npc->npc_l4.tcp;
411
412 /* Fetch TCP header. */
413 if (nbuf_advfetch(&nbuf, &n_ptr, npf_cache_hlen(npc),
414 sizeof(struct tcphdr), th)) {
415 return false;
416 }
417
418 /* Cache: layer 4 - TCP. */
419 npc->npc_info |= (NPC_LAYER4 | NPC_TCP);
420 return true;
421 }
422
423 /*
424 * npf_fetch_udp: fetch, check and cache UDP header. If necessary,
425 * fetch and cache layer 3 as well.
426 */
427 bool
428 npf_fetch_udp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
429 {
430 struct udphdr *uh;
431 u_int hlen;
432
433 /* Must have IP header processed for its length and protocol. */
434 if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
435 return false;
436 }
437 if (npf_cache_ipproto(npc) != IPPROTO_UDP) {
438 return false;
439 }
440 uh = &npc->npc_l4.udp;
441 hlen = npf_cache_hlen(npc);
442
443 /* Fetch UDP header. */
444 if (nbuf_advfetch(&nbuf, &n_ptr, hlen, sizeof(struct udphdr), uh)) {
445 return false;
446 }
447
448 /* Cache: layer 4 - UDP. */
449 npc->npc_info |= (NPC_LAYER4 | NPC_UDP);
450 return true;
451 }
452
453 /*
454 * npf_fetch_icmp: fetch ICMP code, type and possible query ID.
455 */
456 bool
457 npf_fetch_icmp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
458 {
459 struct icmp *ic;
460 u_int hlen, iclen;
461
462 /* Must have IP header processed for its length and protocol. */
463 if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
464 return false;
465 }
466 if (npf_cache_ipproto(npc) != IPPROTO_ICMP &&
467 npf_cache_ipproto(npc) != IPPROTO_ICMPV6) {
468 return false;
469 }
470 ic = &npc->npc_l4.icmp;
471 hlen = npf_cache_hlen(npc);
472
473 /* Fetch basic ICMP header, up to the "data" point. */
474 CTASSERT(offsetof(struct icmp, icmp_void) ==
475 offsetof(struct icmp6_hdr, icmp6_data32));
476
477 iclen = offsetof(struct icmp, icmp_void);
478 if (nbuf_advfetch(&nbuf, &n_ptr, hlen, iclen, ic)) {
479 return false;
480 }
481
482 /* Cache: layer 4 - ICMP. */
483 npc->npc_info |= (NPC_LAYER4 | NPC_ICMP);
484 return true;
485 }
486
487 /*
488 * npf_cache_all: general routine to cache all relevant IP (v4 or v6)
489 * and TCP, UDP or ICMP headers.
490 */
491 int
492 npf_cache_all(npf_cache_t *npc, nbuf_t *nbuf)
493 {
494 void *n_ptr = nbuf_dataptr(nbuf);
495
496 if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
497 return npc->npc_info;
498 }
499 if (npf_iscached(npc, NPC_IPFRAG)) {
500 return npc->npc_info;
501 }
502 switch (npf_cache_ipproto(npc)) {
503 case IPPROTO_TCP:
504 (void)npf_fetch_tcp(npc, nbuf, n_ptr);
505 break;
506 case IPPROTO_UDP:
507 (void)npf_fetch_udp(npc, nbuf, n_ptr);
508 break;
509 case IPPROTO_ICMP:
510 case IPPROTO_ICMPV6:
511 (void)npf_fetch_icmp(npc, nbuf, n_ptr);
512 break;
513 }
514 return npc->npc_info;
515 }
516
/*
 * npf_rwrip: rewrite required IP address, update the cache.
 *
 * => On PFIL_OUT, rewrites the source address; otherwise the destination.
 * => Writes npc_alen bytes from 'addr' into the packet and mirrors the
 *    change into the cached address.
 *
 * NOTE(review): the field offsets below come from struct ip (IPv4),
 * while the KASSERT admits NPC_IP6 as well; struct ip6_hdr has different
 * src/dst offsets.  Presumably IPv6 rewriting is not actually used here
 * (npf_rwrcksum bails out for IPv6 too) - confirm before relying on
 * this path for IPv6.
 */
bool
npf_rwrip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    npf_addr_t *addr)
{
	npf_addr_t *oaddr;
	u_int offby;

	KASSERT(npf_iscached(npc, NPC_IP46));

	if (di == PFIL_OUT) {
		/* Rewrite source address, if outgoing. */
		offby = offsetof(struct ip, ip_src);
		oaddr = npc->npc_srcip;
	} else {
		/* Rewrite destination, if incoming. */
		offby = offsetof(struct ip, ip_dst);
		oaddr = npc->npc_dstip;
	}

	/* Advance to the address and rewrite it. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, npc->npc_alen, addr))
		return false;

	/* Cache: IP address. */
	memcpy(oaddr, addr, npc->npc_alen);
	return true;
}
547
548 /*
549 * npf_rwrport: rewrite required TCP/UDP port, update the cache.
550 */
551 bool
552 npf_rwrport(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
553 in_port_t port)
554 {
555 const int proto = npf_cache_ipproto(npc);
556 u_int offby = npf_cache_hlen(npc);
557 in_port_t *oport;
558
559 KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
560 KASSERT(proto == IPPROTO_TCP || proto == IPPROTO_UDP);
561
562 /* Offset to the port and pointer in the cache. */
563 if (proto == IPPROTO_TCP) {
564 struct tcphdr *th = &npc->npc_l4.tcp;
565 if (di == PFIL_OUT) {
566 CTASSERT(offsetof(struct tcphdr, th_sport) == 0);
567 oport = &th->th_sport;
568 } else {
569 offby += offsetof(struct tcphdr, th_dport);
570 oport = &th->th_dport;
571 }
572 } else {
573 struct udphdr *uh = &npc->npc_l4.udp;
574 if (di == PFIL_OUT) {
575 CTASSERT(offsetof(struct udphdr, uh_sport) == 0);
576 oport = &uh->uh_sport;
577 } else {
578 offby += offsetof(struct udphdr, uh_dport);
579 oport = &uh->uh_dport;
580 }
581 }
582
583 /* Advance and rewrite the port. */
584 if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(in_port_t), &port))
585 return false;
586
587 /* Cache: TCP/UDP port. */
588 *oport = port;
589 return true;
590 }
591
/*
 * npf_rwrcksum: rewrite IPv4 and/or TCP/UDP checksum, update the cache.
 *
 * => 'addr' is the new address; 'port' is the new port, or 0 when no
 *    port rewrite is performed.
 * => 'di' selects source fields (PFIL_OUT) or destination fields.
 * => IPv6 is not yet supported: returns false for NPC_IP6.
 */
bool
npf_rwrcksum(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    npf_addr_t *addr, in_port_t port)
{
	const int proto = npf_cache_ipproto(npc);
	npf_addr_t *oaddr;
	in_port_t *oport;
	uint16_t *cksum;
	u_int offby;

	/* Checksum update for IPv4 header. */
	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = &npc->npc_ip.v4;
		uint16_t ipsum;

		/* Incremental update for the address change (RFC 1624). */
		oaddr = (di == PFIL_OUT) ? npc->npc_srcip : npc->npc_dstip;
		ipsum = npf_addr_cksum(ip->ip_sum, npc->npc_alen, oaddr, addr);

		/* Advance to the IPv4 checksum and rewrite it. */
		offby = offsetof(struct ip, ip_sum);
		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(ipsum), &ipsum))
			return false;

		ip->ip_sum = ipsum;
		/* Remaining distance from ip_sum to the layer 4 header. */
		offby = npf_cache_hlen(npc) - offby;
	} else {
		/* No checksum for IPv6. */
		KASSERT(npf_iscached(npc, NPC_IP6));
		/* NOTE(review): dead stores - the function returns below. */
		oaddr = NULL;
		offby = 0;
		return false; /* XXX: Not yet supported. */
	}

	/* Determine whether TCP/UDP checksum update is needed. */
	if (proto == IPPROTO_ICMP || port == 0) {
		return true;
	}
	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));

	/* Calculate TCP/UDP checksum. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = &npc->npc_l4.tcp;

		cksum = &th->th_sum;
		offby += offsetof(struct tcphdr, th_sum);
		oport = (di == PFIL_OUT) ? &th->th_sport : &th->th_dport;
	} else {
		struct udphdr *uh = &npc->npc_l4.udp;

		KASSERT(proto == IPPROTO_UDP);
		cksum = &uh->uh_sum;
		if (*cksum == 0) {
			/* No need to update: the UDP checksum is optional. */
			return true;
		}
		offby += offsetof(struct udphdr, uh_sum);
		oport = (di == PFIL_OUT) ? &uh->uh_sport : &uh->uh_dport;
	}
	/* Fix up for both the address and the port change. */
	*cksum = npf_addr_cksum(*cksum, npc->npc_alen, oaddr, addr);
	*cksum = npf_fixup16_cksum(*cksum, *oport, port);

	/* Advance to TCP/UDP checksum and rewrite it. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(uint16_t), cksum)) {
		return false;
	}
	return true;
}
662
663 static inline bool
664 npf_normalize_ip4(npf_cache_t *npc, nbuf_t *nbuf,
665 bool rnd, bool no_df, int minttl)
666 {
667 void *n_ptr = nbuf_dataptr(nbuf);
668 struct ip *ip = &npc->npc_ip.v4;
669 uint16_t cksum = ip->ip_sum;
670 uint16_t ip_off = ip->ip_off;
671 uint8_t ttl = ip->ip_ttl;
672 u_int offby = 0;
673
674 KASSERT(rnd || minttl || no_df);
675
676 /* Randomize IPv4 ID. */
677 if (rnd) {
678 uint16_t oid = ip->ip_id, nid;
679
680 nid = htons(ip_randomid(ip_ids, 0));
681 offby = offsetof(struct ip, ip_id);
682 if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(nid), &nid)) {
683 return false;
684 }
685 cksum = npf_fixup16_cksum(cksum, oid, nid);
686 ip->ip_id = nid;
687 }
688
689 /* IP_DF flag cleansing. */
690 if (no_df && (ip_off & htons(IP_DF)) != 0) {
691 uint16_t nip_off = ip_off & ~htons(IP_DF);
692
693 if (nbuf_advstore(&nbuf, &n_ptr,
694 offsetof(struct ip, ip_off) - offby,
695 sizeof(uint16_t), &nip_off)) {
696 return false;
697 }
698 cksum = npf_fixup16_cksum(cksum, ip_off, nip_off);
699 ip->ip_off = nip_off;
700 offby = offsetof(struct ip, ip_off);
701 }
702
703 /* Enforce minimum TTL. */
704 if (minttl && ttl < minttl) {
705 if (nbuf_advstore(&nbuf, &n_ptr,
706 offsetof(struct ip, ip_ttl) - offby,
707 sizeof(uint8_t), &minttl)) {
708 return false;
709 }
710 cksum = npf_fixup16_cksum(cksum, ttl, minttl);
711 ip->ip_ttl = minttl;
712 offby = offsetof(struct ip, ip_ttl);
713 }
714
715 /* Update IP checksum. */
716 offby = offsetof(struct ip, ip_sum) - offby;
717 if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
718 return false;
719 }
720 ip->ip_sum = cksum;
721 return true;
722 }
723
724 bool
725 npf_normalize(npf_cache_t *npc, nbuf_t *nbuf,
726 bool no_df, bool rnd, u_int minttl, u_int maxmss)
727 {
728 void *n_ptr = nbuf_dataptr(nbuf);
729 struct tcphdr *th = &npc->npc_l4.tcp;
730 uint16_t cksum, mss;
731 u_int offby;
732 int wscale;
733
734 /* Normalize IPv4. */
735 if (npf_iscached(npc, NPC_IP4) && (rnd || minttl)) {
736 if (!npf_normalize_ip4(npc, nbuf, rnd, no_df, minttl)) {
737 return false;
738 }
739 } else if (!npf_iscached(npc, NPC_IP4)) {
740 /* XXX: no IPv6 */
741 return false;
742 }
743
744 /*
745 * TCP Maximum Segment Size (MSS) "clamping". Only if SYN packet.
746 * Fetch MSS and check whether rewrite to lower is needed.
747 */
748 if (maxmss == 0 || !npf_iscached(npc, NPC_TCP) ||
749 (th->th_flags & TH_SYN) == 0) {
750 /* Not required; done. */
751 return true;
752 }
753 mss = 0;
754 if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
755 return false;
756 }
757 if (ntohs(mss) <= maxmss) {
758 return true;
759 }
760
761 /* Calculate TCP checksum, then rewrite MSS and the checksum. */
762 maxmss = htons(maxmss);
763 cksum = npf_fixup16_cksum(th->th_sum, mss, maxmss);
764 th->th_sum = cksum;
765 mss = maxmss;
766 if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
767 return false;
768 }
769 offby = npf_cache_hlen(npc) + offsetof(struct tcphdr, th_sum);
770 if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
771 return false;
772 }
773 return true;
774 }
775
776 #if defined(DDB) || defined(_NPF_TESTING)
777
778 void
779 npf_addr_dump(const npf_addr_t *addr)
780 {
781 printf("IP[%x:%x:%x:%x]\n",
782 addr->s6_addr32[0], addr->s6_addr32[1],
783 addr->s6_addr32[2], addr->s6_addr32[3]);
784 }
785
786 #endif
787