npf_inet.c revision 1.8 1 /* $NetBSD: npf_inet.c,v 1.8 2011/11/06 02:49:03 rmind Exp $ */
2
3 /*-
4 * Copyright (c) 2009-2011 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Various procotol related helper routines.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: npf_inet.c,v 1.8 2011/11/06 02:49:03 rmind Exp $");
38
39 #include <sys/param.h>
40 #include <sys/kernel.h>
41
42 #include <net/pfil.h>
43 #include <net/if.h>
44 #include <net/ethertypes.h>
45 #include <net/if_ether.h>
46
47 #include <netinet/in_systm.h>
48 #include <netinet/in.h>
49 #include <netinet/in_var.h>
50 #include <netinet/ip.h>
51 #include <netinet/ip6.h>
52 #include <netinet/tcp.h>
53 #include <netinet/udp.h>
54 #include <netinet/ip_icmp.h>
55
56 #include "npf_impl.h"
57
/*
 * npf_fixup16_cksum: incrementally update an Internet checksum when a
 * 16-bit datum in the covered data changes from odatum to ndatum.
 *
 * => All arguments and the result are in network byte-order.
 */
uint16_t
npf_fixup16_cksum(uint16_t cksum, uint16_t odatum, uint16_t ndatum)
{
	uint32_t acc;

	/* RFC 1624: HC' = ~(~HC + ~m + m'), in ones' complement. */
	acc = (uint16_t)~ntohs(cksum);
	acc += (uint16_t)~ntohs(odatum);
	acc += ntohs(ndatum);

	/* Fold any carries back into the low 16 bits. */
	while (acc >> 16) {
		acc = (acc & 0xffff) + (acc >> 16);
	}
	return htons((uint16_t)~acc);
}
78
/*
 * npf_fixup32_cksum: update a checksum for a 32-bit datum change by
 * applying the 16-bit fixup to each half of the old/new values.
 */
uint16_t
npf_fixup32_cksum(uint16_t cksum, uint32_t odatum, uint32_t ndatum)
{
	uint16_t sum;

	sum = npf_fixup16_cksum(cksum, (uint16_t)odatum, (uint16_t)ndatum);
	sum = npf_fixup16_cksum(sum, (uint16_t)(odatum >> 16),
	    (uint16_t)(ndatum >> 16));
	return sum;
}
87
88 /*
89 * npf_addr_cksum: calculate checksum of the address, either IPv4 or IPv6.
90 */
91 uint16_t
92 npf_addr_cksum(uint16_t cksum, int sz, npf_addr_t *oaddr, npf_addr_t *naddr)
93 {
94 uint32_t *oip32 = (uint32_t *)oaddr, *nip32 = (uint32_t *)naddr;
95
96 KASSERT(sz % sizeof(uint32_t) == 0);
97 do {
98 cksum = npf_fixup32_cksum(cksum, *oip32++, *nip32++);
99 sz -= sizeof(uint32_t);
100 } while (sz);
101
102 return cksum;
103 }
104
105 /*
106 * npf_addr_sum: provide IP address as a summed (if needed) 32-bit integer.
107 * Note: used for hash function.
108 */
109 uint32_t
110 npf_addr_sum(const int sz, const npf_addr_t *a1, const npf_addr_t *a2)
111 {
112 uint32_t mix = 0;
113 int i;
114
115 KASSERT(sz > 0 && a1 != NULL && a2 != NULL);
116
117 for (i = 0; i < (sz >> 2); i++) {
118 mix += a1->s6_addr32[i];
119 mix += a2->s6_addr32[i];
120 }
121 return mix;
122 }
123
124 /*
125 * npf_tcpsaw: helper to fetch SEQ, ACK, WIN and return TCP data length.
126 * Returns all values in host byte-order.
127 */
128 int
129 npf_tcpsaw(npf_cache_t *npc, nbuf_t *nbuf, tcp_seq *seq, tcp_seq *ack,
130 uint32_t *win)
131 {
132 struct tcphdr *th = &npc->npc_l4.tcp;
133 u_int thlen;
134
135 KASSERT(npf_iscached(npc, NPC_TCP));
136
137 *seq = ntohl(th->th_seq);
138 *ack = ntohl(th->th_ack);
139 *win = (uint32_t)ntohs(th->th_win);
140 thlen = th->th_off << 2;
141
142 if (npf_iscached(npc, NPC_IP4)) {
143 struct ip *ip = &npc->npc_ip.v4;
144 return ntohs(ip->ip_len) - npf_cache_hlen(npc, nbuf) - thlen;
145 } else {
146 KASSERT(npf_iscached(npc, NPC_IP6));
147 struct ip6_hdr *ip6 = &npc->npc_ip.v6;
148 return ntohs(ip6->ip6_plen) - thlen;
149 }
150 return 0;
151 }
152
/*
 * npf_fetch_tcpopts: parse and return TCP options.
 *
 * => Requires cached IP and TCP headers (NPC_IP46 and NPC_TCP).
 * => If mss points to a non-zero value, the MSS option in the packet
 *    is overwritten with that value; otherwise the current MSS is
 *    fetched into *mss.  The window-scale shift, if present, is
 *    returned via wscale (clamped to TCP_MAX_WINSHIFT).
 * => Returns true if the option list was walked to completion.
 */
bool
npf_fetch_tcpopts(const npf_cache_t *npc, nbuf_t *nbuf,
    uint16_t *mss, int *wscale)
{
	void *n_ptr = nbuf_dataptr(nbuf);
	const struct tcphdr *th = &npc->npc_l4.tcp;
	int topts_len, step;
	uint16_t val16;
	uint8_t val;

	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(npf_iscached(npc, NPC_TCP));
	/* Determine if there are any TCP options, get their length. */
	topts_len = (th->th_off << 2) - sizeof(struct tcphdr);
	if (topts_len <= 0) {
		/* No options. */
		return false;
	}
	KASSERT(topts_len <= MAX_TCPOPTLEN);

	/* First step: IP and TCP header up to options. */
	step = npf_cache_hlen(npc, nbuf) + sizeof(struct tcphdr);
next:
	/* Fetch the option kind byte, 'step' bytes further into the packet. */
	if (nbuf_advfetch(&nbuf, &n_ptr, step, sizeof(val), &val)) {
		return false;
	}
	switch (val) {
	case TCPOPT_EOL:
		/* Done. */
		return true;
	case TCPOPT_NOP:
		/* Single byte of padding; advance by one. */
		topts_len--;
		step = 1;
		break;
	case TCPOPT_MAXSEG:
		/*
		 * XXX: clean this mess.
		 */
		/*
		 * Advance by 2 (past the kind and length bytes) to the
		 * 16-bit MSS value: either store the caller-supplied
		 * value or fetch the current one.
		 */
		if (mss && *mss) {
			val16 = *mss;
			if (nbuf_advstore(&nbuf, &n_ptr, 2,
			    sizeof(val16), &val16))
				return false;
		} else if (nbuf_advfetch(&nbuf, &n_ptr, 2,
		    sizeof(val16), &val16)) {
			return false;
		}
		if (mss) {
			*mss = val16;
		}
		topts_len -= TCPOLEN_MAXSEG;
		step = sizeof(val16);
		break;
	case TCPOPT_WINDOW:
		/* Advance past kind/length; fetch and clamp the shift. */
		if (nbuf_advfetch(&nbuf, &n_ptr, 2, sizeof(val), &val)) {
			return false;
		}
		*wscale = (val > TCP_MAX_WINSHIFT) ? TCP_MAX_WINSHIFT : val;
		topts_len -= TCPOLEN_WINDOW;
		step = sizeof(val);
		break;
	default:
		/* Unknown option: fetch its length byte and skip over it. */
		if (nbuf_advfetch(&nbuf, &n_ptr, 1, sizeof(val), &val)) {
			return false;
		}
		/*
		 * NOTE(review): an option exactly filling the remaining
		 * space (val == topts_len) is rejected here by the ">="
		 * check — confirm whether that is intentional.
		 */
		if (val < 2 || val >= topts_len) {
			return false;
		}
		topts_len -= val;
		step = val - 1;
	}
	/* Any options left? */
	if (__predict_true(topts_len > 0)) {
		goto next;
	}
	return true;
}
233
/*
 * npf_fetch_ip: fetch, check and cache the IP header (IPv4 or IPv6).
 *
 * => Fills the layer-3 fields of the cache: address pointers and size,
 *    header length, next protocol, and the NPC_IP4/NPC_IP6/NPC_IPFRAG
 *    info flags.
 */
bool
npf_fetch_ip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
{
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint8_t ver;

	/* The IP version is in the high nibble of the first byte. */
	if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(uint8_t), &ver)) {
		return false;
	}
	switch (ver >> 4) {
	case IPVERSION:
		/* IPv4 */
		ip = &npc->npc_ip.v4;
		/* Fetch the header. */
		if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(struct ip), ip)) {
			return false;
		}
		/* Check header length and fragment offset. */
		if ((ip->ip_hl << 2) < sizeof(struct ip)) {
			return false;
		}
		/* Any bits besides DF and the reserved flag mean a fragment. */
		if (ip->ip_off & ~htons(IP_DF | IP_RF)) {
			/* Note fragmentation. */
			npc->npc_info |= NPC_IPFRAG;
		}
		/* Cache: layer 3 - IPv4. */
		npc->npc_ipsz = sizeof(struct in_addr);
		npc->npc_srcip = (npf_addr_t *)&ip->ip_src;
		npc->npc_dstip = (npf_addr_t *)&ip->ip_dst;
		npc->npc_info |= NPC_IP4;
		npc->npc_hlen = ip->ip_hl << 2;
		npc->npc_next_proto = npc->npc_ip.v4.ip_p;
		break;

	case (IPV6_VERSION >> 4):
		/* IPv6 */
		ip6 = &npc->npc_ip.v6;
		if (nbuf_fetch_datum(nbuf, n_ptr, sizeof(struct ip6_hdr), ip6)) {
			return false;
		}

		/* Walk the chain of extension headers to find layer 4. */
		size_t toskip = sizeof(struct ip6_hdr);
		bool processing_ends = false;
		npc->npc_next_proto = ip6->ip6_nxt;
		npc->npc_hlen = 0;

		do {
			struct ip6_ext ip6e;

			/*
			 * Advance by the length of the previous known header
			 * and fetch the next extension header's length.
			 */
			if (nbuf_advfetch(&nbuf, &n_ptr, toskip,
			    sizeof(struct ip6_ext), &ip6e)) {
				return false;
			}

			switch (npc->npc_next_proto) {
			case IPPROTO_DSTOPTS:
			case IPPROTO_ROUTING:
				/* Length in 8-byte units, excl. the first 8. */
				toskip = (ip6e.ip6e_len + 1) << 3;
				break;
			case IPPROTO_FRAGMENT:
				/* Fixed-size header; note fragmentation. */
				npc->npc_info |= NPC_IPFRAG;
				toskip = sizeof(struct ip6_frag);
				break;
			case IPPROTO_AH:
				/* AH length is in 4-byte units, minus 2. */
				toskip = (ip6e.ip6e_len + 2) << 2;
				break;
			default:
				/* Not an extension header: layer 4 reached. */
				processing_ends = true;
				break;
			}

			/*
			 * NOTE(review): when layer 4 is reached, 'toskip'
			 * still holds the previous header's size, which is
			 * added to npc_hlen once more here.  For packets
			 * with extension headers this appears to count the
			 * last extension twice while never counting the
			 * fixed IPv6 header — confirm the intended npc_hlen
			 * accounting against the nbuf advance semantics.
			 */
			npc->npc_hlen += toskip;

			if (!processing_ends) {
				npc->npc_next_proto = ip6e.ip6e_nxt;
			}
		} while (!processing_ends);

		/* Cache: layer 3 - IPv6. */
		npc->npc_ipsz = sizeof(struct in6_addr);
		npc->npc_srcip = (npf_addr_t *)&ip6->ip6_src;
		npc->npc_dstip = (npf_addr_t *)&ip6->ip6_dst;
		npc->npc_info |= NPC_IP6;
		break;
	default:
		/* Unknown IP version. */
		return false;
	}
	return true;
}
329
330 bool
331 npf_fetch_tcp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
332 {
333 struct tcphdr *th;
334
335 /* Must have IP header processed for its length and protocol. */
336 if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
337 return false;
338 }
339 if (npf_cache_ipproto(npc) != IPPROTO_TCP) {
340 return false;
341 }
342 th = &npc->npc_l4.tcp;
343
344 /* Fetch TCP header. */
345 if (nbuf_advfetch(&nbuf, &n_ptr, npf_cache_hlen(npc, nbuf),
346 sizeof(struct tcphdr), th)) {
347 return false;
348 }
349
350 /* Cache: layer 4 - TCP. */
351 npc->npc_info |= (NPC_LAYER4 | NPC_TCP);
352 return true;
353 }
354
355 bool
356 npf_fetch_udp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
357 {
358 struct ip *ip = &npc->npc_ip.v4;
359 struct udphdr *uh;
360 size_t hlen;
361
362 /* Must have IP header processed for its length and protocol. */
363 if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
364 return false;
365 }
366 if (ip->ip_p != IPPROTO_UDP) {
367 return false;
368 }
369 uh = &npc->npc_l4.udp;
370 hlen = npf_cache_hlen(npc, nbuf);
371
372 /* Fetch ICMP header. */
373 if (nbuf_advfetch(&nbuf, &n_ptr, hlen, sizeof(struct udphdr), uh)) {
374 return false;
375 }
376
377 /* Cache: layer 4 - ICMP. */
378 npc->npc_info |= (NPC_LAYER4 | NPC_UDP);
379 return true;
380 }
381
382 /*
383 * npf_fetch_icmp: fetch ICMP code, type and possible query ID.
384 *
385 * => Stores both all fetched items into the cache.
386 */
387 bool
388 npf_fetch_icmp(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr)
389 {
390 struct ip *ip = &npc->npc_ip.v4;
391 struct icmp *ic;
392 u_int iclen;
393 size_t hlen;
394
395 /* Must have IP header processed for its length and protocol. */
396 if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
397 return false;
398 }
399 if (ip->ip_p != IPPROTO_ICMP) {
400 return false;
401 }
402 ic = &npc->npc_l4.icmp;
403 hlen = npf_cache_hlen(npc, nbuf);
404
405 /* Fetch basic ICMP header, up to the "data" point. */
406 iclen = offsetof(struct icmp, icmp_data);
407 if (nbuf_advfetch(&nbuf, &n_ptr, hlen, iclen, ic)) {
408 return false;
409 }
410
411 /* Cache: layer 4 - ICMP. */
412 npc->npc_info |= (NPC_LAYER4 | NPC_ICMP);
413 return true;
414 }
415
416 /*
417 * npf_cache_all: general routine to cache all relevant IP (v4 or v6)
418 * and TCP, UDP or ICMP data.
419 */
420 bool
421 npf_cache_all(npf_cache_t *npc, nbuf_t *nbuf)
422 {
423 void *n_ptr = nbuf_dataptr(nbuf);
424
425 if (!npf_iscached(npc, NPC_IP46) && !npf_fetch_ip(npc, nbuf, n_ptr)) {
426 return false;
427 }
428 if (npf_iscached(npc, NPC_IPFRAG)) {
429 return true;
430 }
431 switch (npf_cache_ipproto(npc)) {
432 case IPPROTO_TCP:
433 return npf_fetch_tcp(npc, nbuf, n_ptr);
434 case IPPROTO_UDP:
435 return npf_fetch_udp(npc, nbuf, n_ptr);
436 case IPPROTO_ICMP:
437 return npf_fetch_icmp(npc, nbuf, n_ptr);
438 }
439 return false;
440 }
441
442 /*
443 * npf_rwrip: rewrite required IP address, update the cache.
444 */
445 bool
446 npf_rwrip(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
447 npf_addr_t *addr)
448 {
449 npf_addr_t *oaddr;
450 u_int offby;
451
452 KASSERT(npf_iscached(npc, NPC_IP46));
453
454 if (di == PFIL_OUT) {
455 /* Rewrite source address, if outgoing. */
456 offby = offsetof(struct ip, ip_src);
457 oaddr = npc->npc_srcip;
458 } else {
459 /* Rewrite destination, if incoming. */
460 offby = offsetof(struct ip, ip_dst);
461 oaddr = npc->npc_dstip;
462 }
463
464 /* Advance to the address and rewrite it. */
465 if (nbuf_advstore(&nbuf, &n_ptr, offby, npc->npc_ipsz, addr))
466 return false;
467
468 /* Cache: IP address. */
469 memcpy(oaddr, addr, npc->npc_ipsz);
470 return true;
471 }
472
473 /*
474 * npf_rwrport: rewrite required TCP/UDP port, update the cache.
475 */
476 bool
477 npf_rwrport(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
478 in_port_t port)
479 {
480 const int proto = npf_cache_ipproto(npc);
481 u_int offby = npf_cache_hlen(npc, nbuf);
482 in_port_t *oport;
483
484 KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
485 KASSERT(proto == IPPROTO_TCP || proto == IPPROTO_UDP);
486
487 /* Offset to the port and pointer in the cache. */
488 if (proto == IPPROTO_TCP) {
489 struct tcphdr *th = &npc->npc_l4.tcp;
490 if (di == PFIL_OUT) {
491 CTASSERT(offsetof(struct tcphdr, th_sport) == 0);
492 oport = &th->th_sport;
493 } else {
494 offby += offsetof(struct tcphdr, th_dport);
495 oport = &th->th_dport;
496 }
497 } else {
498 struct udphdr *uh = &npc->npc_l4.udp;
499 if (di == PFIL_OUT) {
500 CTASSERT(offsetof(struct udphdr, uh_sport) == 0);
501 oport = &uh->uh_sport;
502 } else {
503 offby += offsetof(struct udphdr, uh_dport);
504 oport = &uh->uh_dport;
505 }
506 }
507
508 /* Advance and rewrite the port. */
509 if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(in_port_t), &port))
510 return false;
511
512 /* Cache: TCP/UDP port. */
513 *oport = port;
514 return true;
515 }
516
/*
 * npf_rwrcksum: rewrite IPv4 and/or TCP/UDP checksum, update the cache.
 *
 * => Incrementally updates the IPv4 header checksum for the address
 *    change (old cached address -> addr), then the TCP/UDP checksum for
 *    the address and port change.
 * => IPv6 is not yet supported and fails early.
 * => 'offby' tracks how far n_ptr has already advanced, so subsequent
 *    stores use offsets relative to the current position.
 */
bool
npf_rwrcksum(npf_cache_t *npc, nbuf_t *nbuf, void *n_ptr, const int di,
    npf_addr_t *addr, in_port_t port)
{
	const int proto = npf_cache_ipproto(npc);
	npf_addr_t *oaddr;
	in_port_t *oport;
	uint16_t *cksum;
	u_int offby;

	/* Checksum update for IPv4 header. */
	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = &npc->npc_ip.v4;
		uint16_t ipsum;

		/* Old address: source on output, destination on input. */
		oaddr = (di == PFIL_OUT) ? npc->npc_srcip : npc->npc_dstip;
		ipsum = npf_addr_cksum(ip->ip_sum, npc->npc_ipsz, oaddr, addr);

		/* Advance to the IPv4 checksum and rewrite it. */
		offby = offsetof(struct ip, ip_sum);
		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(ipsum), &ipsum))
			return false;

		ip->ip_sum = ipsum;
		/* Remaining distance from n_ptr to the end of the IP header. */
		offby = npf_cache_hlen(npc, nbuf) - offby;
	} else {
		/* No checksum for IPv6. */
		KASSERT(npf_iscached(npc, NPC_IP6));
		/* NOTE(review): these two stores are dead — the return
		 * below makes them unreachable by any later code. */
		oaddr = NULL;
		offby = 0;
		return false;	/* XXX: Not yet supported. */
	}

	/* Determine whether TCP/UDP checksum update is needed. */
	if (proto == IPPROTO_ICMP || port == 0) {
		return true;
	}
	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));

	/* Calculate TCP/UDP checksum. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = &npc->npc_l4.tcp;

		cksum = &th->th_sum;
		offby += offsetof(struct tcphdr, th_sum);
		oport = (di == PFIL_OUT) ? &th->th_sport : &th->th_dport;
	} else {
		struct udphdr *uh = &npc->npc_l4.udp;

		KASSERT(proto == IPPROTO_UDP);
		cksum = &uh->uh_sum;
		if (*cksum == 0) {
			/* No need to update. */
			return true;
		}
		offby += offsetof(struct udphdr, uh_sum);
		oport = (di == PFIL_OUT) ? &uh->uh_sport : &uh->uh_dport;
	}
	/* Fold in the address change, then the port change. */
	*cksum = npf_addr_cksum(*cksum, npc->npc_ipsz, oaddr, addr);
	*cksum = npf_fixup16_cksum(*cksum, *oport, port);

	/* Advance to TCP/UDP checksum and rewrite it. */
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(uint16_t), cksum)) {
		return false;
	}
	return true;
}
587
/*
 * npf_normalize_ip4: in-place normalization of the IPv4 header:
 * optionally randomize the IP ID (rnd), clear the DF flag (no_df) and
 * enforce a minimum TTL (minttl).  The header checksum is incrementally
 * updated for every field changed.
 *
 * => 'offby' tracks how far n_ptr has already advanced into the header,
 *    so each store advances by the delta from the previous field.
 */
static inline bool
npf_normalize_ip4(npf_cache_t *npc, nbuf_t *nbuf,
    bool rnd, bool no_df, int minttl)
{
	void *n_ptr = nbuf_dataptr(nbuf);
	struct ip *ip = &npc->npc_ip.v4;
	uint16_t cksum = ip->ip_sum;
	uint16_t ip_off = ip->ip_off;
	uint8_t ttl = ip->ip_ttl;
	u_int offby = 0;

	KASSERT(rnd || minttl || no_df);

	/* Randomize IPv4 ID. */
	if (rnd) {
		uint16_t oid = ip->ip_id, nid;

		nid = htons(ip_randomid(ip_ids, 0));
		offby = offsetof(struct ip, ip_id);
		if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(nid), &nid)) {
			return false;
		}
		cksum = npf_fixup16_cksum(cksum, oid, nid);
		ip->ip_id = nid;
	}

	/* IP_DF flag cleansing. */
	if (no_df && (ip_off & htons(IP_DF)) != 0) {
		uint16_t nip_off = ip_off & ~htons(IP_DF);

		if (nbuf_advstore(&nbuf, &n_ptr,
		    offsetof(struct ip, ip_off) - offby,
		    sizeof(uint16_t), &nip_off)) {
			return false;
		}
		cksum = npf_fixup16_cksum(cksum, ip_off, nip_off);
		ip->ip_off = nip_off;
		offby = offsetof(struct ip, ip_off);
	}

	/* Enforce minimum TTL. */
	if (minttl && ttl < minttl) {
		/*
		 * NOTE(review): only sizeof(uint8_t) bytes of the int-sized
		 * 'minttl' are stored; on a big-endian machine the first
		 * byte is the MSB (zero for small values) — confirm the
		 * nbuf_advstore semantics here.
		 */
		if (nbuf_advstore(&nbuf, &n_ptr,
		    offsetof(struct ip, ip_ttl) - offby,
		    sizeof(uint8_t), &minttl)) {
			return false;
		}
		/*
		 * NOTE(review): ttl/minttl are 8-bit quantities fed to a
		 * 16-bit checksum fixup — verify the byte-order handling.
		 */
		cksum = npf_fixup16_cksum(cksum, ttl, minttl);
		ip->ip_ttl = minttl;
		offby = offsetof(struct ip, ip_ttl);
	}

	/* Update IP checksum. */
	offby = offsetof(struct ip, ip_sum) - offby;
	if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
		return false;
	}
	ip->ip_sum = cksum;
	return true;
}
648
649 bool
650 npf_normalize(npf_cache_t *npc, nbuf_t *nbuf,
651 bool no_df, bool rnd, u_int minttl, u_int maxmss)
652 {
653 void *n_ptr = nbuf_dataptr(nbuf);
654 struct tcphdr *th = &npc->npc_l4.tcp;
655 uint16_t cksum, mss;
656 int offby, wscale;
657
658 /* Normalize IPv4. */
659 if (npf_iscached(npc, NPC_IP4) && (rnd || minttl)) {
660 if (!npf_normalize_ip4(npc, nbuf, rnd, no_df, minttl)) {
661 return false;
662 }
663 } else if (!npf_iscached(npc, NPC_IP4)) {
664 /* XXX: no IPv6 */
665 return false;
666 }
667
668 /*
669 * TCP Maximum Segment Size (MSS) "clamping". Only if SYN packet.
670 * Fetch MSS and check whether rewrite to lower is needed.
671 */
672 if (maxmss == 0 || !npf_iscached(npc, NPC_TCP) ||
673 (th->th_flags & TH_SYN) == 0) {
674 /* Not required; done. */
675 return true;
676 }
677 mss = 0;
678 if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
679 return false;
680 }
681 if (ntohs(mss) <= maxmss) {
682 return true;
683 }
684
685 /* Calculate TCP checksum, then rewrite MSS and the checksum. */
686 maxmss = htons(maxmss);
687 cksum = npf_fixup16_cksum(th->th_sum, mss, maxmss);
688 th->th_sum = cksum;
689 mss = maxmss;
690 if (!npf_fetch_tcpopts(npc, nbuf, &mss, &wscale)) {
691 return false;
692 }
693 offby = npf_cache_hlen(npc, nbuf) + offsetof(struct tcphdr, th_sum);
694 if (nbuf_advstore(&nbuf, &n_ptr, offby, sizeof(cksum), &cksum)) {
695 return false;
696 }
697 return true;
698 }
699