/*	$NetBSD: npf_inet.c,v 1.40 2018/03/13 09:04:02 maxv Exp $	*/

/*-
 * Copyright (c) 2009-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Various protocol-related helper routines.
 *
 * This layer manipulates the npf_cache_t structure, i.e. it caches the
 * requested headers and records which information was cached in the
 * npc_info bit field.  It is also the responsibility of this layer to
 * update or invalidate the cache on rewrites (e.g. by the translation
 * routines).
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_inet.c,v 1.40 2018/03/13 09:04:02 maxv Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/ethertypes.h>
#include <net/if_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#endif

#include "npf_impl.h"

/*
 * npf_fixup{16,32}_cksum: incremental update of the Internet checksum.
 */

uint16_t
npf_fixup16_cksum(uint16_t cksum, uint16_t odatum, uint16_t ndatum)
{
	uint32_t sum;

	/*
	 * RFC 1624:
	 *	HC' = ~(~HC + ~m + m')
	 *
	 * Note: 1's complement sum is endian-independent (RFC 1071, page 2).
	 */
	sum = ~cksum & 0xffff;
	sum += (~odatum & 0xffff) + ndatum;
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);

	return ~sum & 0xffff;
}
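
/*
 * Usage sketch (illustrative only; the port value 12345 is hypothetical):
 * incrementally fix a TCP checksum when rewriting the source port, as
 * npf_rwrcksum() and npf_rwrport() below do.  The datum values are used
 * exactly as stored in the packet (network byte order); per RFC 1071 the
 * 1's complement sum is endian-independent.
 *
 *	struct tcphdr *th = npc->npc_l4.tcp;
 *	const in_port_t oport = th->th_sport, nport = htons(12345);
 *
 *	th->th_sum = npf_fixup16_cksum(th->th_sum, oport, nport);
 *	th->th_sport = nport;
 */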

uint16_t
npf_fixup32_cksum(uint16_t cksum, uint32_t odatum, uint32_t ndatum)
{
	uint32_t sum;

	/*
	 * Checksum the 32-bit datum as two 16-bit halves.  Note: the
	 * first 32->16 bit reduction is not necessary.
	 */
	sum = ~cksum & 0xffff;
	sum += (~odatum & 0xffff) + (ndatum & 0xffff);

	sum += (~odatum >> 16) + (ndatum >> 16);
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);
	return ~sum & 0xffff;
}

/*
 * npf_addr_cksum: calculate checksum of the address, either IPv4 or IPv6.
 */
uint16_t
npf_addr_cksum(uint16_t cksum, int sz, const npf_addr_t *oaddr,
    const npf_addr_t *naddr)
{
	const uint32_t *oip32 = (const uint32_t *)oaddr;
	const uint32_t *nip32 = (const uint32_t *)naddr;

	KASSERT(sz % sizeof(uint32_t) == 0);
	do {
		cksum = npf_fixup32_cksum(cksum, *oip32++, *nip32++);
		sz -= sizeof(uint32_t);
	} while (sz);

	return cksum;
}

/*
 * npf_addr_mix: mix two IP addresses (IPv4 or IPv6) into a single
 * 32-bit integer by XORing their words.
 * Note: used as a hash function input.
 */
uint32_t
npf_addr_mix(const int sz, const npf_addr_t *a1, const npf_addr_t *a2)
{
	uint32_t mix = 0;

	KASSERT(sz > 0 && a1 != NULL && a2 != NULL);

	for (int i = 0; i < (sz >> 2); i++) {
		mix ^= a1->word32[i];
		mix ^= a2->word32[i];
	}
	return mix;
}

/*
 * npf_addr_mask: apply the mask to a given address and store the result.
 */
void
npf_addr_mask(const npf_addr_t *addr, const npf_netmask_t mask,
    const int alen, npf_addr_t *out)
{
	const int nwords = alen >> 2;
	uint_fast8_t length = mask;

	/* Note: maximum length is 32 for IPv4 and 128 for IPv6. */
	KASSERT(length <= NPF_MAX_NETMASK);

	for (int i = 0; i < nwords; i++) {
		uint32_t wordmask;

		if (length >= 32) {
			wordmask = htonl(0xffffffff);
			length -= 32;
		} else if (length) {
			wordmask = htonl(0xffffffff << (32 - length));
			length = 0;
		} else {
			wordmask = 0;
		}
		out->word32[i] = addr->word32[i] & wordmask;
	}
}
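
/*
 * Example (illustrative only; the addresses are hypothetical): masking
 * the IPv4 address 10.1.2.3 with a /24 netmask yields 10.1.2.0, and with
 * a /20 netmask yields 10.1.0.0.
 *
 *	npf_addr_t out;
 *
 *	npf_addr_mask(addr, 24, sizeof(struct in_addr), &out);
 */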

/*
 * npf_addr_cmp: compare two addresses, either IPv4 or IPv6.
 *
 * => Returns 0 if equal, and negative/positive if less/greater accordingly.
 * => Ignores the mask if NPF_NO_NETMASK is specified.
 */
int
npf_addr_cmp(const npf_addr_t *addr1, const npf_netmask_t mask1,
    const npf_addr_t *addr2, const npf_netmask_t mask2, const int alen)
{
	npf_addr_t realaddr1, realaddr2;

	if (mask1 != NPF_NO_NETMASK) {
		npf_addr_mask(addr1, mask1, alen, &realaddr1);
		addr1 = &realaddr1;
	}
	if (mask2 != NPF_NO_NETMASK) {
		npf_addr_mask(addr2, mask2, alen, &realaddr2);
		addr2 = &realaddr2;
	}
	return memcmp(addr1, addr2, alen);
}

/*
 * npf_tcpsaw: helper to fetch SEQ, ACK, WIN and return TCP data length.
 *
 * => Returns all values in host byte-order.
 */
int
npf_tcpsaw(const npf_cache_t *npc, tcp_seq *seq, tcp_seq *ack, uint32_t *win)
{
	const struct tcphdr *th = npc->npc_l4.tcp;
	u_int thlen;

	KASSERT(npf_iscached(npc, NPC_TCP));

	*seq = ntohl(th->th_seq);
	*ack = ntohl(th->th_ack);
	*win = (uint32_t)ntohs(th->th_win);
	thlen = th->th_off << 2;

	if (npf_iscached(npc, NPC_IP4)) {
		const struct ip *ip = npc->npc_ip.v4;
		return ntohs(ip->ip_len) - npc->npc_hlen - thlen;
	} else if (npf_iscached(npc, NPC_IP6)) {
		const struct ip6_hdr *ip6 = npc->npc_ip.v6;
		return ntohs(ip6->ip6_plen) - thlen;
	}
	return 0;
}
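
/*
 * Usage sketch (illustrative only): fetch the sequence numbers and the
 * window of a cached TCP packet; the values and the returned data length
 * are in host byte order.
 *
 *	tcp_seq seq, ack;
 *	uint32_t win;
 *	int tcpdlen;
 *
 *	KASSERT(npf_iscached(npc, NPC_TCP));
 *	tcpdlen = npf_tcpsaw(npc, &seq, &ack, &win);
 */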

/*
 * npf_fetch_tcpopts: parse and return the TCP options.
 */
bool
npf_fetch_tcpopts(npf_cache_t *npc, uint16_t *mss, int *wscale)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const struct tcphdr *th = npc->npc_l4.tcp;
	int topts_len, step;
	uint8_t *nptr;
	uint8_t val;
	bool ok;

	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(npf_iscached(npc, NPC_TCP));

	/* Determine whether there are any TCP options and get their length. */
	topts_len = (th->th_off << 2) - sizeof(struct tcphdr);
	if (topts_len <= 0) {
		/* No options. */
		return false;
	}
	KASSERT(topts_len <= MAX_TCPOPTLEN);

	/* First step: IP and TCP header up to options. */
	step = npc->npc_hlen + sizeof(struct tcphdr);
	nbuf_reset(nbuf);
next:
	if ((nptr = nbuf_advance(nbuf, step, 1)) == NULL) {
		ok = false;
		goto done;
	}
	val = *nptr;

	switch (val) {
	case TCPOPT_EOL:
		/* Done. */
		ok = true;
		goto done;
	case TCPOPT_NOP:
		topts_len--;
		step = 1;
		break;
	case TCPOPT_MAXSEG:
		if ((nptr = nbuf_ensure_contig(nbuf, TCPOLEN_MAXSEG)) == NULL) {
			ok = false;
			goto done;
		}
		if (mss) {
			/*
			 * A non-zero *mss is written into the packet;
			 * otherwise, the MSS option value is fetched.
			 */
			if (*mss) {
				memcpy(nptr + 2, mss, sizeof(uint16_t));
			} else {
				memcpy(mss, nptr + 2, sizeof(uint16_t));
			}
		}
		topts_len -= TCPOLEN_MAXSEG;
		step = TCPOLEN_MAXSEG;
		break;
	case TCPOPT_WINDOW:
		/* TCP Window Scaling (RFC 1323). */
		if ((nptr = nbuf_ensure_contig(nbuf, TCPOLEN_WINDOW)) == NULL) {
			ok = false;
			goto done;
		}
		val = *(nptr + 2);
		*wscale = (val > TCP_MAX_WINSHIFT) ? TCP_MAX_WINSHIFT : val;
		topts_len -= TCPOLEN_WINDOW;
		step = TCPOLEN_WINDOW;
		break;
	default:
		if ((nptr = nbuf_ensure_contig(nbuf, 2)) == NULL) {
			ok = false;
			goto done;
		}
		val = *(nptr + 1);
		if (val < 2 || val > topts_len) {
			ok = false;
			goto done;
		}
		topts_len -= val;
		step = val;
	}

	/* Any options left? */
	if (__predict_true(topts_len > 0)) {
		goto next;
	}
	ok = true;
done:
	if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
		npf_recache(npc);
	}
	return ok;
}
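
/*
 * Usage sketch (illustrative only): fetch the MSS and the window scale
 * advertised in a cached TCP SYN.  A zero *mss requests a fetch of the
 * option value (stored in network byte order); a non-zero *mss would be
 * written into the packet instead.  The wscale value is left untouched
 * if the window scale option is absent.
 *
 *	uint16_t mss = 0;
 *	int wscale = 0;
 *
 *	if (npf_fetch_tcpopts(npc, &mss, &wscale)) {
 *		... use mss and wscale, if the options were present.
 *	}
 */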

static int
npf_cache_ip(npf_cache_t *npc, nbuf_t *nbuf)
{
	const void *nptr = nbuf_dataptr(nbuf);
	const uint8_t ver = *(const uint8_t *)nptr;
	int flags = 0;

	switch (ver >> 4) {
	case IPVERSION: {
		struct ip *ip;

		ip = nbuf_ensure_contig(nbuf, sizeof(struct ip));
		if (ip == NULL) {
			return NPC_FMTERR;
		}

		/* Check header length and fragment offset. */
		if ((u_int)(ip->ip_hl << 2) < sizeof(struct ip)) {
			return NPC_FMTERR;
		}
		if (ip->ip_off & ~htons(IP_DF | IP_RF)) {
			/* Note fragmentation. */
			flags |= NPC_IPFRAG;
		}

		/* Cache: layer 3 - IPv4. */
		npc->npc_alen = sizeof(struct in_addr);
		npc->npc_ips[NPF_SRC] = (npf_addr_t *)&ip->ip_src;
		npc->npc_ips[NPF_DST] = (npf_addr_t *)&ip->ip_dst;
		npc->npc_hlen = ip->ip_hl << 2;
		npc->npc_proto = ip->ip_p;

		npc->npc_ip.v4 = ip;
		flags |= NPC_IP4;
		break;
	}

	case (IPV6_VERSION >> 4): {
		struct ip6_hdr *ip6;
		struct ip6_ext *ip6e;
		struct ip6_frag *ip6f;
		size_t off, hlen;
		int frag_present;

		ip6 = nbuf_ensure_contig(nbuf, sizeof(struct ip6_hdr));
		if (ip6 == NULL) {
			return NPC_FMTERR;
		}

		/* Set initial next-protocol value. */
		hlen = sizeof(struct ip6_hdr);
		npc->npc_proto = ip6->ip6_nxt;
		npc->npc_hlen = hlen;

		frag_present = 0;

		/*
		 * Advance by the length of the current header.
		 */
		off = nbuf_offset(nbuf);
		while ((ip6e = nbuf_advance(nbuf, hlen, sizeof(*ip6e))) != NULL) {
			/*
			 * Determine whether we are going to continue.
			 */
			switch (npc->npc_proto) {
			case IPPROTO_HOPOPTS:
			case IPPROTO_DSTOPTS:
			case IPPROTO_ROUTING:
				hlen = (ip6e->ip6e_len + 1) << 3;
				break;
			case IPPROTO_FRAGMENT:
				if (frag_present++)
					return NPC_FMTERR;
				ip6f = nbuf_ensure_contig(nbuf, sizeof(*ip6f));
				if (ip6f == NULL)
					return NPC_FMTERR;

				hlen = 0;
				flags |= NPC_IPFRAG;

				break;
			case IPPROTO_AH:
				hlen = (ip6e->ip6e_len + 2) << 2;
				break;
			default:
				hlen = 0;
				break;
			}

			if (!hlen) {
				break;
			}
			npc->npc_proto = ip6e->ip6e_nxt;
			npc->npc_hlen += hlen;
		}

		/*
		 * Re-fetch the header pointers (nbufs might have been
		 * reallocated).  Restore the original offset (if any).
		 */
		nbuf_reset(nbuf);
		ip6 = nbuf_dataptr(nbuf);
		if (off) {
			nbuf_advance(nbuf, off, 0);
		}

		/* Cache: layer 3 - IPv6. */
		npc->npc_alen = sizeof(struct in6_addr);
		npc->npc_ips[NPF_SRC] = (npf_addr_t *)&ip6->ip6_src;
		npc->npc_ips[NPF_DST] = (npf_addr_t *)&ip6->ip6_dst;

		npc->npc_ip.v6 = ip6;
		flags |= NPC_IP6;
		break;
	}
	default:
		break;
	}
	return flags;
}

/*
 * npf_cache_all: general routine to cache all relevant IP (v4 or v6)
 * and TCP, UDP or ICMP headers.
 *
 * => The nbuf offset shall be set accordingly.
 */
int
npf_cache_all(npf_cache_t *npc)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	int flags, l4flags;
	u_int hlen;

	/*
	 * This routine is the main point where the references are cached,
	 * therefore clear the flag as we reset.
	 */
again:
	nbuf_unset_flag(nbuf, NBUF_DATAREF_RESET);

	/*
	 * First, cache the L3 header (IPv4 or IPv6).  If the IP packet is
	 * fragmented, then we cannot look into the L4 header.
	 */
	flags = npf_cache_ip(npc, nbuf);
	if ((flags & NPC_IP46) == 0 || (flags & NPC_IPFRAG) != 0 ||
	    (flags & NPC_FMTERR) != 0) {
		nbuf_unset_flag(nbuf, NBUF_DATAREF_RESET);
		npc->npc_info |= flags;
		return flags;
	}
	hlen = npc->npc_hlen;

	switch (npc->npc_proto) {
	case IPPROTO_TCP:
		/* Cache: layer 4 - TCP. */
		npc->npc_l4.tcp = nbuf_advance(nbuf, hlen,
		    sizeof(struct tcphdr));
		l4flags = NPC_LAYER4 | NPC_TCP;
		break;
	case IPPROTO_UDP:
		/* Cache: layer 4 - UDP. */
		npc->npc_l4.udp = nbuf_advance(nbuf, hlen,
		    sizeof(struct udphdr));
		l4flags = NPC_LAYER4 | NPC_UDP;
		break;
	case IPPROTO_ICMP:
		/* Cache: layer 4 - ICMPv4. */
		npc->npc_l4.icmp = nbuf_advance(nbuf, hlen,
		    offsetof(struct icmp, icmp_void));
		l4flags = NPC_LAYER4 | NPC_ICMP;
		break;
	case IPPROTO_ICMPV6:
		/* Cache: layer 4 - ICMPv6. */
		npc->npc_l4.icmp6 = nbuf_advance(nbuf, hlen,
		    offsetof(struct icmp6_hdr, icmp6_data32));
		l4flags = NPC_LAYER4 | NPC_ICMP;
		break;
	default:
		l4flags = 0;
		break;
	}

	if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
		goto again;
	}

	/* Add the L4 flags if nbuf_advance() succeeded. */
	if (l4flags && npc->npc_l4.hdr) {
		flags |= l4flags;
	}
	npc->npc_info |= flags;
	return flags;
}
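
/*
 * Usage sketch (illustrative only): cache the headers of the current
 * packet and check what was found before looking at layer 4.
 *
 *	const int flags = npf_cache_all(npc);
 *
 *	if (flags & NPC_IPFRAG) {
 *		... fragmented packet: the L4 header was not cached.
 *	}
 *	if (npf_iscached(npc, NPC_TCP)) {
 *		... npc->npc_l4.tcp points to a contiguous TCP header.
 *	}
 */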

void
npf_recache(npf_cache_t *npc)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const int mflags __diagused = npc->npc_info & (NPC_IP46 | NPC_LAYER4);
	int flags __diagused;

	nbuf_reset(nbuf);
	npc->npc_info = 0;
	flags = npf_cache_all(npc);

	KASSERT((flags & mflags) == mflags);
	KASSERT(nbuf_flag_p(nbuf, NBUF_DATAREF_RESET) == 0);
}

/*
 * npf_rwrip: rewrite the required IP address (source or destination).
 */
bool
npf_rwrip(const npf_cache_t *npc, u_int which, const npf_addr_t *addr)
{
	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(which == NPF_SRC || which == NPF_DST);

	memcpy(npc->npc_ips[which], addr, npc->npc_alen);
	return true;
}

/*
 * npf_rwrport: rewrite the required TCP/UDP port (source or destination).
 */
bool
npf_rwrport(const npf_cache_t *npc, u_int which, const in_port_t port)
{
	const int proto = npc->npc_proto;
	in_port_t *oport;

	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
	KASSERT(proto == IPPROTO_TCP || proto == IPPROTO_UDP);
	KASSERT(which == NPF_SRC || which == NPF_DST);

	/* Get the port location and store the new port in it. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = npc->npc_l4.tcp;
		oport = (which == NPF_SRC) ? &th->th_sport : &th->th_dport;
	} else {
		struct udphdr *uh = npc->npc_l4.udp;
		oport = (which == NPF_SRC) ? &uh->uh_sport : &uh->uh_dport;
	}
	memcpy(oport, &port, sizeof(in_port_t));
	return true;
}

/*
 * npf_rwrcksum: rewrite IPv4 and/or TCP/UDP checksum.
 */
bool
npf_rwrcksum(const npf_cache_t *npc, u_int which,
    const npf_addr_t *addr, const in_port_t port)
{
	const npf_addr_t *oaddr = npc->npc_ips[which];
	const int proto = npc->npc_proto;
	const int alen = npc->npc_alen;
	uint16_t *ocksum;
	in_port_t oport;

	KASSERT(npf_iscached(npc, NPC_LAYER4));
	KASSERT(which == NPF_SRC || which == NPF_DST);

	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = npc->npc_ip.v4;
		uint16_t ipsum = ip->ip_sum;

		/* Recalculate IPv4 checksum and rewrite. */
		ip->ip_sum = npf_addr_cksum(ipsum, alen, oaddr, addr);
	} else {
		/* No checksum for IPv6. */
		KASSERT(npf_iscached(npc, NPC_IP6));
	}

	/* Nothing else to do for ICMP. */
	if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
		return true;
	}
	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));

	/*
	 * Calculate TCP/UDP checksum:
	 * - Skip if UDP and the current checksum is zero.
	 * - Fixup the IP address change.
	 * - Fixup the port change, if required (non-zero).
	 */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = npc->npc_l4.tcp;

		ocksum = &th->th_sum;
		oport = (which == NPF_SRC) ? th->th_sport : th->th_dport;
	} else {
		struct udphdr *uh = npc->npc_l4.udp;

		KASSERT(proto == IPPROTO_UDP);
		ocksum = &uh->uh_sum;
		if (*ocksum == 0) {
			/* No need to update. */
			return true;
		}
		oport = (which == NPF_SRC) ? uh->uh_sport : uh->uh_dport;
	}

	uint16_t cksum = npf_addr_cksum(*ocksum, alen, oaddr, addr);
	if (port) {
		cksum = npf_fixup16_cksum(cksum, oport, port);
	}

	/* Rewrite TCP/UDP checksum. */
	memcpy(ocksum, &cksum, sizeof(uint16_t));
	return true;
}

/*
 * npf_napt_rwr: perform address and/or port translation.
 */
int
npf_napt_rwr(const npf_cache_t *npc, u_int which,
    const npf_addr_t *addr, const in_addr_t port)
{
	const unsigned proto = npc->npc_proto;

	/*
	 * Rewrite the IP and/or TCP/UDP checksums first, since we need
	 * the current (old) address/port for the calculations.  Then
	 * perform the address translation, i.e. rewrite the source or
	 * the destination address.
	 */
	if (!npf_rwrcksum(npc, which, addr, port)) {
		return EINVAL;
	}
	if (!npf_rwrip(npc, which, addr)) {
		return EINVAL;
	}
	if (port == 0) {
		/* Done. */
		return 0;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* Rewrite the source/destination port. */
		if (!npf_rwrport(npc, which, port)) {
			return EINVAL;
		}
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		KASSERT(npf_iscached(npc, NPC_ICMP));
		/* Nothing. */
		break;
	default:
		return ENOTSUP;
	}
	return 0;
}
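
/*
 * Usage sketch (illustrative only; the address and port are hypothetical):
 * translate the source address and port of a cached TCP/UDP packet.  The
 * port is given in network byte order; zero means "do not rewrite the port".
 *
 *	error = npf_napt_rwr(npc, NPF_SRC, &new_addr, htons(40000));
 */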

/*
 * IPv6-to-IPv6 Network Prefix Translation (NPTv6), as per RFC 6296.
 */

int
npf_npt66_rwr(const npf_cache_t *npc, u_int which, const npf_addr_t *pref,
    npf_netmask_t len, uint16_t adj)
{
	npf_addr_t *addr = npc->npc_ips[which];
	unsigned remnant, word, preflen = len >> 4;
	uint32_t sum;

	KASSERT(which == NPF_SRC || which == NPF_DST);

	if (!npf_iscached(npc, NPC_IP6)) {
		return EINVAL;
	}
	if (len <= 48) {
		/*
		 * The word to adjust.  Cannot translate the 0xffff
		 * subnet if the prefix is /48 or shorter.
		 */
		word = 3;
		if (addr->word16[word] == 0xffff) {
			return EINVAL;
		}
	} else {
		/*
		 * Also, an all-0s or all-1s host part is disallowed for
		 * prefixes longer than /48.
		 */
		if ((addr->word32[2] == 0 && addr->word32[3] == 0) ||
		    (addr->word32[2] == ~0U && addr->word32[3] == ~0U))
			return EINVAL;

		/* Determine the 16-bit word to adjust. */
		for (word = 4; word < 8; word++)
			if (addr->word16[word] != 0xffff)
				break;
	}

	/* Rewrite the prefix. */
	for (unsigned i = 0; i < preflen; i++) {
		addr->word16[i] = pref->word16[i];
	}

	/*
	 * If the prefix length is not divisible by 16 (i.e. it ends within
	 * a 16-bit word), then prepare a mask, determine the word and
	 * adjust it.
	 */
	if ((remnant = len - (preflen << 4)) != 0) {
		const uint16_t wordmask = (1U << remnant) - 1;
		const unsigned i = preflen;

		addr->word16[i] = (pref->word16[i] & wordmask) |
		    (addr->word16[i] & ~wordmask);
	}

	/*
	 * Perform the 1's complement sum/difference.
	 */
	sum = addr->word16[word] + adj;
	while (sum >> 16) {
		sum = (sum >> 16) + (sum & 0xffff);
	}
	if (sum == 0xffff) {
		/* RFC 1071. */
		sum = 0x0000;
	}
	addr->word16[word] = sum;
	return 0;
}
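
/*
 * Worked example (illustrative only, cf. the example in RFC 6296):
 * translating the prefix fd01:0203:0405::/48 to 2001:db8:1::/48 uses the
 * pre-computed 1's complement difference adj = 0xd54f, so the address
 * fd01:0203:0405:0001::1234 becomes 2001:db8:1:d550::1234 (the subnet
 * word 0x0001 + 0xd54f = 0xd550).  The 1's complement sum of the whole
 * address is unchanged, which keeps the transport checksums valid
 * without touching them.
 */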

#if defined(DDB) || defined(_NPF_TESTING)

const char *
npf_addr_dump(const npf_addr_t *addr, int alen)
{
	if (alen == sizeof(struct in_addr)) {
		struct in_addr ip;
		memcpy(&ip, addr, alen);
		return inet_ntoa(ip);
	}
	return "[IPv6]";
}

#endif