/*	$NetBSD: npf_inet.c,v 1.32.2.3 2018/05/14 16:16:04 martin Exp $	*/

/*-
 * Copyright (c) 2009-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Various protocol related helper routines.
 *
 * This layer manipulates the npf_cache_t structure, i.e. it caches the
 * requested headers and records which information has been cached in the
 * info bit field.  It is also the responsibility of this layer to update
 * or invalidate the cache on rewrites (e.g. by the translation routines).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_inet.c,v 1.32.2.3 2018/05/14 16:16:04 martin Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/ethertypes.h>
#include <net/if_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#include "npf_impl.h"

/*
 * npf_fixup{16,32}_cksum: incremental update of the Internet checksum.
 */

uint16_t
npf_fixup16_cksum(uint16_t cksum, uint16_t odatum, uint16_t ndatum)
{
	uint32_t sum;

	/*
	 * RFC 1624:
	 *	HC' = ~(~HC + ~m + m')
	 *
	 * Note: 1's complement sum is endian-independent (RFC 1071, page 2).
	 */
	sum = ~cksum & 0xffff;
	sum += (~odatum & 0xffff) + ndatum;
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);

	return ~sum & 0xffff;
}
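
/*
 * Editorial example (not part of the original source): a minimal sketch of
 * how a caller might use npf_fixup16_cksum() to keep a TCP checksum valid
 * while rewriting a single 16-bit field.  The names th and newport are
 * hypothetical; both datum arguments stay in network byte order, which is
 * fine because the 1's complement sum is endian-independent:
 *
 *	const in_port_t oldport = th->th_sport;
 *	const in_port_t newport = htons(8080);
 *
 *	th->th_sum = npf_fixup16_cksum(th->th_sum, oldport, newport);
 *	th->th_sport = newport;
 *
 * This mirrors what npf_rwrcksum() and npf_rwrport() below do for NAT.
 */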

uint16_t
npf_fixup32_cksum(uint16_t cksum, uint32_t odatum, uint32_t ndatum)
{
	uint32_t sum;

	/*
	 * Checksum the 32-bit datum as two 16-bit halves.  Note: the first
	 * 32->16 bit reduction is not necessary.
	 */
	sum = ~cksum & 0xffff;
	sum += (~odatum & 0xffff) + (ndatum & 0xffff);

	sum += (~odatum >> 16) + (ndatum >> 16);
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);
	return ~sum & 0xffff;
}
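
/*
 * Editorial note: npf_fixup32_cksum() is equivalent to applying
 * npf_fixup16_cksum() to the two 16-bit halves of the datum, i.e.
 *
 *	npf_fixup32_cksum(ck, o, n) ==
 *	    npf_fixup16_cksum(npf_fixup16_cksum(ck, o & 0xffff, n & 0xffff),
 *	    o >> 16, n >> 16)
 *
 * since 1's complement addition is commutative and associative; the single
 * pass above merely defers the folding.
 */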

/*
 * npf_addr_cksum: calculate checksum of the address, either IPv4 or IPv6.
 */
uint16_t
npf_addr_cksum(uint16_t cksum, int sz, const npf_addr_t *oaddr,
    const npf_addr_t *naddr)
{
	const uint32_t *oip32 = (const uint32_t *)oaddr;
	const uint32_t *nip32 = (const uint32_t *)naddr;

	KASSERT(sz % sizeof(uint32_t) == 0);
	do {
		cksum = npf_fixup32_cksum(cksum, *oip32++, *nip32++);
		sz -= sizeof(uint32_t);
	} while (sz);

	return cksum;
}

/*
 * npf_addr_mix: XOR the IP addresses into a 32-bit integer.
 * Note: used as a hash function.
 */
uint32_t
npf_addr_mix(const int sz, const npf_addr_t *a1, const npf_addr_t *a2)
{
	uint32_t mix = 0;

	KASSERT(sz > 0 && a1 != NULL && a2 != NULL);

	for (int i = 0; i < (sz >> 2); i++) {
		mix ^= a1->s6_addr32[i];
		mix ^= a2->s6_addr32[i];
	}
	return mix;
}
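
/*
 * Editorial example (illustrative only): since npf_addr_mix() simply XORs
 * the 32-bit words of both addresses, it is symmetric in its arguments and
 * cheap enough for bucketing connections.  A hypothetical table with
 * "nbuckets" buckets could be indexed as:
 *
 *	const uint32_t hv = npf_addr_mix(npc->npc_alen,
 *	    npc->npc_ips[NPF_SRC], npc->npc_ips[NPF_DST]);
 *	const u_int bucket = hv % nbuckets;
 */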

/*
 * npf_addr_mask: apply the mask to a given address and store the result.
 */
void
npf_addr_mask(const npf_addr_t *addr, const npf_netmask_t mask,
    const int alen, npf_addr_t *out)
{
	const int nwords = alen >> 2;
	uint_fast8_t length = mask;

	/* Note: maximum length is 32 for IPv4 and 128 for IPv6. */
	KASSERT(length <= NPF_MAX_NETMASK);

	for (int i = 0; i < nwords; i++) {
		uint32_t wordmask;

		if (length >= 32) {
			wordmask = htonl(0xffffffff);
			length -= 32;
		} else if (length) {
			wordmask = htonl(0xffffffff << (32 - length));
			length = 0;
		} else {
			wordmask = 0;
		}
		out->s6_addr32[i] = addr->s6_addr32[i] & wordmask;
	}
}
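
/*
 * Editorial example: masking the IPv4 address 10.1.234.5 with a /20 mask.
 * The first (and only) word is handled by the "else if" branch above with
 * length = 20, so wordmask = htonl(0xffffffff << 12) = htonl(0xfffff000)
 * and the stored result is 10.1.224.0:
 *
 *	npf_addr_t out;
 *
 *	npf_addr_mask(addr, 20, sizeof(struct in_addr), &out);
 */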

/*
 * npf_addr_cmp: compare two addresses, either IPv4 or IPv6.
 *
 * => Return 0 if equal and negative/positive if less/greater accordingly.
 * => Ignore the mask, if NPF_NO_NETMASK is specified.
 */
int
npf_addr_cmp(const npf_addr_t *addr1, const npf_netmask_t mask1,
    const npf_addr_t *addr2, const npf_netmask_t mask2, const int alen)
{
	npf_addr_t realaddr1, realaddr2;

	if (mask1 != NPF_NO_NETMASK) {
		npf_addr_mask(addr1, mask1, alen, &realaddr1);
		addr1 = &realaddr1;
	}
	if (mask2 != NPF_NO_NETMASK) {
		npf_addr_mask(addr2, mask2, alen, &realaddr2);
		addr2 = &realaddr2;
	}
	return memcmp(addr1, addr2, alen);
}
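
/*
 * Editorial example (illustrative only): with the declared semantics, two
 * addresses can be compared at a given prefix length by masking both, e.g.
 * checking whether they fall into the same /24:
 *
 *	const bool same_net =
 *	    npf_addr_cmp(addr1, 24, addr2, 24, sizeof(struct in_addr)) == 0;
 */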

/*
 * npf_tcpsaw: helper to fetch SEQ, ACK, WIN and return TCP data length.
 *
 * => Returns all values in host byte-order.
 */
int
npf_tcpsaw(const npf_cache_t *npc, tcp_seq *seq, tcp_seq *ack, uint32_t *win)
{
	const struct tcphdr *th = npc->npc_l4.tcp;
	u_int thlen;

	KASSERT(npf_iscached(npc, NPC_TCP));

	*seq = ntohl(th->th_seq);
	*ack = ntohl(th->th_ack);
	*win = (uint32_t)ntohs(th->th_win);
	thlen = th->th_off << 2;

	if (npf_iscached(npc, NPC_IP4)) {
		const struct ip *ip = npc->npc_ip.v4;
		return ntohs(ip->ip_len) - npc->npc_hlen - thlen;
	} else if (npf_iscached(npc, NPC_IP6)) {
		const struct ip6_hdr *ip6 = npc->npc_ip.v6;
		return ntohs(ip6->ip6_plen) - thlen;
	}
	return 0;
}
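
/*
 * Editorial example: for an IPv4 packet with ip_len = 1500, a 20-byte IP
 * header (npc_hlen) and th_off = 8 (a 32-byte TCP header), the call
 *
 *	tcp_seq seq, ack;
 *	uint32_t win;
 *	const int tcpdlen = npf_tcpsaw(npc, &seq, &ack, &win);
 *
 * returns 1500 - 20 - 32 = 1448 bytes of TCP payload, with seq, ack and
 * win converted to host byte order.
 */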

/*
 * npf_fetch_tcpopts: parse and return TCP options.
 */
bool
npf_fetch_tcpopts(npf_cache_t *npc, uint16_t *mss, int *wscale)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const struct tcphdr *th = npc->npc_l4.tcp;
	int topts_len, step;
	void *nptr;
	uint8_t val;
	bool ok;

	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(npf_iscached(npc, NPC_TCP));

	/* Determine if there are any TCP options, get their length. */
	topts_len = (th->th_off << 2) - sizeof(struct tcphdr);
	if (topts_len <= 0) {
		/* No options. */
		return false;
	}
	KASSERT(topts_len <= MAX_TCPOPTLEN);

	/* First step: IP and TCP header up to options. */
	step = npc->npc_hlen + sizeof(struct tcphdr);
	nbuf_reset(nbuf);
next:
	if ((nptr = nbuf_advance(nbuf, step, 1)) == NULL) {
		ok = false;
		goto done;
	}
	val = *(uint8_t *)nptr;

	switch (val) {
	case TCPOPT_EOL:
		/* Done. */
		ok = true;
		goto done;
	case TCPOPT_NOP:
		topts_len--;
		step = 1;
		break;
	case TCPOPT_MAXSEG:
		if ((nptr = nbuf_advance(nbuf, 2, 2)) == NULL) {
			ok = false;
			goto done;
		}
		if (mss) {
			if (*mss) {
				memcpy(nptr, mss, sizeof(uint16_t));
			} else {
				memcpy(mss, nptr, sizeof(uint16_t));
			}
		}
		topts_len -= TCPOLEN_MAXSEG;
		step = 2;
		break;
	case TCPOPT_WINDOW:
		/* TCP Window Scaling (RFC 1323). */
		if ((nptr = nbuf_advance(nbuf, 2, 1)) == NULL) {
			ok = false;
			goto done;
		}
		val = *(uint8_t *)nptr;
		*wscale = (val > TCP_MAX_WINSHIFT) ? TCP_MAX_WINSHIFT : val;
		topts_len -= TCPOLEN_WINDOW;
		step = 1;
		break;
	default:
		if ((nptr = nbuf_advance(nbuf, 1, 1)) == NULL) {
			ok = false;
			goto done;
		}
		val = *(uint8_t *)nptr;
		if (val < 2 || val > topts_len) {
			ok = false;
			goto done;
		}
		topts_len -= val;
		step = val - 1;
	}

	/* Any options left? */
	if (__predict_true(topts_len > 0)) {
		goto next;
	}
	ok = true;
done:
	if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
		npf_recache(npc);
	}
	return ok;
}
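
/*
 * Editorial example (illustrative only): reading the MSS and window scale
 * from a cached TCP packet.  Passing *mss == 0 makes the routine copy the
 * MSS option out of the packet (still in network byte order), whereas a
 * non-zero *mss overwrites the option in place; wscale is only written if
 * a window-scale option is present, hence the initialisation:
 *
 *	uint16_t mss = 0;
 *	int wscale = 0;
 *	bool have_opts;
 *
 *	have_opts = npf_fetch_tcpopts(npc, &mss, &wscale);
 *
 * On success, ntohs(mss) yields the advertised MSS (or 0 if there was no
 * MSS option).
 */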

static int
npf_cache_ip(npf_cache_t *npc, nbuf_t *nbuf)
{
	const void *nptr = nbuf_dataptr(nbuf);
	const uint8_t ver = *(const uint8_t *)nptr;
	int flags = 0;

	switch (ver >> 4) {
	case IPVERSION: {
		struct ip *ip;

		ip = nbuf_ensure_contig(nbuf, sizeof(struct ip));
		if (ip == NULL) {
			return 0;
		}

		/* Check header length and fragment offset. */
		if ((u_int)(ip->ip_hl << 2) < sizeof(struct ip)) {
			return 0;
		}
		if (ip->ip_off & ~htons(IP_DF | IP_RF)) {
			/* Note fragmentation. */
			flags |= NPC_IPFRAG;
		}

		/* Cache: layer 3 - IPv4. */
		npc->npc_alen = sizeof(struct in_addr);
		npc->npc_ips[NPF_SRC] = (npf_addr_t *)&ip->ip_src;
		npc->npc_ips[NPF_DST] = (npf_addr_t *)&ip->ip_dst;
		npc->npc_hlen = ip->ip_hl << 2;
		npc->npc_proto = ip->ip_p;

		npc->npc_ip.v4 = ip;
		flags |= NPC_IP4;
		break;
	}

	case (IPV6_VERSION >> 4): {
		struct ip6_hdr *ip6;
		struct ip6_ext *ip6e;
		size_t off, hlen;

		ip6 = nbuf_ensure_contig(nbuf, sizeof(struct ip6_hdr));
		if (ip6 == NULL) {
			return 0;
		}

		/* Set initial next-protocol value. */
		hlen = sizeof(struct ip6_hdr);
		npc->npc_proto = ip6->ip6_nxt;
		npc->npc_hlen = hlen;

		/*
		 * Advance by the length of the current header.
		 */
		off = nbuf_offset(nbuf);
		while (nbuf_advance(nbuf, hlen, 0) != NULL) {
			ip6e = nbuf_ensure_contig(nbuf, sizeof(*ip6e));
			if (ip6e == NULL) {
				return 0;
			}

			/*
			 * Determine whether we are going to continue.
			 */
			switch (npc->npc_proto) {
			case IPPROTO_HOPOPTS:
			case IPPROTO_DSTOPTS:
			case IPPROTO_ROUTING:
				hlen = (ip6e->ip6e_len + 1) << 3;
				break;
			case IPPROTO_FRAGMENT:
				hlen = sizeof(struct ip6_frag);
				flags |= NPC_IPFRAG;
				break;
			case IPPROTO_AH:
				hlen = (ip6e->ip6e_len + 2) << 2;
				break;
			default:
				hlen = 0;
				break;
			}

			if (!hlen) {
				break;
			}
			npc->npc_proto = ip6e->ip6e_nxt;
			npc->npc_hlen += hlen;
		}

		/*
		 * Re-fetch the header pointers (nbufs might have been
		 * reallocated).  Restore the original offset (if any).
		 */
		nbuf_reset(nbuf);
		ip6 = nbuf_dataptr(nbuf);
		if (off) {
			nbuf_advance(nbuf, off, 0);
		}

		/* Cache: layer 3 - IPv6. */
		npc->npc_alen = sizeof(struct in6_addr);
		npc->npc_ips[NPF_SRC] = (npf_addr_t *)&ip6->ip6_src;
		npc->npc_ips[NPF_DST] = (npf_addr_t *)&ip6->ip6_dst;

		npc->npc_ip.v6 = ip6;
		flags |= NPC_IP6;
		break;
	}
	default:
		break;
	}
	return flags;
}
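
/*
 * Editorial note on the extension header arithmetic above: for hop-by-hop,
 * destination options and routing headers, ip6e_len counts 8-octet units
 * excluding the first 8 octets, hence (ip6e_len + 1) << 3; for AH it counts
 * 4-octet units excluding the first two, hence (ip6e_len + 2) << 2.  For
 * example, an AH with ip6e_len = 4 occupies (4 + 2) * 4 = 24 bytes.
 */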

/*
 * npf_cache_all: general routine to cache all relevant IP (v4 or v6)
 * and TCP, UDP or ICMP headers.
 *
 * => nbuf offset shall be set accordingly.
 */
int
npf_cache_all(npf_cache_t *npc)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	int flags, l4flags;
	u_int hlen;

	/*
	 * This routine is the main point where the references are cached;
	 * therefore, clear the flag as we reset.
	 */
again:
	nbuf_unset_flag(nbuf, NBUF_DATAREF_RESET);

	/*
	 * First, cache the L3 header (IPv4 or IPv6).  If the IP packet is
	 * fragmented, then we cannot look into L4.
	 */
	flags = npf_cache_ip(npc, nbuf);
	if ((flags & NPC_IP46) == 0 || (flags & NPC_IPFRAG) != 0) {
		nbuf_unset_flag(nbuf, NBUF_DATAREF_RESET);
		npc->npc_info |= flags;
		return flags;
	}
	hlen = npc->npc_hlen;

	/*
	 * Note: we guarantee that the potential "Query Id" field of the
	 * ICMPv4/ICMPv6 packets is in the nbuf.  This field is used in the
	 * ICMP ALG.
	 */
	switch (npc->npc_proto) {
	case IPPROTO_TCP:
		/* Cache: layer 4 - TCP. */
		npc->npc_l4.tcp = nbuf_advance(nbuf, hlen,
		    sizeof(struct tcphdr));
		l4flags = NPC_LAYER4 | NPC_TCP;
		break;
	case IPPROTO_UDP:
		/* Cache: layer 4 - UDP. */
		npc->npc_l4.udp = nbuf_advance(nbuf, hlen,
		    sizeof(struct udphdr));
		l4flags = NPC_LAYER4 | NPC_UDP;
		break;
	case IPPROTO_ICMP:
		/* Cache: layer 4 - ICMPv4. */
		npc->npc_l4.icmp = nbuf_advance(nbuf, hlen,
		    ICMP_MINLEN);
		l4flags = NPC_LAYER4 | NPC_ICMP;
		break;
	case IPPROTO_ICMPV6:
		/* Cache: layer 4 - ICMPv6. */
		npc->npc_l4.icmp6 = nbuf_advance(nbuf, hlen,
		    sizeof(struct icmp6_hdr));
		l4flags = NPC_LAYER4 | NPC_ICMP;
		break;
	default:
		l4flags = 0;
		break;
	}

	if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
		goto again;
	}

	/* Add the L4 flags if nbuf_advance() succeeded. */
	if (l4flags && npc->npc_l4.hdr) {
		flags |= l4flags;
	}
	npc->npc_info |= flags;
	return flags;
}
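
/*
 * Editorial example (a sketch only; the field names npc_info and npc_nbuf
 * are the ones used in this file, everything else is hypothetical): a
 * caller would attach the nbuf, run npf_cache_all() and check the returned
 * flags before touching any layer 4 state:
 *
 *	npf_cache_t npc;
 *	int flags;
 *
 *	npc.npc_info = 0;
 *	npc.npc_nbuf = nbuf;
 *	flags = npf_cache_all(&npc);
 *	if ((flags & NPC_IP46) == 0 || (flags & NPC_IPFRAG) != 0)
 *		goto out;
 */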

void
npf_recache(npf_cache_t *npc)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const int mflags __diagused = npc->npc_info & (NPC_IP46 | NPC_LAYER4);
	int flags __diagused;

	nbuf_reset(nbuf);
	npc->npc_info = 0;
	flags = npf_cache_all(npc);

	KASSERT((flags & mflags) == mflags);
	KASSERT(nbuf_flag_p(nbuf, NBUF_DATAREF_RESET) == 0);
}

/*
 * npf_rwrip: rewrite required IP address.
 */
bool
npf_rwrip(const npf_cache_t *npc, u_int which, const npf_addr_t *addr)
{
	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(which == NPF_SRC || which == NPF_DST);

	memcpy(npc->npc_ips[which], addr, npc->npc_alen);
	return true;
}

/*
 * npf_rwrport: rewrite required TCP/UDP port.
 */
bool
npf_rwrport(const npf_cache_t *npc, u_int which, const in_port_t port)
{
	const int proto = npc->npc_proto;
	in_port_t *oport;

	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
	KASSERT(proto == IPPROTO_TCP || proto == IPPROTO_UDP);
	KASSERT(which == NPF_SRC || which == NPF_DST);

	/* Get the offset and store the port in it. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = npc->npc_l4.tcp;
		oport = (which == NPF_SRC) ? &th->th_sport : &th->th_dport;
	} else {
		struct udphdr *uh = npc->npc_l4.udp;
		oport = (which == NPF_SRC) ? &uh->uh_sport : &uh->uh_dport;
	}
	memcpy(oport, &port, sizeof(in_port_t));
	return true;
}

/*
 * npf_rwrcksum: rewrite IPv4 and/or TCP/UDP checksum.
 */
bool
npf_rwrcksum(const npf_cache_t *npc, u_int which,
    const npf_addr_t *addr, const in_port_t port)
{
	const npf_addr_t *oaddr = npc->npc_ips[which];
	const int proto = npc->npc_proto;
	const int alen = npc->npc_alen;
	uint16_t *ocksum;
	in_port_t oport;

	KASSERT(npf_iscached(npc, NPC_LAYER4));
	KASSERT(which == NPF_SRC || which == NPF_DST);

	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = npc->npc_ip.v4;
		uint16_t ipsum = ip->ip_sum;

		/* Recalculate IPv4 checksum and rewrite. */
		ip->ip_sum = npf_addr_cksum(ipsum, alen, oaddr, addr);
	} else {
		/* No checksum for IPv6. */
		KASSERT(npf_iscached(npc, NPC_IP6));
	}

	/* Nothing else to do for ICMP. */
	if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
		return true;
	}
	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));

	/*
	 * Calculate TCP/UDP checksum:
	 * - Skip if UDP and the current checksum is zero.
	 * - Fixup the IP address change.
	 * - Fixup the port change, if required (non-zero).
	 */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = npc->npc_l4.tcp;

		ocksum = &th->th_sum;
		oport = (which == NPF_SRC) ? th->th_sport : th->th_dport;
	} else {
		struct udphdr *uh = npc->npc_l4.udp;

		KASSERT(proto == IPPROTO_UDP);
		ocksum = &uh->uh_sum;
		if (*ocksum == 0) {
			/* No need to update. */
			return true;
		}
		oport = (which == NPF_SRC) ? uh->uh_sport : uh->uh_dport;
	}

	uint16_t cksum = npf_addr_cksum(*ocksum, alen, oaddr, addr);
	if (port) {
		cksum = npf_fixup16_cksum(cksum, oport, port);
	}

	/* Rewrite TCP/UDP checksum. */
	memcpy(ocksum, &cksum, sizeof(uint16_t));
	return true;
}

/*
 * npf_napt_rwr: perform address and/or port translation.
 */
int
npf_napt_rwr(const npf_cache_t *npc, u_int which,
    const npf_addr_t *addr, const in_port_t port)
{
	const unsigned proto = npc->npc_proto;

	/*
	 * Rewrite IP and/or TCP/UDP checksums first, since we need the
	 * current (old) address/port for the calculations.  Then perform
	 * the address translation i.e. rewrite source or destination.
	 */
	if (!npf_rwrcksum(npc, which, addr, port)) {
		return EINVAL;
	}
	if (!npf_rwrip(npc, which, addr)) {
		return EINVAL;
	}
	if (port == 0) {
		/* Done. */
		return 0;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* Rewrite source/destination port. */
		if (!npf_rwrport(npc, which, port)) {
			return EINVAL;
		}
		break;
	case IPPROTO_ICMP:
#ifdef INET6
	case IPPROTO_ICMPV6:
#endif
		KASSERT(npf_iscached(npc, NPC_ICMP));
		/* Nothing. */
		break;
	default:
		return ENOTSUP;
	}
	return 0;
}
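
/*
 * Editorial example (illustrative only; the translation address and port
 * below are hypothetical and would normally come from the NAT policy):
 *
 *	int error;
 *
 *	error = npf_napt_rwr(npc, NPF_SRC, &taddr, htons(40000));
 *
 * Note the ordering inside npf_napt_rwr(): the checksums are fixed up
 * before the address and port fields are overwritten, because the
 * incremental fixups need the old values still present in the packet.
 */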

/*
 * IPv6-to-IPv6 Network Prefix Translation (NPTv6), as per RFC 6296.
 */

#ifdef INET6
int
npf_npt66_rwr(const npf_cache_t *npc, u_int which, const npf_addr_t *pref,
    npf_netmask_t len, uint16_t adj)
{
	npf_addr_t *addr = npc->npc_ips[which];
	unsigned remnant, word, preflen = len >> 4;
	uint32_t sum;

	KASSERT(which == NPF_SRC || which == NPF_DST);

	if (!npf_iscached(npc, NPC_IP6)) {
		return EINVAL;
	}
	if (len <= 48) {
		/*
		 * The word to adjust.  Cannot translate the 0xffff
		 * subnet if /48 or shorter.
		 */
		word = 3;
		if (addr->s6_addr16[word] == 0xffff) {
			return EINVAL;
		}
	} else {
		/*
		 * For prefixes longer than /48, an all-zeros or all-ones
		 * host part is also disallowed.
		 */
		if ((addr->s6_addr32[2] == 0 && addr->s6_addr32[3] == 0) ||
		    (addr->s6_addr32[2] == ~0U && addr->s6_addr32[3] == ~0U))
			return EINVAL;

		/* Determine the 16-bit word to adjust. */
		for (word = 4; word < 8; word++)
			if (addr->s6_addr16[word] != 0xffff)
				break;
	}

	/* Rewrite the prefix. */
	for (unsigned i = 0; i < preflen; i++) {
		addr->s6_addr16[i] = pref->s6_addr16[i];
	}

	/*
	 * If the prefix length is not divisible by 16, i.e. it ends within
	 * a 16-bit word, then prepare a mask, determine the word and
	 * adjust it.
	 */
	if ((remnant = len - (preflen << 4)) != 0) {
		const uint16_t wordmask = (1U << remnant) - 1;
		const unsigned i = preflen;

		addr->s6_addr16[i] = (pref->s6_addr16[i] & wordmask) |
		    (addr->s6_addr16[i] & ~wordmask);
	}

	/*
	 * Perform the 1's complement sum/difference.
	 */
	sum = addr->s6_addr16[word] + adj;
	while (sum >> 16) {
		sum = (sum >> 16) + (sum & 0xffff);
	}
	if (sum == 0xffff) {
		/* RFC 1071. */
		sum = 0x0000;
	}
	addr->s6_addr16[word] = sum;
	return 0;
}
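
/*
 * Editorial note: RFC 6296 requires the translation to be checksum-neutral,
 * so the 16-bit "adj" passed in above is the 1's complement difference of
 * the prefix being replaced and the prefix being written, computed once
 * when the NPTv6 policy is created (outside this file).  A sketch of that
 * calculation for the outbound direction, assuming a prefix length that is
 * divisible by 16 (oldpref and newpref are hypothetical names):
 *
 *	uint32_t s = 0;
 *	uint16_t adj;
 *
 *	for (unsigned i = 0; i < preflen; i++) {
 *		s += oldpref->s6_addr16[i];
 *		s += ~newpref->s6_addr16[i] & 0xffff;
 *	}
 *	while (s >> 16) {
 *		s = (s >> 16) + (s & 0xffff);
 *	}
 *	adj = s;
 *
 * The inbound direction uses the complementary adjustment.
 */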
#endif

#if defined(DDB) || defined(_NPF_TESTING)

const char *
npf_addr_dump(const npf_addr_t *addr, int alen)
{
#ifdef INET6
	if (alen == sizeof(struct in_addr)) {
#else
		KASSERT(alen == sizeof(struct in_addr));
#endif
		struct in_addr ip;
		memcpy(&ip, addr, alen);
		return inet_ntoa(ip);
#ifdef INET6
	}
	return ip6_sprintf(addr);
#endif
}

#endif