/*	$NetBSD: ip_encap.c,v 1.65.2.4 2019/05/29 15:47:05 martin Exp $	*/
2 /* $KAME: ip_encap.c,v 1.73 2001/10/02 08:30:58 itojun Exp $ */
3
4 /*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32 /*
33 * My grandfather said that there's a devil inside tunnelling technology...
34 *
35 * We have surprisingly many protocols that want packets with IP protocol
36 * #4 or #41. Here's a list of protocols that want protocol #41:
37 * RFC1933 configured tunnel
38 * RFC1933 automatic tunnel
39 * RFC2401 IPsec tunnel
40 * RFC2473 IPv6 generic packet tunnelling
41 * RFC2529 6over4 tunnel
42 * RFC3056 6to4 tunnel
43 * isatap tunnel
44 * mobile-ip6 (uses RFC2473)
45 * Here's a list of protocol that want protocol #4:
46 * RFC1853 IPv4-in-IPv4 tunnelling
47 * RFC2003 IPv4 encapsulation within IPv4
48 * RFC2344 reverse tunnelling for mobile-ip4
49 * RFC2401 IPsec tunnel
50 * Well, what can I say. They impose different en/decapsulation mechanism
51 * from each other, so they need separate protocol handler. The only one
52 * we can easily determine by protocol # is IPsec, which always has
53 * AH/ESP/IPComp header right after outer IP header.
54 *
55 * So, clearly good old protosw does not work for protocol #4 and #41.
56 * The code will let you match protocol via src/dst address pair.
57 */
58 /* XXX is M_NETADDR correct? */
59
60 /*
61 * With USE_RADIX the code will use radix table for tunnel lookup, for
62 * tunnels registered with encap_attach() with a addr/mask pair.
63 * Faster on machines with thousands of tunnel registerations (= interfaces).
64 *
65 * The code assumes that radix table code can handle non-continuous netmask,
66 * as it will pass radix table memory region with (src + dst) sockaddr pair.
67 */
68 #define USE_RADIX
69
70 #include <sys/cdefs.h>
71 __KERNEL_RCSID(0, "$NetBSD: ip_encap.c,v 1.65.2.4 2019/05/29 15:47:05 martin Exp $");
72
73 #ifdef _KERNEL_OPT
74 #include "opt_mrouting.h"
75 #include "opt_inet.h"
76 #include "opt_net_mpsafe.h"
77 #endif
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h> /* for softnet_lock */
83 #include <sys/sockio.h>
84 #include <sys/mbuf.h>
85 #include <sys/errno.h>
86 #include <sys/queue.h>
87 #include <sys/kmem.h>
88 #include <sys/mutex.h>
89 #include <sys/condvar.h>
90 #include <sys/psref.h>
91 #include <sys/pslist.h>
92
93 #include <net/if.h>
94
95 #include <netinet/in.h>
96 #include <netinet/in_systm.h>
97 #include <netinet/ip.h>
98 #include <netinet/ip_var.h>
99 #include <netinet/ip_encap.h>
100 #ifdef MROUTING
101 #include <netinet/ip_mroute.h>
102 #endif /* MROUTING */
103
104 #ifdef INET6
105 #include <netinet/ip6.h>
106 #include <netinet6/ip6_var.h>
107 #include <netinet6/ip6protosw.h> /* for struct ip6ctlparam */
108 #include <netinet6/in6_var.h>
109 #include <netinet6/in6_pcb.h>
110 #include <netinet/icmp6.h>
111 #endif
112
113 #include <net/net_osdep.h>
114
115 #ifdef NET_MPSAFE
116 #define ENCAP_MPSAFE 1
117 #endif
118
119 enum direction { INBOUND, OUTBOUND };
120
121 #ifdef INET
122 static struct encaptab *encap4_lookup(struct mbuf *, int, int, enum direction,
123 struct psref *);
124 #endif
125 #ifdef INET6
126 static struct encaptab *encap6_lookup(struct mbuf *, int, int, enum direction,
127 struct psref *);
128 #endif
129 static int encap_add(struct encaptab *);
130 static int encap_remove(struct encaptab *);
131 static int encap_afcheck(int, const struct sockaddr *, const struct sockaddr *);
132 #ifdef USE_RADIX
133 static struct radix_node_head *encap_rnh(int);
134 static int mask_matchlen(const struct sockaddr *);
135 #else
136 static int mask_match(const struct encaptab *, const struct sockaddr *,
137 const struct sockaddr *);
138 #endif
139
140 /*
141 * In encap[46]_lookup(), ep->func can sleep(e.g. rtalloc1) while walking
142 * encap_table. So, it cannot use pserialize_read_enter()
143 */
144 static struct {
145 struct pslist_head list;
146 pserialize_t psz;
147 struct psref_class *elem_class; /* for the element of et_list */
148 } encaptab __cacheline_aligned = {
149 .list = PSLIST_INITIALIZER,
150 };
151 #define encap_table encaptab.list
152
153 static struct {
154 kmutex_t lock;
155 kcondvar_t cv;
156 struct lwp *busy;
157 } encap_whole __cacheline_aligned;
158
159 #ifdef USE_RADIX
160 struct radix_node_head *encap_head[2]; /* 0 for AF_INET, 1 for AF_INET6 */
161 static bool encap_head_updating = false;
162 #endif
163
164 static bool encap_initialized = false;
165 /*
166 * must be done before other encap interfaces initialization.
167 */
168 void
169 encapinit(void)
170 {
171
172 if (encap_initialized)
173 return;
174
175 encaptab.psz = pserialize_create();
176 encaptab.elem_class = psref_class_create("encapelem", IPL_SOFTNET);
177
178 mutex_init(&encap_whole.lock, MUTEX_DEFAULT, IPL_NONE);
179 cv_init(&encap_whole.cv, "ip_encap cv");
180 encap_whole.busy = NULL;
181
182 encap_initialized = true;
183 }
184
/*
 * Per-domain initialization hook, called on AF_INET{,6} domain init.
 * Runs its body at most once (guarded by the local "initialized" flag).
 */
void
encap_init(void)
{
	static int initialized = 0;

	if (initialized)
		return;
	initialized++;
#if 0
	/*
	 * we cannot use LIST_INIT() here, since drivers may want to call
	 * encap_attach(), on driver attach. encap_init() will be called
	 * on AF_INET{,6} initialization, which happens after driver
	 * initialization - using LIST_INIT() here can nuke encap_attach()
	 * from drivers.
	 */
	PSLIST_INIT(&encap_table);
#endif

#ifdef USE_RADIX
	/*
	 * initialize radix lookup table when the radix subsystem is inited.
	 * The radix key is the concatenated (src, dst) sockaddr pair, so
	 * the maximum key length is sizeof(struct sockaddr_pack) in bits.
	 */
	rn_delayedinit((void *)&encap_head[0],
	    sizeof(struct sockaddr_pack) << 3);
#ifdef INET6
	rn_delayedinit((void *)&encap_head[1],
	    sizeof(struct sockaddr_pack) << 3);
#endif
#endif
}
216
217 #ifdef INET
/*
 * Find the encaptab entry that best matches the IPv4 packet "m"
 * (inner protocol "proto" at offset "off").  "dir" selects whether the
 * packet's dst (INBOUND) or src (OUTBOUND) is treated as "mine".
 *
 * On success, returns the entry with a psref held in *match_psref;
 * the caller must psref_release() it.  Returns NULL when nothing
 * matches, or when the radix tree is being updated (the caller then
 * falls back to the raw socket path).
 */
static struct encaptab *
encap4_lookup(struct mbuf *m, int off, int proto, enum direction dir,
    struct psref *match_psref)
{
	struct ip *ip;
	struct ip_pack4 pack;
	struct encaptab *ep, *match;
	int prio, matchprio;
	int s;
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(AF_INET);
	struct radix_node *rn;
#endif

	KASSERT(m->m_len >= sizeof(*ip));

	ip = mtod(m, struct ip *);

	/* Build the (mine, yours) sockaddr-pair key from the outer header. */
	memset(&pack, 0, sizeof(pack));
	pack.p.sp_len = sizeof(pack);
	pack.mine.sin_family = pack.yours.sin_family = AF_INET;
	pack.mine.sin_len = pack.yours.sin_len = sizeof(struct sockaddr_in);
	if (dir == INBOUND) {
		pack.mine.sin_addr = ip->ip_dst;
		pack.yours.sin_addr = ip->ip_src;
	} else {
		pack.mine.sin_addr = ip->ip_src;
		pack.yours.sin_addr = ip->ip_dst;
	}

	match = NULL;
	matchprio = 0;

	s = pserialize_read_enter();
#ifdef USE_RADIX
	if (encap_head_updating) {
		/*
		 * Update in progress. Do nothing.
		 */
		pserialize_read_exit(s);
		return NULL;
	}

	/* Address-pair entries are found via the radix tree. */
	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
		struct encaptab *encapp = (struct encaptab *)rn;

		psref_acquire(match_psref, &encapp->psref,
		    encaptab.elem_class);
		match = encapp;
		matchprio = mask_matchlen(match->srcmask) +
		    mask_matchlen(match->dstmask);
	}
#endif
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		struct psref elem_psref;

		if (ep->af != AF_INET)
			continue;
		if (ep->proto >= 0 && ep->proto != proto)
			continue;

		/* Hold the element so it survives dropping pserialize below. */
		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		if (ep->func) {
			pserialize_read_exit(s);
			/* ep->func is sleepable. e.g. rtalloc1 */
			prio = (*ep->func)(m, off, proto, ep->arg);
			s = pserialize_read_enter();
		} else {
#ifdef USE_RADIX
			/* Non-func entries were already tried via radix. */
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
#else
			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
			    (struct sockaddr *)&pack.yours);
#endif
		}

		/*
		 * We prioritize the matches by using bit length of the
		 * matches. mask_match() and user-supplied matching function
		 * should return the bit length of the matches (for example,
		 * if both src/dst are matched for IPv4, 64 should be returned).
		 * 0 or negative return value means "it did not match".
		 *
		 * The question is, since we have two "mask" portion, we
		 * cannot really define total order between entries.
		 * For example, which of these should be preferred?
		 * mask_match() returns 48 (32 + 16) for both of them.
		 *	src=3ffe::/16, dst=3ffe:501::/32
		 *	src=3ffe:501::/32, dst=3ffe::/16
		 *
		 * We need to loop through all the possible candidates
		 * to get the best match - the search takes O(n) for
		 * n attachments (i.e. interfaces).
		 *
		 * For radix-based lookup, I guess source takes precedence.
		 * See rn_{refines,lexobetter} for the correct answer.
		 */
		if (prio <= 0) {
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
		}
		if (prio > matchprio) {
			/* release last matched ep */
			if (match != NULL)
				psref_release(match_psref, &match->psref,
				    encaptab.elem_class);

			/* Move the per-element reference into match_psref. */
			psref_copy(match_psref, &elem_psref,
			    encaptab.elem_class);
			matchprio = prio;
			match = ep;
		}
		KASSERTMSG((match == NULL) || psref_held(&match->psref,
		    encaptab.elem_class),
		    "current match = %p, but not hold its psref", match);

		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
	}
	pserialize_read_exit(s);

	return match;
}
346
347 void
348 encap4_input(struct mbuf *m, ...)
349 {
350 int off, proto;
351 va_list ap;
352 const struct encapsw *esw;
353 struct encaptab *match;
354 struct psref match_psref;
355
356 va_start(ap, m);
357 off = va_arg(ap, int);
358 proto = va_arg(ap, int);
359 va_end(ap);
360
361 match = encap4_lookup(m, off, proto, INBOUND, &match_psref);
362 if (match) {
363 /* found a match, "match" has the best one */
364 esw = match->esw;
365 if (esw && esw->encapsw4.pr_input) {
366 (*esw->encapsw4.pr_input)(m, off, proto, match->arg);
367 psref_release(&match_psref, &match->psref,
368 encaptab.elem_class);
369 } else {
370 psref_release(&match_psref, &match->psref,
371 encaptab.elem_class);
372 m_freem(m);
373 }
374 return;
375 }
376
377 /* last resort: inject to raw socket */
378 SOFTNET_LOCK_IF_NET_MPSAFE();
379 rip_input(m, off, proto);
380 SOFTNET_UNLOCK_IF_NET_MPSAFE();
381 }
382 #endif
383
384 #ifdef INET6
/*
 * IPv6 counterpart of encap4_lookup(): find the encaptab entry that
 * best matches packet "m" (inner protocol "proto" at offset "off").
 * "dir" selects whether dst (INBOUND) or src (OUTBOUND) is "mine".
 *
 * On success, returns the entry with a psref held in *match_psref
 * (caller releases).  Returns NULL on no match or while the radix
 * tree is being updated.
 */
static struct encaptab *
encap6_lookup(struct mbuf *m, int off, int proto, enum direction dir,
    struct psref *match_psref)
{
	struct ip6_hdr *ip6;
	struct ip_pack6 pack;
	int prio, matchprio;
	int s;
	struct encaptab *ep, *match;
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(AF_INET6);
	struct radix_node *rn;
#endif

	KASSERT(m->m_len >= sizeof(*ip6));

	ip6 = mtod(m, struct ip6_hdr *);

	/* Build the (mine, yours) sockaddr-pair key from the outer header. */
	memset(&pack, 0, sizeof(pack));
	pack.p.sp_len = sizeof(pack);
	pack.mine.sin6_family = pack.yours.sin6_family = AF_INET6;
	pack.mine.sin6_len = pack.yours.sin6_len = sizeof(struct sockaddr_in6);
	if (dir == INBOUND) {
		pack.mine.sin6_addr = ip6->ip6_dst;
		pack.yours.sin6_addr = ip6->ip6_src;
	} else {
		pack.mine.sin6_addr = ip6->ip6_src;
		pack.yours.sin6_addr = ip6->ip6_dst;
	}

	match = NULL;
	matchprio = 0;

	s = pserialize_read_enter();
#ifdef USE_RADIX
	if (encap_head_updating) {
		/*
		 * Update in progress. Do nothing.
		 */
		pserialize_read_exit(s);
		return NULL;
	}

	/* Address-pair entries are found via the radix tree. */
	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
		struct encaptab *encapp = (struct encaptab *)rn;

		psref_acquire(match_psref, &encapp->psref,
		    encaptab.elem_class);
		match = encapp;
		matchprio = mask_matchlen(match->srcmask) +
		    mask_matchlen(match->dstmask);
	}
#endif
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		struct psref elem_psref;

		if (ep->af != AF_INET6)
			continue;
		if (ep->proto >= 0 && ep->proto != proto)
			continue;

		/* Hold the element so it survives dropping pserialize below. */
		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);

		if (ep->func) {
			pserialize_read_exit(s);
			/* ep->func is sleepable. e.g. rtalloc1 */
			prio = (*ep->func)(m, off, proto, ep->arg);
			s = pserialize_read_enter();
		} else {
#ifdef USE_RADIX
			/* Non-func entries were already tried via radix. */
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
#else
			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
			    (struct sockaddr *)&pack.yours);
#endif
		}

		/* see encap4_lookup() for issues here */
		if (prio <= 0) {
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
		}
		if (prio > matchprio) {
			/* release last matched ep */
			if (match != NULL)
				psref_release(match_psref, &match->psref,
				    encaptab.elem_class);

			/* Move the per-element reference into match_psref. */
			psref_copy(match_psref, &elem_psref,
			    encaptab.elem_class);
			matchprio = prio;
			match = ep;
		}
		KASSERTMSG((match == NULL) || psref_held(&match->psref,
		    encaptab.elem_class),
		    "current match = %p, but not hold its psref", match);

		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
	}
	pserialize_read_exit(s);

	return match;
}
494
495 int
496 encap6_input(struct mbuf **mp, int *offp, int proto)
497 {
498 struct mbuf *m = *mp;
499 const struct encapsw *esw;
500 struct encaptab *match;
501 struct psref match_psref;
502 int rv;
503
504 match = encap6_lookup(m, *offp, proto, INBOUND, &match_psref);
505
506 if (match) {
507 /* found a match */
508 esw = match->esw;
509 if (esw && esw->encapsw6.pr_input) {
510 int ret;
511 ret = (*esw->encapsw6.pr_input)(mp, offp, proto,
512 match->arg);
513 psref_release(&match_psref, &match->psref,
514 encaptab.elem_class);
515 return ret;
516 } else {
517 psref_release(&match_psref, &match->psref,
518 encaptab.elem_class);
519 m_freem(m);
520 return IPPROTO_DONE;
521 }
522 }
523
524 /* last resort: inject to raw socket */
525 SOFTNET_LOCK_IF_NET_MPSAFE();
526 rv = rip6_input(mp, offp, proto);
527 SOFTNET_UNLOCK_IF_NET_MPSAFE();
528 return rv;
529 }
530 #endif
531
/*
 * Insert "ep" into the lookup structures: the radix tree (for
 * address-pair entries, when USE_RADIX) and the encaptab pslist.
 * Caller must hold the encap writer lock (see encap_lock_enter()).
 * Returns 0 on success, EEXIST if the radix key is already present.
 *
 * XXX
 * The encaptab list and the rnh radix tree must be manipulated atomically.
 */
static int
encap_add(struct encaptab *ep)
{
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(ep->af);
#endif

	KASSERT(encap_lock_held());

#ifdef USE_RADIX
	if (!ep->func && rnh) {
		/* Disable access to the radix tree for reader. */
		encap_head_updating = true;
		/* Wait for all readers to drain. */
		pserialize_perform(encaptab.psz);

		if (!rnh->rnh_addaddr((void *)ep->addrpack,
		    (void *)ep->maskpack, rnh, ep->nodes)) {
			/* re-enable readers before reporting the duplicate */
			encap_head_updating = false;
			return EEXIST;
		}

		/*
		 * The ep added to the radix tree must be skipped while
		 * encap[46]_lookup walks encaptab list. In other words,
		 * encap_add() does not need to care whether the ep has
		 * been added encaptab list or not yet.
		 * So, we can re-enable access to the radix tree for now.
		 */
		encap_head_updating = false;
	}
#endif
	PSLIST_WRITER_INSERT_HEAD(&encap_table, ep, chain);

	return 0;
}
572
/*
 * Remove "ep" from the lookup structures: the radix tree (for
 * address-pair entries, when USE_RADIX) and the encaptab pslist.
 * Caller must hold the encap writer lock (see encap_lock_enter()).
 * Returns 0 on success, ESRCH if the entry was not in the radix tree.
 *
 * XXX
 * The encaptab list and the rnh radix tree must be manipulated atomically.
 */
static int
encap_remove(struct encaptab *ep)
{
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(ep->af);
#endif
	int error = 0;

	KASSERT(encap_lock_held());

#ifdef USE_RADIX
	if (!ep->func && rnh) {
		/* Disable access to the radix tree for reader. */
		encap_head_updating = true;
		/* Wait for all readers to drain. */
		pserialize_perform(encaptab.psz);

		if (!rnh->rnh_deladdr((void *)ep->addrpack,
		    (void *)ep->maskpack, rnh))
			error = ESRCH;

		/*
		 * The ep added to the radix tree must be skipped while
		 * encap[46]_lookup walks encaptab list. In other words,
		 * encap_add() does not need to care whether the ep has
		 * been added encaptab list or not yet.
		 * So, we can re-enable access to the radix tree for now.
		 */
		encap_head_updating = false;
	}
#endif
	PSLIST_WRITER_REMOVE(ep, chain);

	return error;
}
612
613 static int
614 encap_afcheck(int af, const struct sockaddr *sp, const struct sockaddr *dp)
615 {
616 if (sp && dp) {
617 if (sp->sa_len != dp->sa_len)
618 return EINVAL;
619 if (af != sp->sa_family || af != dp->sa_family)
620 return EINVAL;
621 } else if (!sp && !dp)
622 ;
623 else
624 return EINVAL;
625
626 switch (af) {
627 case AF_INET:
628 if (sp && sp->sa_len != sizeof(struct sockaddr_in))
629 return EINVAL;
630 if (dp && dp->sa_len != sizeof(struct sockaddr_in))
631 return EINVAL;
632 break;
633 #ifdef INET6
634 case AF_INET6:
635 if (sp && sp->sa_len != sizeof(struct sockaddr_in6))
636 return EINVAL;
637 if (dp && dp->sa_len != sizeof(struct sockaddr_in6))
638 return EINVAL;
639 break;
640 #endif
641 default:
642 return EAFNOSUPPORT;
643 }
644
645 return 0;
646 }
647
648 /*
649 * sp (src ptr) is always my side, and dp (dst ptr) is always remote side.
650 * length of mask (sm and dm) is assumed to be same as sp/dp.
651 * Return value will be necessary as input (cookie) for encap_detach().
652 */
const struct encaptab *
encap_attach(int af, int proto,
    const struct sockaddr *sp, const struct sockaddr *sm,
    const struct sockaddr *dp, const struct sockaddr *dm,
    const struct encapsw *esw, void *arg)
{
	struct encaptab *ep;
	int error;
	int pss;
	size_t l;
	struct ip_pack4 *pack4;
#ifdef INET6
	struct ip_pack6 *pack6;
#endif
#ifndef ENCAP_MPSAFE
	int s;

	s = splsoftnet();
#endif
	/* sanity check on args */
	error = encap_afcheck(af, sp, dp);
	if (error)
		goto fail;

	/* check if anyone have already attached with exactly same config */
	pss = pserialize_read_enter();
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		if (ep->af != af)
			continue;
		if (ep->proto != proto)
			continue;
		if (ep->func)
			continue;

		KASSERT(ep->src != NULL);
		KASSERT(ep->dst != NULL);
		KASSERT(ep->srcmask != NULL);
		KASSERT(ep->dstmask != NULL);

		if (ep->src->sa_len != sp->sa_len ||
		    memcmp(ep->src, sp, sp->sa_len) != 0 ||
		    memcmp(ep->srcmask, sm, sp->sa_len) != 0)
			continue;
		if (ep->dst->sa_len != dp->sa_len ||
		    memcmp(ep->dst, dp, dp->sa_len) != 0 ||
		    memcmp(ep->dstmask, dm, dp->sa_len) != 0)
			continue;

		error = EEXIST;
		pserialize_read_exit(pss);
		goto fail;
	}
	pserialize_read_exit(pss);

	/* size of the packed (src, dst) sockaddr pair for this family */
	switch (af) {
	case AF_INET:
		l = sizeof(*pack4);
		break;
#ifdef INET6
	case AF_INET6:
		l = sizeof(*pack6);
		break;
#endif
	default:
		/* unreachable: encap_afcheck() rejected other families */
		goto fail;
	}

	/* M_NETADDR ok? */
	ep = kmem_zalloc(sizeof(*ep), KM_NOSLEEP);
	if (ep == NULL) {
		error = ENOBUFS;
		goto fail;
	}
	ep->addrpack = kmem_zalloc(l, KM_NOSLEEP);
	if (ep->addrpack == NULL) {
		error = ENOBUFS;
		goto gc;
	}
	ep->maskpack = kmem_zalloc(l, KM_NOSLEEP);
	if (ep->maskpack == NULL) {
		error = ENOBUFS;
		goto gc;
	}

	ep->af = af;
	ep->proto = proto;
	ep->addrpack->sa_len = l & 0xff;
	ep->maskpack->sa_len = l & 0xff;
	/* point src/dst (and masks) into the packed storage */
	switch (af) {
	case AF_INET:
		pack4 = (struct ip_pack4 *)ep->addrpack;
		ep->src = (struct sockaddr *)&pack4->mine;
		ep->dst = (struct sockaddr *)&pack4->yours;
		pack4 = (struct ip_pack4 *)ep->maskpack;
		ep->srcmask = (struct sockaddr *)&pack4->mine;
		ep->dstmask = (struct sockaddr *)&pack4->yours;
		break;
#ifdef INET6
	case AF_INET6:
		pack6 = (struct ip_pack6 *)ep->addrpack;
		ep->src = (struct sockaddr *)&pack6->mine;
		ep->dst = (struct sockaddr *)&pack6->yours;
		pack6 = (struct ip_pack6 *)ep->maskpack;
		ep->srcmask = (struct sockaddr *)&pack6->mine;
		ep->dstmask = (struct sockaddr *)&pack6->yours;
		break;
#endif
	}

	memcpy(ep->src, sp, sp->sa_len);
	memcpy(ep->srcmask, sm, sp->sa_len);
	memcpy(ep->dst, dp, dp->sa_len);
	memcpy(ep->dstmask, dm, dp->sa_len);
	ep->esw = esw;
	ep->arg = arg;
	psref_target_init(&ep->psref, encaptab.elem_class);

	error = encap_add(ep);
	if (error)
		goto gc;

	error = 0;
#ifndef ENCAP_MPSAFE
	splx(s);
#endif
	return ep;

gc:
	if (ep->addrpack)
		kmem_free(ep->addrpack, l);
	if (ep->maskpack)
		kmem_free(ep->maskpack, l);
	if (ep)
		kmem_free(ep, sizeof(*ep));
fail:
#ifndef ENCAP_MPSAFE
	splx(s);
#endif
	return NULL;
}
793
794 const struct encaptab *
795 encap_attach_func(int af, int proto,
796 int (*func)(struct mbuf *, int, int, void *),
797 const struct encapsw *esw, void *arg)
798 {
799 struct encaptab *ep;
800 int error;
801 #ifndef ENCAP_MPSAFE
802 int s;
803
804 s = splsoftnet();
805 #endif
806 /* sanity check on args */
807 if (!func) {
808 error = EINVAL;
809 goto fail;
810 }
811
812 error = encap_afcheck(af, NULL, NULL);
813 if (error)
814 goto fail;
815
816 ep = kmem_alloc(sizeof(*ep), KM_NOSLEEP); /*XXX*/
817 if (ep == NULL) {
818 error = ENOBUFS;
819 goto fail;
820 }
821 memset(ep, 0, sizeof(*ep));
822
823 ep->af = af;
824 ep->proto = proto;
825 ep->func = func;
826 ep->esw = esw;
827 ep->arg = arg;
828 psref_target_init(&ep->psref, encaptab.elem_class);
829
830 error = encap_add(ep);
831 if (error)
832 goto gc;
833
834 error = 0;
835 #ifndef ENCAP_MPSAFE
836 splx(s);
837 #endif
838 return ep;
839
840 gc:
841 kmem_free(ep, sizeof(*ep));
842 fail:
843 #ifndef ENCAP_MPSAFE
844 splx(s);
845 #endif
846 return NULL;
847 }
848
849 /* XXX encap4_ctlinput() is necessary if we set DF=1 on outer IPv4 header */
850
851 #ifdef INET6
852 void *
853 encap6_ctlinput(int cmd, const struct sockaddr *sa, void *d0)
854 {
855 void *d = d0;
856 struct ip6_hdr *ip6;
857 struct mbuf *m;
858 int off;
859 struct ip6ctlparam *ip6cp = NULL;
860 int nxt;
861 int s;
862 struct encaptab *ep;
863 const struct encapsw *esw;
864
865 if (sa->sa_family != AF_INET6 ||
866 sa->sa_len != sizeof(struct sockaddr_in6))
867 return NULL;
868
869 if ((unsigned)cmd >= PRC_NCMDS)
870 return NULL;
871 if (cmd == PRC_HOSTDEAD)
872 d = NULL;
873 else if (cmd == PRC_MSGSIZE)
874 ; /* special code is present, see below */
875 else if (inet6ctlerrmap[cmd] == 0)
876 return NULL;
877
878 /* if the parameter is from icmp6, decode it. */
879 if (d != NULL) {
880 ip6cp = (struct ip6ctlparam *)d;
881 m = ip6cp->ip6c_m;
882 ip6 = ip6cp->ip6c_ip6;
883 off = ip6cp->ip6c_off;
884 nxt = ip6cp->ip6c_nxt;
885
886 if (ip6 && cmd == PRC_MSGSIZE) {
887 int valid = 0;
888 struct encaptab *match;
889 struct psref elem_psref;
890
891 /*
892 * Check to see if we have a valid encap configuration.
893 */
894 match = encap6_lookup(m, off, nxt, OUTBOUND,
895 &elem_psref);
896 if (match)
897 valid++;
898 psref_release(&elem_psref, &match->psref,
899 encaptab.elem_class);
900
901 /*
902 * Depending on the value of "valid" and routing table
903 * size (mtudisc_{hi,lo}wat), we will:
904 * - recalcurate the new MTU and create the
905 * corresponding routing entry, or
906 * - ignore the MTU change notification.
907 */
908 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
909 }
910 } else {
911 m = NULL;
912 ip6 = NULL;
913 nxt = -1;
914 }
915
916 /* inform all listeners */
917
918 s = pserialize_read_enter();
919 PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
920 struct psref elem_psref;
921
922 if (ep->af != AF_INET6)
923 continue;
924 if (ep->proto >= 0 && ep->proto != nxt)
925 continue;
926
927 /* should optimize by looking at address pairs */
928
929 /* XXX need to pass ep->arg or ep itself to listeners */
930 psref_acquire(&elem_psref, &ep->psref,
931 encaptab.elem_class);
932 esw = ep->esw;
933 if (esw && esw->encapsw6.pr_ctlinput) {
934 pserialize_read_exit(s);
935 /* pr_ctlinput is sleepable. e.g. rtcache_free */
936 (*esw->encapsw6.pr_ctlinput)(cmd, sa, d, ep->arg);
937 s = pserialize_read_enter();
938 }
939 psref_release(&elem_psref, &ep->psref,
940 encaptab.elem_class);
941 }
942 pserialize_read_exit(s);
943
944 rip6_ctlinput(cmd, sa, d0);
945 return NULL;
946 }
947 #endif
948
/*
 * Detach and free an entry previously returned by encap_attach() or
 * encap_attach_func().  Caller must hold the encap writer lock.
 * Returns 0 on success, ENOENT if the cookie is not on the list, or
 * an error from encap_remove().
 */
int
encap_detach(const struct encaptab *cookie)
{
	const struct encaptab *ep = cookie;
	struct encaptab *p;
	int error;

	KASSERT(encap_lock_held());

	PSLIST_WRITER_FOREACH(p, &encap_table, struct encaptab, chain) {
		if (p == ep) {
			error = encap_remove(p);
			if (error)
				return error;
			else
				break;
		}
	}
	if (p == NULL)
		return ENOENT;

	/* wait for readers, destroy the psref target, then free */
	pserialize_perform(encaptab.psz);
	psref_target_destroy(&p->psref,
	    encaptab.elem_class);
	/* addrpack/maskpack exist only for address-pair (non-func) entries */
	if (!ep->func) {
		kmem_free(p->addrpack, ep->addrpack->sa_len);
		kmem_free(p->maskpack, ep->maskpack->sa_len);
	}
	kmem_free(p, sizeof(*p));

	return 0;
}
981
982 #ifdef USE_RADIX
983 static struct radix_node_head *
984 encap_rnh(int af)
985 {
986
987 switch (af) {
988 case AF_INET:
989 return encap_head[0];
990 #ifdef INET6
991 case AF_INET6:
992 return encap_head[1];
993 #endif
994 default:
995 return NULL;
996 }
997 }
998
999 static int
1000 mask_matchlen(const struct sockaddr *sa)
1001 {
1002 const char *p, *ep;
1003 int l;
1004
1005 p = (const char *)sa;
1006 ep = p + sa->sa_len;
1007 p += 2; /* sa_len + sa_family */
1008
1009 l = 0;
1010 while (p < ep) {
1011 l += (*p ? 8 : 0); /* estimate */
1012 p++;
1013 }
1014 return l;
1015 }
1016 #endif
1017
1018 #ifndef USE_RADIX
1019 static int
1020 mask_match(const struct encaptab *ep,
1021 const struct sockaddr *sp,
1022 const struct sockaddr *dp)
1023 {
1024 struct sockaddr_storage s;
1025 struct sockaddr_storage d;
1026 int i;
1027 const u_int8_t *p, *q;
1028 u_int8_t *r;
1029 int matchlen;
1030
1031 KASSERTMSG(ep->func == NULL, "wrong encaptab passed to mask_match");
1032
1033 if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d))
1034 return 0;
1035 if (sp->sa_family != ep->af || dp->sa_family != ep->af)
1036 return 0;
1037 if (sp->sa_len != ep->src->sa_len || dp->sa_len != ep->dst->sa_len)
1038 return 0;
1039
1040 matchlen = 0;
1041
1042 p = (const u_int8_t *)sp;
1043 q = (const u_int8_t *)ep->srcmask;
1044 r = (u_int8_t *)&s;
1045 for (i = 0 ; i < sp->sa_len; i++) {
1046 r[i] = p[i] & q[i];
1047 /* XXX estimate */
1048 matchlen += (q[i] ? 8 : 0);
1049 }
1050
1051 p = (const u_int8_t *)dp;
1052 q = (const u_int8_t *)ep->dstmask;
1053 r = (u_int8_t *)&d;
1054 for (i = 0 ; i < dp->sa_len; i++) {
1055 r[i] = p[i] & q[i];
1056 /* XXX rough estimate */
1057 matchlen += (q[i] ? 8 : 0);
1058 }
1059
1060 /* need to overwrite len/family portion as we don't compare them */
1061 s.ss_len = sp->sa_len;
1062 s.ss_family = sp->sa_family;
1063 d.ss_len = dp->sa_len;
1064 d.ss_family = dp->sa_family;
1065
1066 if (memcmp(&s, ep->src, ep->src->sa_len) == 0 &&
1067 memcmp(&d, ep->dst, ep->dst->sa_len) == 0) {
1068 return matchlen;
1069 } else
1070 return 0;
1071 }
1072 #endif
1073
1074 int
1075 encap_lock_enter(void)
1076 {
1077 int error;
1078
1079 mutex_enter(&encap_whole.lock);
1080 while (encap_whole.busy != NULL) {
1081 error = cv_wait_sig(&encap_whole.cv, &encap_whole.lock);
1082 if (error) {
1083 mutex_exit(&encap_whole.lock);
1084 return error;
1085 }
1086 }
1087 KASSERT(encap_whole.busy == NULL);
1088 encap_whole.busy = curlwp;
1089 mutex_exit(&encap_whole.lock);
1090
1091 return 0;
1092 }
1093
/*
 * Release the whole-table writer lock taken by encap_lock_enter()
 * and wake any waiters.  Must be called by the owning LWP.
 */
void
encap_lock_exit(void)
{

	mutex_enter(&encap_whole.lock);
	KASSERT(encap_whole.busy == curlwp);
	encap_whole.busy = NULL;
	cv_broadcast(&encap_whole.cv);
	mutex_exit(&encap_whole.lock);
}
1104
/*
 * True iff the calling LWP currently holds the whole-table writer lock.
 * Used only in KASSERTs; the unlocked read is safe because the value
 * can equal curlwp only if we set it ourselves.
 */
bool
encap_lock_held(void)
{

	return (encap_whole.busy == curlwp);
}
1111