/*	$NetBSD: ip_encap.c,v 1.63 2017/04/07 03:31:50 ozaki-r Exp $	*/
/*	$KAME: ip_encap.c,v 1.73 2001/10/02 08:30:58 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * My grandfather said that there's a devil inside tunnelling technology...
 *
 * We have surprisingly many protocols that want packets with IP protocol
 * #4 or #41.  Here's a list of protocols that want protocol #41:
 *	RFC1933 configured tunnel
 *	RFC1933 automatic tunnel
 *	RFC2401 IPsec tunnel
 *	RFC2473 IPv6 generic packet tunnelling
 *	RFC2529 6over4 tunnel
 *	RFC3056 6to4 tunnel
 *	isatap tunnel
 *	mobile-ip6 (uses RFC2473)
 * Here's a list of protocols that want protocol #4:
 *	RFC1853 IPv4-in-IPv4 tunnelling
 *	RFC2003 IPv4 encapsulation within IPv4
 *	RFC2344 reverse tunnelling for mobile-ip4
 *	RFC2401 IPsec tunnel
 * Well, what can I say.  They each impose a different en/decapsulation
 * mechanism, so they need separate protocol handlers.  The only one we can
 * easily identify by protocol # is IPsec, which always has an AH/ESP/IPComp
 * header right after the outer IP header.
 *
 * So, clearly, good old protosw does not work for protocols #4 and #41.
 * The code here lets you match tunnels by src/dst address pair as well.
 */
/* XXX is M_NETADDR correct? */

/*
 * With USE_RADIX the code uses a radix table for tunnel lookup, for
 * tunnels registered with encap_attach() with an addr/mask pair.
 * This is faster on machines with thousands of tunnel registrations
 * (= interfaces).
 *
 * The code assumes that the radix table code can handle non-contiguous
 * netmasks, as it passes a memory region holding a (src + dst) sockaddr
 * pair to the radix table.
 */
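/*
 * Both lookups (encap[46]_lookup()) and registrations (encap_attach())
 * build that pair as a struct ip_pack4/ip_pack6: the overall length in
 * p.sp_len together with the "mine" (local) and "yours" (remote)
 * addresses.
 */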
#define USE_RADIX

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_encap.c,v 1.63 2017/04/07 03:31:50 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_mrouting.h"
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/psref.h>
#include <sys/pslist.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_encap.h>
#ifdef MROUTING
#include <netinet/ip_mroute.h>
#endif /* MROUTING */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6protosw.h>	/* for struct ip6ctlparam */
#include <netinet6/in6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/icmp6.h>
#endif

#include <net/net_osdep.h>

#ifdef NET_MPSAFE
#define ENCAP_MPSAFE 1
#endif

enum direction { INBOUND, OUTBOUND };

#ifdef INET
static struct encaptab *encap4_lookup(struct mbuf *, int, int, enum direction,
    struct psref *);
#endif
#ifdef INET6
static struct encaptab *encap6_lookup(struct mbuf *, int, int, enum direction,
    struct psref *);
#endif
static int encap_add(struct encaptab *);
static int encap_remove(struct encaptab *);
static int encap_afcheck(int, const struct sockaddr *, const struct sockaddr *);
#ifdef USE_RADIX
static struct radix_node_head *encap_rnh(int);
static int mask_matchlen(const struct sockaddr *);
#else
static int mask_match(const struct encaptab *, const struct sockaddr *,
    const struct sockaddr *);
#endif
static void encap_fillarg(struct mbuf *, const struct encaptab *);

/*
 * In encap[46]_lookup(), ep->func can sleep (e.g. rtalloc1) while walking
 * encap_table, so the walk cannot rely on pserialize_read_enter() alone;
 * psref references keep the entries alive across the sleepable call.
 */
static struct {
	struct pslist_head list;
	pserialize_t psz;
	struct psref_class *elem_class;	/* for the elements of encap_table */
} encaptab __cacheline_aligned = {
	.list = PSLIST_INITIALIZER,
};
#define encap_table encaptab.list

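/*
 * Serializes writers of the encap table: encap_lock_enter() records the
 * current lwp as the exclusive "busy" writer, and everybody else waits on
 * the condvar until encap_lock_exit() is called.
 */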
static struct {
	kmutex_t lock;
	kcondvar_t cv;
	struct lwp *busy;
} encap_whole __cacheline_aligned;

#ifdef USE_RADIX
struct radix_node_head *encap_head[2];	/* 0 for AF_INET, 1 for AF_INET6 */
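/*
 * Set by writers (under the encap lock) while they modify the radix
 * trees; lookups that observe it true give up for that packet instead of
 * touching a tree in flux.
 */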
static bool encap_head_updating = false;
#endif

static bool encap_initialized = false;
/*
 * Must be called before any other encap interface is initialized.
 */
void
encapinit(void)
{

	if (encap_initialized)
		return;

	encaptab.psz = pserialize_create();
	encaptab.elem_class = psref_class_create("encapelem", IPL_SOFTNET);
	if (encaptab.elem_class == NULL)
		panic("encaptab.elem_class cannot be allocated.\n");

	mutex_init(&encap_whole.lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&encap_whole.cv, "ip_encap cv");
	encap_whole.busy = NULL;

	encap_initialized = true;
}

void
encap_init(void)
{
	static int initialized = 0;

	if (initialized)
		return;
	initialized++;
#if 0
	/*
	 * we cannot use LIST_INIT() here, since drivers may want to call
	 * encap_attach() on driver attach.  encap_init() will be called
	 * on AF_INET{,6} initialization, which happens after driver
	 * initialization - using LIST_INIT() here can nuke encap_attach()
	 * from drivers.
	 */
	PSLIST_INIT(&encap_table);
#endif

#ifdef USE_RADIX
	/*
	 * initialize the radix lookup tables when the radix subsystem is
	 * initialized.
	 */
	rn_delayedinit((void *)&encap_head[0],
	    sizeof(struct sockaddr_pack) << 3);
#ifdef INET6
	rn_delayedinit((void *)&encap_head[1],
	    sizeof(struct sockaddr_pack) << 3);
#endif
#endif
}

#ifdef INET
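/*
 * Find the registration that best matches the given packet.  Address-pair
 * entries are looked up in the radix tree (under USE_RADIX), function-based
 * entries by walking the list, and the candidate with the longest total
 * match wins.  On success a psref is acquired on the returned entry via
 * match_psref; the caller must release it.
 */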
static struct encaptab *
encap4_lookup(struct mbuf *m, int off, int proto, enum direction dir,
    struct psref *match_psref)
{
	struct ip *ip;
	struct ip_pack4 pack;
	struct encaptab *ep, *match;
	int prio, matchprio;
	int s;
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(AF_INET);
	struct radix_node *rn;
#endif

	KASSERT(m->m_len >= sizeof(*ip));

	ip = mtod(m, struct ip *);

	memset(&pack, 0, sizeof(pack));
	pack.p.sp_len = sizeof(pack);
	pack.mine.sin_family = pack.yours.sin_family = AF_INET;
	pack.mine.sin_len = pack.yours.sin_len = sizeof(struct sockaddr_in);
	if (dir == INBOUND) {
		pack.mine.sin_addr = ip->ip_dst;
		pack.yours.sin_addr = ip->ip_src;
	} else {
		pack.mine.sin_addr = ip->ip_src;
		pack.yours.sin_addr = ip->ip_dst;
	}

	match = NULL;
	matchprio = 0;

	s = pserialize_read_enter();
#ifdef USE_RADIX
	if (encap_head_updating) {
		/*
		 * Update in progress.  Do nothing.
		 */
		pserialize_read_exit(s);
		return NULL;
	}

	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
		struct encaptab *encapp = (struct encaptab *)rn;

		psref_acquire(match_psref, &encapp->psref,
		    encaptab.elem_class);
		match = encapp;
		matchprio = mask_matchlen(match->srcmask) +
		    mask_matchlen(match->dstmask);
	}
#endif
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		struct psref elem_psref;

		membar_datadep_consumer();

		if (ep->af != AF_INET)
			continue;
		if (ep->proto >= 0 && ep->proto != proto)
			continue;

		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		if (ep->func) {
			pserialize_read_exit(s);
			/* ep->func is sleepable, e.g. rtalloc1 */
			prio = (*ep->func)(m, off, proto, ep->arg);
			s = pserialize_read_enter();
		} else {
#ifdef USE_RADIX
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
#else
			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
			    (struct sockaddr *)&pack.yours);
#endif
		}

		/*
		 * We prioritize the matches by the bit length of the
		 * match.  mask_match() and the user-supplied matching
		 * function should return the bit length of the match
		 * (for example, if both src/dst are matched for IPv4,
		 * 64 should be returned).  A return value of 0 or less
		 * means "it did not match".
		 *
		 * The question is, since we have two "mask" portions, we
		 * cannot really define a total order between entries.
		 * For example, which of these should be preferred?
		 * mask_match() returns 48 (32 + 16) for both of them.
		 *	src=3ffe::/16, dst=3ffe:501::/32
		 *	src=3ffe:501::/32, dst=3ffe::/16
		 *
		 * We need to loop through all the possible candidates
		 * to get the best match - the search takes O(n) for
		 * n attachments (i.e. interfaces).
		 *
		 * For radix-based lookup, I guess source takes precedence.
		 * See rn_{refines,lexobetter} for the correct answer.
		 */
		if (prio <= 0) {
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
		}
		if (prio > matchprio) {
			/* release the previously matched ep */
			if (match != NULL)
				psref_release(match_psref, &match->psref,
				    encaptab.elem_class);

			psref_copy(match_psref, &elem_psref,
			    encaptab.elem_class);
			matchprio = prio;
			match = ep;
		}
		KASSERTMSG((match == NULL) || psref_held(&match->psref,
		    encaptab.elem_class),
		    "current match = %p, but its psref is not held", match);

		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
	}
	pserialize_read_exit(s);

	return match;
}

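/*
 * Input path for encapsulated packets arriving over IPv4: hand the packet
 * to the pr_input routine of the best matching tunnel, or fall back to
 * raw socket delivery if no tunnel claims it.
 */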
void
encap4_input(struct mbuf *m, ...)
{
	int off, proto;
	va_list ap;
	const struct encapsw *esw;
	struct encaptab *match;
	struct psref match_psref;

	va_start(ap, m);
	off = va_arg(ap, int);
	proto = va_arg(ap, int);
	va_end(ap);

	match = encap4_lookup(m, off, proto, INBOUND, &match_psref);
	if (match) {
		/* found a match, "match" has the best one */
		esw = match->esw;
		if (esw && esw->encapsw4.pr_input) {
			encap_fillarg(m, match);
			(*esw->encapsw4.pr_input)(m, off, proto);
			psref_release(&match_psref, &match->psref,
			    encaptab.elem_class);
		} else {
			psref_release(&match_psref, &match->psref,
			    encaptab.elem_class);
			m_freem(m);
		}
		return;
	}

	/* last resort: inject to raw socket */
	rip_input(m, off, proto);
}
#endif

#ifdef INET6
static struct encaptab *
encap6_lookup(struct mbuf *m, int off, int proto, enum direction dir,
    struct psref *match_psref)
{
	struct ip6_hdr *ip6;
	struct ip_pack6 pack;
	int prio, matchprio;
	int s;
	struct encaptab *ep, *match;
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(AF_INET6);
	struct radix_node *rn;
#endif

	KASSERT(m->m_len >= sizeof(*ip6));

	ip6 = mtod(m, struct ip6_hdr *);

	memset(&pack, 0, sizeof(pack));
	pack.p.sp_len = sizeof(pack);
	pack.mine.sin6_family = pack.yours.sin6_family = AF_INET6;
	pack.mine.sin6_len = pack.yours.sin6_len = sizeof(struct sockaddr_in6);
	if (dir == INBOUND) {
		pack.mine.sin6_addr = ip6->ip6_dst;
		pack.yours.sin6_addr = ip6->ip6_src;
	} else {
		pack.mine.sin6_addr = ip6->ip6_src;
		pack.yours.sin6_addr = ip6->ip6_dst;
	}

	match = NULL;
	matchprio = 0;

	s = pserialize_read_enter();
#ifdef USE_RADIX
	if (encap_head_updating) {
		/*
		 * Update in progress.  Do nothing.
		 */
		pserialize_read_exit(s);
		return NULL;
	}

	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
		struct encaptab *encapp = (struct encaptab *)rn;

		psref_acquire(match_psref, &encapp->psref,
		    encaptab.elem_class);
		match = encapp;
		matchprio = mask_matchlen(match->srcmask) +
		    mask_matchlen(match->dstmask);
	}
#endif
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		struct psref elem_psref;

		membar_datadep_consumer();

		if (ep->af != AF_INET6)
			continue;
		if (ep->proto >= 0 && ep->proto != proto)
			continue;

		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);

		if (ep->func) {
			pserialize_read_exit(s);
			/* ep->func is sleepable, e.g. rtalloc1 */
			prio = (*ep->func)(m, off, proto, ep->arg);
			s = pserialize_read_enter();
		} else {
#ifdef USE_RADIX
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
#else
			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
			    (struct sockaddr *)&pack.yours);
#endif
		}

		/* see encap4_lookup() for issues here */
		if (prio <= 0) {
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
		}
		if (prio > matchprio) {
			/* release the previously matched ep */
			if (match != NULL)
				psref_release(match_psref, &match->psref,
				    encaptab.elem_class);

			psref_copy(match_psref, &elem_psref,
			    encaptab.elem_class);
			matchprio = prio;
			match = ep;
		}
		KASSERTMSG((match == NULL) || psref_held(&match->psref,
		    encaptab.elem_class),
		    "current match = %p, but its psref is not held", match);

		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
	}
	pserialize_read_exit(s);

	return match;
}

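/*
 * IPv6 counterpart of encap4_input(): dispatch to the pr_input routine of
 * the best matching tunnel, or fall back to raw socket delivery.
 */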
int
encap6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	const struct encapsw *esw;
	struct encaptab *match;
	struct psref match_psref;

	match = encap6_lookup(m, *offp, proto, INBOUND, &match_psref);

	if (match) {
		/* found a match */
		esw = match->esw;
		if (esw && esw->encapsw6.pr_input) {
			int ret;
			encap_fillarg(m, match);
			ret = (*esw->encapsw6.pr_input)(mp, offp, proto);
			psref_release(&match_psref, &match->psref,
			    encaptab.elem_class);
			return ret;
		} else {
			psref_release(&match_psref, &match->psref,
			    encaptab.elem_class);
			m_freem(m);
			return IPPROTO_DONE;
		}
	}

	/* last resort: inject to raw socket */
	return rip6_input(mp, offp, proto);
}
#endif

/*
 * XXX
 * The encaptab list and the rnh radix tree must be manipulated atomically.
 */
static int
encap_add(struct encaptab *ep)
{
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(ep->af);
#endif

	KASSERT(encap_lock_held());

#ifdef USE_RADIX
	if (!ep->func && rnh) {
		/* Disable access to the radix tree for readers. */
		encap_head_updating = true;
		/* Wait for all readers to drain. */
		pserialize_perform(encaptab.psz);

		if (!rnh->rnh_addaddr((void *)ep->addrpack,
		    (void *)ep->maskpack, rnh, ep->nodes)) {
			encap_head_updating = false;
			return EEXIST;
		}

		/*
		 * The ep added to the radix tree is skipped while
		 * encap[46]_lookup walks the encaptab list.  In other
		 * words, encap_add() does not need to care whether the
		 * ep has been added to the encaptab list yet or not.
		 * So, we can re-enable access to the radix tree for now.
		 */
		encap_head_updating = false;
	}
#endif
	PSLIST_WRITER_INSERT_HEAD(&encap_table, ep, chain);

	return 0;
}

/*
 * XXX
 * The encaptab list and the rnh radix tree must be manipulated atomically.
 */
static int
encap_remove(struct encaptab *ep)
{
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(ep->af);
#endif
	int error = 0;

	KASSERT(encap_lock_held());

#ifdef USE_RADIX
	if (!ep->func && rnh) {
		/* Disable access to the radix tree for readers. */
		encap_head_updating = true;
		/* Wait for all readers to drain. */
		pserialize_perform(encaptab.psz);

		if (!rnh->rnh_deladdr((void *)ep->addrpack,
		    (void *)ep->maskpack, rnh))
			error = ESRCH;

		/*
		 * As in encap_add(), an ep registered in the radix tree
		 * is skipped while encap[46]_lookup walks the encaptab
		 * list, so encap_remove() does not need to care whether
		 * the ep has been removed from the encaptab list yet or
		 * not.
		 * So, we can re-enable access to the radix tree for now.
		 */
		encap_head_updating = false;
	}
#endif
	PSLIST_WRITER_REMOVE(ep, chain);

	return error;
}

static int
encap_afcheck(int af, const struct sockaddr *sp, const struct sockaddr *dp)
{
	if (sp && dp) {
		if (sp->sa_len != dp->sa_len)
			return EINVAL;
		if (af != sp->sa_family || af != dp->sa_family)
			return EINVAL;
	} else if (!sp && !dp)
		;
	else
		return EINVAL;

	switch (af) {
	case AF_INET:
		if (sp && sp->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		if (dp && dp->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		break;
#ifdef INET6
	case AF_INET6:
		if (sp && sp->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		if (dp && dp->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		break;
#endif
	default:
		return EAFNOSUPPORT;
	}

	return 0;
}

/*
 * sp (src ptr) is always my side, and dp (dst ptr) is always the remote side.
 * The length of the masks (sm and dm) is assumed to be the same as sp/dp.
 * The return value is needed as the cookie argument for encap_detach().
 */
const struct encaptab *
encap_attach(int af, int proto,
    const struct sockaddr *sp, const struct sockaddr *sm,
    const struct sockaddr *dp, const struct sockaddr *dm,
    const struct encapsw *esw, void *arg)
{
	struct encaptab *ep;
	int error;
	int pss;
	size_t l;
	struct ip_pack4 *pack4;
#ifdef INET6
	struct ip_pack6 *pack6;
#endif
#ifndef ENCAP_MPSAFE
	int s;

	s = splsoftnet();
#endif
	/* sanity check on args */
	error = encap_afcheck(af, sp, dp);
	if (error)
		goto fail;

	/* check if anyone has already attached with exactly the same config */
	pss = pserialize_read_enter();
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		membar_datadep_consumer();

		if (ep->af != af)
			continue;
		if (ep->proto != proto)
			continue;
		if (ep->func)
			continue;

		KASSERT(ep->src != NULL);
		KASSERT(ep->dst != NULL);
		KASSERT(ep->srcmask != NULL);
		KASSERT(ep->dstmask != NULL);

		if (ep->src->sa_len != sp->sa_len ||
		    memcmp(ep->src, sp, sp->sa_len) != 0 ||
		    memcmp(ep->srcmask, sm, sp->sa_len) != 0)
			continue;
		if (ep->dst->sa_len != dp->sa_len ||
		    memcmp(ep->dst, dp, dp->sa_len) != 0 ||
		    memcmp(ep->dstmask, dm, dp->sa_len) != 0)
			continue;

		error = EEXIST;
		pserialize_read_exit(pss);
		goto fail;
	}
	pserialize_read_exit(pss);

	switch (af) {
	case AF_INET:
		l = sizeof(*pack4);
		break;
#ifdef INET6
	case AF_INET6:
		l = sizeof(*pack6);
		break;
#endif
	default:
		goto fail;
	}

	/* M_NETADDR ok? */
	ep = kmem_zalloc(sizeof(*ep), KM_NOSLEEP);
	if (ep == NULL) {
		error = ENOBUFS;
		goto fail;
	}
	ep->addrpack = kmem_zalloc(l, KM_NOSLEEP);
	if (ep->addrpack == NULL) {
		error = ENOBUFS;
		goto gc;
	}
	ep->maskpack = kmem_zalloc(l, KM_NOSLEEP);
	if (ep->maskpack == NULL) {
		error = ENOBUFS;
		goto gc;
	}

	ep->af = af;
	ep->proto = proto;
	ep->addrpack->sa_len = l & 0xff;
	ep->maskpack->sa_len = l & 0xff;
	switch (af) {
	case AF_INET:
		pack4 = (struct ip_pack4 *)ep->addrpack;
		ep->src = (struct sockaddr *)&pack4->mine;
		ep->dst = (struct sockaddr *)&pack4->yours;
		pack4 = (struct ip_pack4 *)ep->maskpack;
		ep->srcmask = (struct sockaddr *)&pack4->mine;
		ep->dstmask = (struct sockaddr *)&pack4->yours;
		break;
#ifdef INET6
	case AF_INET6:
		pack6 = (struct ip_pack6 *)ep->addrpack;
		ep->src = (struct sockaddr *)&pack6->mine;
		ep->dst = (struct sockaddr *)&pack6->yours;
		pack6 = (struct ip_pack6 *)ep->maskpack;
		ep->srcmask = (struct sockaddr *)&pack6->mine;
		ep->dstmask = (struct sockaddr *)&pack6->yours;
		break;
#endif
	}

	memcpy(ep->src, sp, sp->sa_len);
	memcpy(ep->srcmask, sm, sp->sa_len);
	memcpy(ep->dst, dp, dp->sa_len);
	memcpy(ep->dstmask, dm, dp->sa_len);
	ep->esw = esw;
	ep->arg = arg;
	psref_target_init(&ep->psref, encaptab.elem_class);

	error = encap_add(ep);
	if (error)
		goto gc;

	error = 0;
#ifndef ENCAP_MPSAFE
	splx(s);
#endif
	return ep;

gc:
	if (ep->addrpack)
		kmem_free(ep->addrpack, l);
	if (ep->maskpack)
		kmem_free(ep->maskpack, l);
	if (ep)
		kmem_free(ep, sizeof(*ep));
fail:
#ifndef ENCAP_MPSAFE
	splx(s);
#endif
	return NULL;
}

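/*
 * Attach an entry whose matching is delegated to a caller-supplied
 * function instead of an address/mask pair.  Such entries live on the
 * list only and never enter the radix tree.
 */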
const struct encaptab *
encap_attach_func(int af, int proto,
    int (*func)(struct mbuf *, int, int, void *),
    const struct encapsw *esw, void *arg)
{
	struct encaptab *ep;
	int error;
#ifndef ENCAP_MPSAFE
	int s;

	s = splsoftnet();
#endif
	/* sanity check on args */
	if (!func) {
		error = EINVAL;
		goto fail;
	}

	error = encap_afcheck(af, NULL, NULL);
	if (error)
		goto fail;

	ep = kmem_alloc(sizeof(*ep), KM_NOSLEEP);	/*XXX*/
	if (ep == NULL) {
		error = ENOBUFS;
		goto fail;
	}
	memset(ep, 0, sizeof(*ep));

	ep->af = af;
	ep->proto = proto;
	ep->func = func;
	ep->esw = esw;
	ep->arg = arg;
	psref_target_init(&ep->psref, encaptab.elem_class);

	error = encap_add(ep);
	if (error)
		goto fail;

	error = 0;
#ifndef ENCAP_MPSAFE
	splx(s);
#endif
	return ep;

fail:
#ifndef ENCAP_MPSAFE
	splx(s);
#endif
	return NULL;
}

/* XXX encap4_ctlinput() is necessary if we set DF=1 on outer IPv4 header */

#ifdef INET6
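/*
 * ICMPv6 control input for encapsulated traffic.  For PRC_MSGSIZE, check
 * whether the offending packet belongs to a configured tunnel before
 * handing the MTU update to icmp6_mtudisc_update(), then pass the event
 * on to every matching listener's pr_ctlinput and finally to
 * rip6_ctlinput().
 */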
void *
encap6_ctlinput(int cmd, const struct sockaddr *sa, void *d0)
{
	void *d = d0;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	int off;
	struct ip6ctlparam *ip6cp = NULL;
	int nxt;
	int s;
	struct encaptab *ep;
	const struct encapsw *esw;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return NULL;

	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	if (cmd == PRC_HOSTDEAD)
		d = NULL;
	else if (cmd == PRC_MSGSIZE)
		; /* special code is present, see below */
	else if (inet6ctlerrmap[cmd] == 0)
		return NULL;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		nxt = ip6cp->ip6c_nxt;

		if (ip6 && cmd == PRC_MSGSIZE) {
			int valid = 0;
			struct encaptab *match;
			struct psref elem_psref;

			/*
			 * Check to see if we have a valid encap configuration.
			 */
			match = encap6_lookup(m, off, nxt, OUTBOUND,
			    &elem_psref);
			if (match) {
				valid++;
				psref_release(&elem_psref, &match->psref,
				    encaptab.elem_class);
			}

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalculate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
		}
	} else {
		m = NULL;
		ip6 = NULL;
		nxt = -1;
	}

	/* inform all listeners */

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		struct psref elem_psref;

		membar_datadep_consumer();

		if (ep->af != AF_INET6)
			continue;
		if (ep->proto >= 0 && ep->proto != nxt)
			continue;

		/* should optimize by looking at address pairs */

		/* XXX need to pass ep->arg or ep itself to listeners */
		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		esw = ep->esw;
		if (esw && esw->encapsw6.pr_ctlinput) {
			pserialize_read_exit(s);
			/* pr_ctlinput is sleepable, e.g. rtcache_free */
			(*esw->encapsw6.pr_ctlinput)(cmd, sa, d, ep->arg);
			s = pserialize_read_enter();
		}
		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
	}
	pserialize_read_exit(s);

	rip6_ctlinput(cmd, sa, d0);
	return NULL;
}
#endif

int
encap_detach(const struct encaptab *cookie)
{
	const struct encaptab *ep = cookie;
	struct encaptab *p;
	int error;

	KASSERT(encap_lock_held());

	PSLIST_WRITER_FOREACH(p, &encap_table, struct encaptab, chain) {
		membar_datadep_consumer();

		if (p == ep) {
			error = encap_remove(p);
			if (error)
				return error;
			else
				break;
		}
	}
	if (p == NULL)
		return ENOENT;

	pserialize_perform(encaptab.psz);
	psref_target_destroy(&p->psref,
	    encaptab.elem_class);
	if (!ep->func) {
		kmem_free(p->addrpack, ep->addrpack->sa_len);
		kmem_free(p->maskpack, ep->maskpack->sa_len);
	}
	kmem_free(p, sizeof(*p));

	return 0;
}

#ifdef USE_RADIX
static struct radix_node_head *
encap_rnh(int af)
{

	switch (af) {
	case AF_INET:
		return encap_head[0];
#ifdef INET6
	case AF_INET6:
		return encap_head[1];
#endif
	default:
		return NULL;
	}
}

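/*
 * Rough prefix length of a mask in bits: every nonzero mask byte counts
 * as 8 bits, which is exact for the contiguous masks normally used here.
 */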
static int
mask_matchlen(const struct sockaddr *sa)
{
	const char *p, *ep;
	int l;

	p = (const char *)sa;
	ep = p + sa->sa_len;
	p += 2;	/* sa_len + sa_family */

	l = 0;
	while (p < ep) {
		l += (*p ? 8 : 0);	/* estimate */
		p++;
	}
	return l;
}
#endif

#ifndef USE_RADIX
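/*
 * List-based counterpart of the radix lookup: apply the entry's src/dst
 * masks to the given addresses and compare against the registered pair.
 * Returns the (estimated) number of matched bits, or 0 on mismatch.
 */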
static int
mask_match(const struct encaptab *ep,
    const struct sockaddr *sp,
    const struct sockaddr *dp)
{
	struct sockaddr_storage s;
	struct sockaddr_storage d;
	int i;
	const u_int8_t *p, *q;
	u_int8_t *r;
	int matchlen;

	KASSERTMSG(ep->func == NULL, "wrong encaptab passed to mask_match");

	if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d))
		return 0;
	if (sp->sa_family != ep->af || dp->sa_family != ep->af)
		return 0;
	if (sp->sa_len != ep->src->sa_len || dp->sa_len != ep->dst->sa_len)
		return 0;

	matchlen = 0;

	p = (const u_int8_t *)sp;
	q = (const u_int8_t *)ep->srcmask;
	r = (u_int8_t *)&s;
	for (i = 0 ; i < sp->sa_len; i++) {
		r[i] = p[i] & q[i];
		/* XXX estimate */
		matchlen += (q[i] ? 8 : 0);
	}

	p = (const u_int8_t *)dp;
	q = (const u_int8_t *)ep->dstmask;
	r = (u_int8_t *)&d;
	for (i = 0 ; i < dp->sa_len; i++) {
		r[i] = p[i] & q[i];
		/* XXX rough estimate */
		matchlen += (q[i] ? 8 : 0);
	}

	/* need to overwrite len/family portion as we don't compare them */
	s.ss_len = sp->sa_len;
	s.ss_family = sp->sa_family;
	d.ss_len = dp->sa_len;
	d.ss_family = dp->sa_family;

	if (memcmp(&s, ep->src, ep->src->sa_len) == 0 &&
	    memcmp(&d, ep->dst, ep->dst->sa_len) == 0) {
		return matchlen;
	} else
		return 0;
}
#endif

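/*
 * encap_fillarg() stashes the matched entry's arg pointer in an m_tag on
 * the mbuf so that the tunnel's pr_input routine can later retrieve (and
 * consume) it with encap_getarg().
 */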
static void
encap_fillarg(struct mbuf *m, const struct encaptab *ep)
{
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_ENCAP, sizeof(void *), M_NOWAIT);
	if (mtag) {
		*(void **)(mtag + 1) = ep->arg;
		m_tag_prepend(m, mtag);
	}
}

void *
encap_getarg(struct mbuf *m)
{
	void *p;
	struct m_tag *mtag;

	p = NULL;
	mtag = m_tag_find(m, PACKET_TAG_ENCAP, NULL);
	if (mtag != NULL) {
		p = *(void **)(mtag + 1);
		m_tag_delete(m, mtag);
	}
	return p;
}

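/*
 * Exclusive writer lock over the whole encap table.  Callers of
 * encap_attach*()/encap_detach() must hold it; see the encap_lock_held()
 * assertions above.
 */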
int
encap_lock_enter(void)
{
	int error;

	mutex_enter(&encap_whole.lock);
	while (encap_whole.busy != NULL) {
		error = cv_wait_sig(&encap_whole.cv, &encap_whole.lock);
		if (error) {
			mutex_exit(&encap_whole.lock);
			return error;
		}
	}
	KASSERT(encap_whole.busy == NULL);
	encap_whole.busy = curlwp;
	mutex_exit(&encap_whole.lock);

	return 0;
}

void
encap_lock_exit(void)
{

	mutex_enter(&encap_whole.lock);
	KASSERT(encap_whole.busy == curlwp);
	encap_whole.busy = NULL;
	cv_broadcast(&encap_whole.cv);
	mutex_exit(&encap_whole.lock);
}

bool
encap_lock_held(void)
{

	return (encap_whole.busy == curlwp);
}