ip_encap.c revision 1.76 1 /* $NetBSD: ip_encap.c,v 1.76 2022/12/07 08:28:46 knakahara Exp $ */
2 /* $KAME: ip_encap.c,v 1.73 2001/10/02 08:30:58 itojun Exp $ */
3
4 /*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32 /*
33 * My grandfather said that there's a devil inside tunnelling technology...
34 *
35 * We have surprisingly many protocols that want packets with IP protocol
36 * #4 or #41. Here's a list of protocols that want protocol #41:
37 * RFC1933 configured tunnel
38 * RFC1933 automatic tunnel
39 * RFC2401 IPsec tunnel
40 * RFC2473 IPv6 generic packet tunnelling
41 * RFC2529 6over4 tunnel
42 * RFC3056 6to4 tunnel
43 * isatap tunnel
44 * mobile-ip6 (uses RFC2473)
45 * Here's a list of protocol that want protocol #4:
46 * RFC1853 IPv4-in-IPv4 tunnelling
47 * RFC2003 IPv4 encapsulation within IPv4
48 * RFC2344 reverse tunnelling for mobile-ip4
49 * RFC2401 IPsec tunnel
50 * Well, what can I say. They impose different en/decapsulation mechanism
51 * from each other, so they need separate protocol handler. The only one
52 * we can easily determine by protocol # is IPsec, which always has
53 * AH/ESP/IPComp header right after outer IP header.
54 *
55 * So, clearly good old protosw does not work for protocol #4 and #41.
56 * The code will let you match protocol via src/dst address pair.
57 */
58 /* XXX is M_NETADDR correct? */
59
/*
 * With USE_RADIX the code will use a radix table for tunnel lookup, for
 * tunnels registered with encap_attach() with an addr/mask pair.
 * Faster on machines with thousands of tunnel registrations (= interfaces).
 *
 * The code assumes that the radix table code can handle a non-contiguous
 * netmask, as it will pass the radix table a memory region with a
 * (src + dst) sockaddr pair.
 */
68 #define USE_RADIX
69
70 #include <sys/cdefs.h>
71 __KERNEL_RCSID(0, "$NetBSD: ip_encap.c,v 1.76 2022/12/07 08:28:46 knakahara Exp $");
72
73 #ifdef _KERNEL_OPT
74 #include "opt_mrouting.h"
75 #include "opt_inet.h"
76 #include "opt_net_mpsafe.h"
77 #endif
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h> /* for softnet_lock */
83 #include <sys/sockio.h>
84 #include <sys/mbuf.h>
85 #include <sys/errno.h>
86 #include <sys/queue.h>
87 #include <sys/kmem.h>
88 #include <sys/mutex.h>
89 #include <sys/condvar.h>
90 #include <sys/psref.h>
91 #include <sys/pslist.h>
92 #include <sys/thmap.h>
93
94 #include <net/if.h>
95
96 #include <netinet/in.h>
97 #include <netinet/in_systm.h>
98 #include <netinet/ip.h>
99 #include <netinet/ip_var.h>
100 #include <netinet/ip_encap.h>
101 #ifdef MROUTING
102 #include <netinet/ip_mroute.h>
103 #endif /* MROUTING */
104
105 #ifdef INET6
106 #include <netinet/ip6.h>
107 #include <netinet6/ip6_var.h>
108 #include <netinet6/ip6protosw.h> /* for struct ip6ctlparam */
109 #include <netinet6/in6_var.h>
110 #include <netinet6/in6_pcb.h>
111 #include <netinet/icmp6.h>
112 #endif
113
114 #ifdef NET_MPSAFE
115 #define ENCAP_MPSAFE 1
116 #endif
117
118 enum direction { INBOUND, OUTBOUND };
119
120 #ifdef INET
121 static struct encaptab *encap4_lookup(struct mbuf *, int, int, enum direction,
122 struct psref *);
123 #endif
124 #ifdef INET6
125 static struct encaptab *encap6_lookup(struct mbuf *, int, int, enum direction,
126 struct psref *);
127 #endif
128 static int encap_add(struct encaptab *);
129 static int encap_remove(struct encaptab *);
130 static void encap_afcheck(int, const struct sockaddr *, const struct sockaddr *);
131 #ifdef USE_RADIX
132 static struct radix_node_head *encap_rnh(int);
133 static int mask_matchlen(const struct sockaddr *);
134 #else
135 static int mask_match(const struct encaptab *, const struct sockaddr *,
136 const struct sockaddr *);
137 #endif
138 static void encap_key_init(struct encap_key *, const struct sockaddr *,
139 const struct sockaddr *);
140 static void encap_key_inc(struct encap_key *);
141
/*
 * In encap[46]_lookup(), ep->func can sleep (e.g. rtalloc1) while walking
 * encap_table, so a pserialize read section alone cannot protect an entry
 * across that call; each element is additionally protected by a psref,
 * and the read section is dropped around ep->func invocations.
 */
static struct {
	struct pslist_head list;	/* list of all attached encaptabs */
	pserialize_t psz;		/* drains readers on update/detach */
	struct psref_class *elem_class;	/* for the element of et_list */
} encaptab __cacheline_aligned = {
	.list = PSLIST_INITIALIZER,
};
#define encap_table encaptab.list

/* Serializes writers (attach/detach) over the whole encap machinery. */
static struct {
	kmutex_t lock;
	kcondvar_t cv;
	struct lwp *busy;		/* LWP currently holding the lock */
} encap_whole __cacheline_aligned;

#ifdef USE_RADIX
struct radix_node_head *encap_head[2];	/* 0 for AF_INET, 1 for AF_INET6 */
/* Set while a writer mutates the radix trees; readers must back off. */
static bool encap_head_updating = false;
#endif

/* Exact-match (address pair) maps for encap_attach_addr() entries. */
static thmap_t *encap_map[2];	/* 0 for AF_INET, 1 for AF_INET6 */

static bool encap_initialized = false;
169 /*
170 * must be done before other encap interfaces initialization.
171 */
172 void
173 encapinit(void)
174 {
175
176 if (encap_initialized)
177 return;
178
179 encaptab.psz = pserialize_create();
180 encaptab.elem_class = psref_class_create("encapelem", IPL_SOFTNET);
181
182 mutex_init(&encap_whole.lock, MUTEX_DEFAULT, IPL_NONE);
183 cv_init(&encap_whole.cv, "ip_encap cv");
184 encap_whole.busy = NULL;
185
186 encap_initialized = true;
187 }
188
/*
 * Per-protocol-family initialization: sets up the (delayed) radix heads
 * and the thmap exact-match tables.  Safe to call more than once.
 */
void
encap_init(void)
{
	static int initialized = 0;

	if (initialized)
		return;
	initialized++;
#if 0
	/*
	 * we cannot use LIST_INIT() here, since drivers may want to call
	 * encap_attach(), on driver attach. encap_init() will be called
	 * on AF_INET{,6} initialization, which happens after driver
	 * initialization - using LIST_INIT() here can nuke encap_attach()
	 * from drivers.
	 */
	PSLIST_INIT(&encap_table);
#endif

#ifdef USE_RADIX
	/*
	 * initialize radix lookup table when the radix subsystem is inited.
	 * Key length is the full (src + dst) sockaddr pack, in bits.
	 */
	rn_delayedinit((void *)&encap_head[0],
	    sizeof(struct sockaddr_pack) << 3);
#ifdef INET6
	rn_delayedinit((void *)&encap_head[1],
	    sizeof(struct sockaddr_pack) << 3);
#endif
#endif

	/* Exact-match maps for encap_attach_addr() entries. */
	encap_map[0] = thmap_create(0, NULL, THMAP_NOCOPY);
#ifdef INET6
	encap_map[1] = thmap_create(0, NULL, THMAP_NOCOPY);
#endif
}
225
226 #ifdef INET
/*
 * Find the best tunnel match for the mbuf's outer IPv4 header.
 * On success, returns the matched entry with a reference held via
 * match_psref (caller must psref_release() it).  Returns NULL when
 * nothing matches, or when a radix-table update is in progress.
 */
static struct encaptab *
encap4_lookup(struct mbuf *m, int off, int proto, enum direction dir,
    struct psref *match_psref)
{
	struct ip *ip;
	struct ip_pack4 pack;
	struct encaptab *ep, *match;
	int prio, matchprio;
	int s;
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(AF_INET);
	struct radix_node *rn;
#endif
	thmap_t *emap = encap_map[0];
	struct encap_key key;

	KASSERT(m->m_len >= sizeof(*ip));

	ip = mtod(m, struct ip *);

	/*
	 * Build the (mine, yours) sockaddr pair used as the search key.
	 * INBOUND: mine = ip_dst, yours = ip_src; OUTBOUND is the reverse.
	 */
	memset(&pack, 0, sizeof(pack));
	pack.p.sp_len = sizeof(pack);
	pack.mine.sin_family = pack.yours.sin_family = AF_INET;
	pack.mine.sin_len = pack.yours.sin_len = sizeof(struct sockaddr_in);
	if (dir == INBOUND) {
		pack.mine.sin_addr = ip->ip_dst;
		pack.yours.sin_addr = ip->ip_src;
	} else {
		pack.mine.sin_addr = ip->ip_src;
		pack.yours.sin_addr = ip->ip_dst;
	}

	match = NULL;
	matchprio = 0;

	s = pserialize_read_enter();
#ifdef USE_RADIX
	if (encap_head_updating) {
		/*
		 * Update in progress. Do nothing.
		 */
		pserialize_read_exit(s);
		return NULL;
	}

	/* Best-match against encap_attach()ed addr/mask entries. */
	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
		struct encaptab *encapp = (struct encaptab *)rn;

		/* Hold the candidate for the caller; may be beaten below. */
		psref_acquire(match_psref, &encapp->psref,
		    encaptab.elem_class);
		match = encapp;
		matchprio = mask_matchlen(match->srcmask) +
		    mask_matchlen(match->dstmask);
	}
#endif

	/*
	 * Exact-match lookup of encap_attach_addr() entries.  Entries
	 * sharing the same address pair are disambiguated by key.seq;
	 * probe seq = 0, 1, ... until the first miss.
	 */
	encap_key_init(&key, sintosa(&pack.mine), sintosa(&pack.yours));
	while ((ep = thmap_get(emap, &key, sizeof(key))) != NULL) {
		struct psref elem_psref;

		KASSERT(ep->af == AF_INET);

		if (ep->proto >= 0 && ep->proto != proto) {
			encap_key_inc(&key);
			continue;
		}

		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		if (ep->func) {
			/* ep->func may sleep; leave the read section. */
			pserialize_read_exit(s);
			prio = (*ep->func)(m, off, proto, ep->arg);
			s = pserialize_read_enter();
		} else {
			prio = pack.mine.sin_len + pack.yours.sin_len;
		}

		if (prio <= 0) {
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			encap_key_inc(&key);
			continue;
		}
		if (prio > matchprio) {
			/* release last matched ep */
			if (match != NULL)
				psref_release(match_psref, &match->psref,
				    encaptab.elem_class);

			psref_copy(match_psref, &elem_psref,
			    encaptab.elem_class);
			matchprio = prio;
			match = ep;
		}

		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		encap_key_inc(&key);
	}

	/* Finally, walk the list (function-based entries under USE_RADIX). */
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		struct psref elem_psref;

		if (ep->af != AF_INET)
			continue;
		if (ep->proto >= 0 && ep->proto != proto)
			continue;

		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		if (ep->func) {
			pserialize_read_exit(s);
			/* ep->func is sleepable. e.g. rtalloc1 */
			prio = (*ep->func)(m, off, proto, ep->arg);
			s = pserialize_read_enter();
		} else {
#ifdef USE_RADIX
			/* Already considered via the radix tree above. */
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
#else
			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
			    (struct sockaddr *)&pack.yours);
#endif
		}

		/*
		 * We prioritize the matches by using bit length of the
		 * matches. mask_match() and user-supplied matching function
		 * should return the bit length of the matches (for example,
		 * if both src/dst are matched for IPv4, 64 should be returned).
		 * 0 or negative return value means "it did not match".
		 *
		 * The question is, since we have two "mask" portion, we
		 * cannot really define total order between entries.
		 * For example, which of these should be preferred?
		 * mask_match() returns 48 (32 + 16) for both of them.
		 *	src=3ffe::/16, dst=3ffe:501::/32
		 *	src=3ffe:501::/32, dst=3ffe::/16
		 *
		 * We need to loop through all the possible candidates
		 * to get the best match - the search takes O(n) for
		 * n attachments (i.e. interfaces).
		 *
		 * For radix-based lookup, I guess source takes precedence.
		 * See rn_{refines,lexobetter} for the correct answer.
		 */
		if (prio <= 0) {
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
		}
		if (prio > matchprio) {
			/* release last matched ep */
			if (match != NULL)
				psref_release(match_psref, &match->psref,
				    encaptab.elem_class);

			psref_copy(match_psref, &elem_psref,
			    encaptab.elem_class);
			matchprio = prio;
			match = ep;
		}
		KASSERTMSG((match == NULL) || psref_held(&match->psref,
		    encaptab.elem_class),
		    "current match = %p, but not hold its psref", match);

		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
	}
	pserialize_read_exit(s);

	return match;
}
402
403 void
404 encap4_input(struct mbuf *m, int off, int proto)
405 {
406 const struct encapsw *esw;
407 struct encaptab *match;
408 struct psref match_psref;
409
410 match = encap4_lookup(m, off, proto, INBOUND, &match_psref);
411 if (match) {
412 /* found a match, "match" has the best one */
413 esw = match->esw;
414 if (esw && esw->encapsw4.pr_input) {
415 (*esw->encapsw4.pr_input)(m, off, proto, match->arg);
416 psref_release(&match_psref, &match->psref,
417 encaptab.elem_class);
418 } else {
419 psref_release(&match_psref, &match->psref,
420 encaptab.elem_class);
421 m_freem(m);
422 }
423 return;
424 }
425
426 /* last resort: inject to raw socket */
427 SOFTNET_LOCK_IF_NET_MPSAFE();
428 rip_input(m, off, proto);
429 SOFTNET_UNLOCK_IF_NET_MPSAFE();
430 }
431 #endif
432
433 #ifdef INET6
/*
 * IPv6 counterpart of encap4_lookup(): find the best tunnel match for
 * the mbuf's outer IPv6 header.  On success, returns the matched entry
 * with a reference held via match_psref (caller must psref_release()).
 * Returns NULL when nothing matches, or when a radix-table update is
 * in progress.
 */
static struct encaptab *
encap6_lookup(struct mbuf *m, int off, int proto, enum direction dir,
    struct psref *match_psref)
{
	struct ip6_hdr *ip6;
	struct ip_pack6 pack;
	int prio, matchprio;
	int s;
	struct encaptab *ep, *match;
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(AF_INET6);
	struct radix_node *rn;
#endif
	thmap_t *emap = encap_map[1];
	struct encap_key key;

	KASSERT(m->m_len >= sizeof(*ip6));

	ip6 = mtod(m, struct ip6_hdr *);

	/*
	 * Build the (mine, yours) sockaddr pair used as the search key.
	 * INBOUND: mine = ip6_dst, yours = ip6_src; OUTBOUND is reversed.
	 */
	memset(&pack, 0, sizeof(pack));
	pack.p.sp_len = sizeof(pack);
	pack.mine.sin6_family = pack.yours.sin6_family = AF_INET6;
	pack.mine.sin6_len = pack.yours.sin6_len = sizeof(struct sockaddr_in6);
	if (dir == INBOUND) {
		pack.mine.sin6_addr = ip6->ip6_dst;
		pack.yours.sin6_addr = ip6->ip6_src;
	} else {
		pack.mine.sin6_addr = ip6->ip6_src;
		pack.yours.sin6_addr = ip6->ip6_dst;
	}

	match = NULL;
	matchprio = 0;

	s = pserialize_read_enter();
#ifdef USE_RADIX
	if (encap_head_updating) {
		/*
		 * Update in progress. Do nothing.
		 */
		pserialize_read_exit(s);
		return NULL;
	}

	/* Best-match against encap_attach()ed addr/mask entries. */
	rn = rnh->rnh_matchaddr((void *)&pack, rnh);
	if (rn && (rn->rn_flags & RNF_ROOT) == 0) {
		struct encaptab *encapp = (struct encaptab *)rn;

		/* Hold the candidate for the caller; may be beaten below. */
		psref_acquire(match_psref, &encapp->psref,
		    encaptab.elem_class);
		match = encapp;
		matchprio = mask_matchlen(match->srcmask) +
		    mask_matchlen(match->dstmask);
	}
#endif

	/*
	 * Exact-match lookup of encap_attach_addr() entries.  Entries
	 * sharing the same address pair are disambiguated by key.seq;
	 * probe seq = 0, 1, ... until the first miss.
	 */
	encap_key_init(&key, sin6tosa(&pack.mine), sin6tosa(&pack.yours));
	while ((ep = thmap_get(emap, &key, sizeof(key))) != NULL) {
		struct psref elem_psref;

		KASSERT(ep->af == AF_INET6);

		if (ep->proto >= 0 && ep->proto != proto) {
			encap_key_inc(&key);
			continue;
		}

		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		if (ep->func) {
			/* ep->func may sleep; leave the read section. */
			pserialize_read_exit(s);
			prio = (*ep->func)(m, off, proto, ep->arg);
			s = pserialize_read_enter();
		} else {
			prio = pack.mine.sin6_len + pack.yours.sin6_len;
		}

		if (prio <= 0) {
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			encap_key_inc(&key);
			continue;
		}
		if (prio > matchprio) {
			/* release last matched ep */
			if (match != NULL)
				psref_release(match_psref, &match->psref,
				    encaptab.elem_class);

			psref_copy(match_psref, &elem_psref,
			    encaptab.elem_class);
			matchprio = prio;
			match = ep;
		}
		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		encap_key_inc(&key);
	}

	/* Finally, walk the list (function-based entries under USE_RADIX). */
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		struct psref elem_psref;

		if (ep->af != AF_INET6)
			continue;
		if (ep->proto >= 0 && ep->proto != proto)
			continue;

		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);

		if (ep->func) {
			pserialize_read_exit(s);
			/* ep->func is sleepable. e.g. rtalloc1 */
			prio = (*ep->func)(m, off, proto, ep->arg);
			s = pserialize_read_enter();
		} else {
#ifdef USE_RADIX
			/* Already considered via the radix tree above. */
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
#else
			prio = mask_match(ep, (struct sockaddr *)&pack.mine,
			    (struct sockaddr *)&pack.yours);
#endif
		}

		/* see encap4_lookup() for issues here */
		if (prio <= 0) {
			psref_release(&elem_psref, &ep->psref,
			    encaptab.elem_class);
			continue;
		}
		if (prio > matchprio) {
			/* release last matched ep */
			if (match != NULL)
				psref_release(match_psref, &match->psref,
				    encaptab.elem_class);

			psref_copy(match_psref, &elem_psref,
			    encaptab.elem_class);
			matchprio = prio;
			match = ep;
		}
		KASSERTMSG((match == NULL) || psref_held(&match->psref,
		    encaptab.elem_class),
		    "current match = %p, but not hold its psref", match);

		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
	}
	pserialize_read_exit(s);

	return match;
}
589
590 int
591 encap6_input(struct mbuf **mp, int *offp, int proto)
592 {
593 struct mbuf *m = *mp;
594 const struct encapsw *esw;
595 struct encaptab *match;
596 struct psref match_psref;
597 int rv;
598
599 match = encap6_lookup(m, *offp, proto, INBOUND, &match_psref);
600
601 if (match) {
602 /* found a match */
603 esw = match->esw;
604 if (esw && esw->encapsw6.pr_input) {
605 int ret;
606 ret = (*esw->encapsw6.pr_input)(mp, offp, proto,
607 match->arg);
608 psref_release(&match_psref, &match->psref,
609 encaptab.elem_class);
610 return ret;
611 } else {
612 psref_release(&match_psref, &match->psref,
613 encaptab.elem_class);
614 m_freem(m);
615 return IPPROTO_DONE;
616 }
617 }
618
619 /* last resort: inject to raw socket */
620 SOFTNET_LOCK_IF_NET_MPSAFE();
621 rv = rip6_input(mp, offp, proto);
622 SOFTNET_UNLOCK_IF_NET_MPSAFE();
623 return rv;
624 }
625 #endif
626
/*
 * Insert a fully-constructed entry into the lookup structures.
 * Caller must hold the encap writer lock (encap_lock_enter()).
 * XXX
 * The encaptab list and the rnh radix tree must be manipulated atomically.
 */
static int
encap_add(struct encaptab *ep)
{
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(ep->af);
#endif

	KASSERT(encap_lock_held());

#ifdef USE_RADIX
	/* Only addr/mask entries (no match function) go into the radix tree. */
	if (!ep->func && rnh) {
		/* Disable access to the radix tree for reader. */
		encap_head_updating = true;
		/* Wait for all readers to drain. */
		pserialize_perform(encaptab.psz);

		if (!rnh->rnh_addaddr((void *)ep->addrpack,
		    (void *)ep->maskpack, rnh, ep->nodes)) {
			/* Duplicate addr/mask pair already in the tree. */
			encap_head_updating = false;
			return EEXIST;
		}

		/*
		 * The ep added to the radix tree must be skipped while
		 * encap[46]_lookup walks encaptab list. In other words,
		 * encap_add() does not need to care whether the ep has
		 * been added encaptab list or not yet.
		 * So, we can re-enable access to the radix tree for now.
		 */
		encap_head_updating = false;
	}
#endif
	PSLIST_WRITER_INSERT_HEAD(&encap_table, ep, chain);

	return 0;
}
667
/*
 * Remove an entry from the lookup structures (inverse of encap_add()).
 * Caller must hold the encap writer lock (encap_lock_enter()).
 * XXX
 * The encaptab list and the rnh radix tree must be manipulated atomically.
 */
static int
encap_remove(struct encaptab *ep)
{
#ifdef USE_RADIX
	struct radix_node_head *rnh = encap_rnh(ep->af);
#endif
	int error = 0;

	KASSERT(encap_lock_held());

#ifdef USE_RADIX
	/* Only addr/mask entries (no match function) live in the radix tree. */
	if (!ep->func && rnh) {
		/* Disable access to the radix tree for reader. */
		encap_head_updating = true;
		/* Wait for all readers to drain. */
		pserialize_perform(encaptab.psz);

		if (!rnh->rnh_deladdr((void *)ep->addrpack,
		    (void *)ep->maskpack, rnh))
			error = ESRCH;

		/*
		 * Once deleted from the radix tree, the ep is invisible to
		 * radix-based lookup, and encap[46]_lookup skips non-func
		 * entries on the encaptab list under USE_RADIX.  So we can
		 * re-enable access to the radix tree before removing the
		 * ep from the list below.
		 */
		encap_head_updating = false;
	}
#endif
	PSLIST_WRITER_REMOVE(ep, chain);

	return error;
}
707
708 static void
709 encap_afcheck(int af, const struct sockaddr *sp, const struct sockaddr *dp)
710 {
711
712 KASSERT(sp != NULL && dp != NULL);
713 KASSERT(sp->sa_len == dp->sa_len);
714 KASSERT(af == sp->sa_family && af == dp->sa_family);
715
716 socklen_t len __diagused = sockaddr_getsize_by_family(af);
717 KASSERT(len != 0 && len == sp->sa_len && len == dp->sa_len);
718 }
719
/*
 * sp (src ptr) is always my side, and dp (dst ptr) is always remote side.
 * length of mask (sm and dm) is assumed to be same as sp/dp.
 * Return value will be necessary as input (cookie) for encap_detach().
 * Returns NULL on failure (duplicate registration, bad af, or radix
 * insertion failure).  May sleep; caller must hold the encap writer lock.
 */
const struct encaptab *
encap_attach(int af, int proto,
    const struct sockaddr *sp, const struct sockaddr *sm,
    const struct sockaddr *dp, const struct sockaddr *dm,
    const struct encapsw *esw, void *arg)
{
	struct encaptab *ep;
	int error;
	int pss;
	size_t l;
	struct ip_pack4 *pack4;
#ifdef INET6
	struct ip_pack6 *pack6;
#endif
#ifndef ENCAP_MPSAFE
	int s;

	s = splsoftnet();
#endif

	ASSERT_SLEEPABLE();

	/* sanity check on args */
	encap_afcheck(af, sp, dp);

	/* check if anyone have already attached with exactly same config */
	pss = pserialize_read_enter();
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		if (ep->af != af)
			continue;
		if (ep->proto != proto)
			continue;
		/* function-based entries never conflict with addr/mask ones */
		if (ep->func)
			continue;

		KASSERT(ep->src != NULL);
		KASSERT(ep->dst != NULL);
		KASSERT(ep->srcmask != NULL);
		KASSERT(ep->dstmask != NULL);

		if (ep->src->sa_len != sp->sa_len ||
		    memcmp(ep->src, sp, sp->sa_len) != 0 ||
		    memcmp(ep->srcmask, sm, sp->sa_len) != 0)
			continue;
		if (ep->dst->sa_len != dp->sa_len ||
		    memcmp(ep->dst, dp, dp->sa_len) != 0 ||
		    memcmp(ep->dstmask, dm, dp->sa_len) != 0)
			continue;

		/* exact duplicate of an existing registration */
		error = EEXIST;
		pserialize_read_exit(pss);
		goto fail;
	}
	pserialize_read_exit(pss);

	/* size of the packed (src + dst) sockaddr pair for this family */
	switch (af) {
	case AF_INET:
		l = sizeof(*pack4);
		break;
#ifdef INET6
	case AF_INET6:
		l = sizeof(*pack6);
		break;
#endif
	default:
		goto fail;
	}

	/* M_NETADDR ok? */
	ep = kmem_zalloc(sizeof(*ep), KM_SLEEP);
	ep->addrpack = kmem_zalloc(l, KM_SLEEP);
	ep->maskpack = kmem_zalloc(l, KM_SLEEP);

	ep->af = af;
	ep->proto = proto;
	ep->addrpack->sa_len = l & 0xff;
	ep->maskpack->sa_len = l & 0xff;
	/* Aim src/dst/srcmask/dstmask into the packed sockaddr pairs. */
	switch (af) {
	case AF_INET:
		pack4 = (struct ip_pack4 *)ep->addrpack;
		ep->src = (struct sockaddr *)&pack4->mine;
		ep->dst = (struct sockaddr *)&pack4->yours;
		pack4 = (struct ip_pack4 *)ep->maskpack;
		ep->srcmask = (struct sockaddr *)&pack4->mine;
		ep->dstmask = (struct sockaddr *)&pack4->yours;
		break;
#ifdef INET6
	case AF_INET6:
		pack6 = (struct ip_pack6 *)ep->addrpack;
		ep->src = (struct sockaddr *)&pack6->mine;
		ep->dst = (struct sockaddr *)&pack6->yours;
		pack6 = (struct ip_pack6 *)ep->maskpack;
		ep->srcmask = (struct sockaddr *)&pack6->mine;
		ep->dstmask = (struct sockaddr *)&pack6->yours;
		break;
#endif
	}

	memcpy(ep->src, sp, sp->sa_len);
	memcpy(ep->srcmask, sm, sp->sa_len);
	memcpy(ep->dst, dp, dp->sa_len);
	memcpy(ep->dstmask, dm, dp->sa_len);
	ep->esw = esw;
	ep->arg = arg;
	psref_target_init(&ep->psref, encaptab.elem_class);

	error = encap_add(ep);
	if (error)
		goto gc;

	error = 0;
#ifndef ENCAP_MPSAFE
	splx(s);
#endif
	return ep;

gc:
	if (ep->addrpack)
		kmem_free(ep->addrpack, l);
	if (ep->maskpack)
		kmem_free(ep->maskpack, l);
	if (ep)
		kmem_free(ep, sizeof(*ep));
fail:
#ifndef ENCAP_MPSAFE
	splx(s);
#endif
	return NULL;
}
854
855 const struct encaptab *
856 encap_attach_func(int af, int proto,
857 encap_priofunc_t *func,
858 const struct encapsw *esw, void *arg)
859 {
860 struct encaptab *ep;
861 int error;
862 #ifndef ENCAP_MPSAFE
863 int s;
864
865 s = splsoftnet();
866 #endif
867
868 ASSERT_SLEEPABLE();
869
870 /* sanity check on args */
871 KASSERT(func != NULL);
872 KASSERT(af == AF_INET
873 #ifdef INET6
874 || af == AF_INET6
875 #endif
876 );
877
878 ep = kmem_alloc(sizeof(*ep), KM_SLEEP);
879 memset(ep, 0, sizeof(*ep));
880
881 ep->af = af;
882 ep->proto = proto;
883 ep->func = func;
884 ep->esw = esw;
885 ep->arg = arg;
886 psref_target_init(&ep->psref, encaptab.elem_class);
887
888 error = encap_add(ep);
889 if (error)
890 goto gc;
891
892 error = 0;
893 #ifndef ENCAP_MPSAFE
894 splx(s);
895 #endif
896 return ep;
897
898 gc:
899 kmem_free(ep, sizeof(*ep));
900 #ifndef ENCAP_MPSAFE
901 splx(s);
902 #endif
903 return NULL;
904 }
905
906 static void
907 encap_key_init(struct encap_key *key,
908 const struct sockaddr *local, const struct sockaddr *remote)
909 {
910
911 memset(key, 0, sizeof(*key));
912
913 sockaddr_copy(&key->local_sa, sizeof(key->local_u), local);
914 sockaddr_copy(&key->remote_sa, sizeof(key->remote_u), remote);
915 }
916
917 static void
918 encap_key_inc(struct encap_key *key)
919 {
920
921 (key->seq)++;
922 }
923
924 static void
925 encap_key_dec(struct encap_key *key)
926 {
927
928 (key->seq)--;
929 }
930
/*
 * Duplicate a thmap key.  Zeroed first so padding bytes in *dst are
 * deterministic, since keys are compared bytewise with sizeof(*dst).
 * NOTE(review): the struct assignment below is not guaranteed by C to
 * copy padding bytes; this relies on the compiler copying the whole
 * object — confirm if the toolchain ever changes.
 */
static void
encap_key_copy(struct encap_key *dst, const struct encap_key *src)
{

	memset(dst, 0, sizeof(*dst));
	*dst = *src;
}
938
/*
 * src is always my side, and dst is always remote side.
 * Return value will be necessary as input (cookie) for encap_detach().
 * Entries are stored in the per-family thmap keyed by (src, dst, seq);
 * seq disambiguates multiple registrations with the same address pair.
 * May sleep; returns NULL only for an unsupported address family.
 */
const struct encaptab *
encap_attach_addr(int af, int proto,
    const struct sockaddr *src, const struct sockaddr *dst,
    encap_priofunc_t *func,
    const struct encapsw *esw, void *arg)
{
	struct encaptab *ep;
	size_t l;
	thmap_t *emap;
	void *retep;
	struct ip_pack4 *pack4;
#ifdef INET6
	struct ip_pack6 *pack6;
#endif

	ASSERT_SLEEPABLE();

	encap_afcheck(af, src, dst);

	/* pick the per-family map and packed sockaddr size */
	switch (af) {
	case AF_INET:
		l = sizeof(*pack4);
		emap = encap_map[0];
		break;
#ifdef INET6
	case AF_INET6:
		l = sizeof(*pack6);
		emap = encap_map[1];
		break;
#endif
	default:
		return NULL;
	}

	ep = kmem_zalloc(sizeof(*ep), KM_SLEEP);
	ep->addrpack = kmem_zalloc(l, KM_SLEEP);
	ep->addrpack->sa_len = l & 0xff;
	ep->af = af;
	ep->proto = proto;
	/* marks this entry for encap_detach_addr() handling */
	ep->flag = IP_ENCAP_ADDR_ENABLE;
	/* Aim src/dst into the packed sockaddr pair. */
	switch (af) {
	case AF_INET:
		pack4 = (struct ip_pack4 *)ep->addrpack;
		ep->src = (struct sockaddr *)&pack4->mine;
		ep->dst = (struct sockaddr *)&pack4->yours;
		break;
#ifdef INET6
	case AF_INET6:
		pack6 = (struct ip_pack6 *)ep->addrpack;
		ep->src = (struct sockaddr *)&pack6->mine;
		ep->dst = (struct sockaddr *)&pack6->yours;
		break;
#endif
	}
	memcpy(ep->src, src, src->sa_len);
	memcpy(ep->dst, dst, dst->sa_len);
	ep->esw = esw;
	ep->arg = arg;
	ep->func = func;
	psref_target_init(&ep->psref, encaptab.elem_class);

	/*
	 * thmap_put() returns the already-present value on collision;
	 * bump seq until we find a free slot for this address pair.
	 */
	encap_key_init(&ep->key, src, dst);
	while ((retep = thmap_put(emap, &ep->key, sizeof(ep->key), ep)) != ep)
		encap_key_inc(&ep->key);
	return ep;
}
1009
1010
1011 /* XXX encap4_ctlinput() is necessary if we set DF=1 on outer IPv4 header */
1012
1013 #ifdef INET6
/*
 * ICMPv6 control input for encapsulated traffic: on PRC_MSGSIZE, feed
 * path-MTU discovery (only if an attached tunnel matches the packet),
 * then notify every IPv6 listener's pr_ctlinput, and finally the raw
 * socket layer.  Always returns NULL.
 */
void *
encap6_ctlinput(int cmd, const struct sockaddr *sa, void *d0)
{
	void *d = d0;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	int off;
	struct ip6ctlparam *ip6cp = NULL;
	int nxt;
	int s;
	struct encaptab *ep;
	const struct encapsw *esw;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return NULL;

	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	if (cmd == PRC_HOSTDEAD)
		d = NULL;
	else if (cmd == PRC_MSGSIZE)
		; /* special code is present, see below */
	else if (inet6ctlerrmap[cmd] == 0)
		return NULL;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		nxt = ip6cp->ip6c_nxt;

		if (ip6 && cmd == PRC_MSGSIZE) {
			int valid = 0;
			struct encaptab *match;
			struct psref elem_psref;

			/*
			 * Check to see if we have a valid encap configuration.
			 */
			match = encap6_lookup(m, off, nxt, OUTBOUND,
			    &elem_psref);
			if (match) {
				valid++;
				psref_release(&elem_psref, &match->psref,
				    encaptab.elem_class);
			}

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalculate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
		}
	} else {
		m = NULL;
		ip6 = NULL;
		nxt = -1;
	}

	/* inform all listeners */

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(ep, &encap_table, struct encaptab, chain) {
		struct psref elem_psref;

		if (ep->af != AF_INET6)
			continue;
		if (ep->proto >= 0 && ep->proto != nxt)
			continue;

		/* should optimize by looking at address pairs */

		/* XXX need to pass ep->arg or ep itself to listeners */
		psref_acquire(&elem_psref, &ep->psref,
		    encaptab.elem_class);
		esw = ep->esw;
		if (esw && esw->encapsw6.pr_ctlinput) {
			pserialize_read_exit(s);
			/* pr_ctlinput is sleepable. e.g. rtcache_free */
			(*esw->encapsw6.pr_ctlinput)(cmd, sa, d, ep->arg);
			s = pserialize_read_enter();
		}
		psref_release(&elem_psref, &ep->psref,
		    encaptab.elem_class);
	}
	pserialize_read_exit(s);

	rip6_ctlinput(cmd, sa, d0);
	return NULL;
}
1110 #endif
1111
/*
 * Detach an entry created by encap_attach_addr(): remove it from the
 * per-family thmap, re-key trailing same-address entries to keep their
 * seq numbers dense, and free the entry once all readers are done.
 * Caller must hold the encap writer lock.
 */
static int
encap_detach_addr(const struct encaptab *ep)
{
	thmap_t *emap;
	struct encaptab *retep;
	struct encaptab *target;
	void *thgc;
	struct encap_key key;

	KASSERT(encap_lock_held());
	KASSERT(ep->flag & IP_ENCAP_ADDR_ENABLE);

	switch (ep->af) {
	case AF_INET:
		emap = encap_map[0];
		break;
#ifdef INET6
	case AF_INET6:
		emap = encap_map[1];
		break;
#endif
	default:
		return EINVAL;
	}

	retep = thmap_del(emap, &ep->key, sizeof(ep->key));
	if (retep != ep) {
		return ENOENT;
	}
	target = retep;

	/*
	 * To keep continuity, decrement seq after detached encaptab.
	 * Lookup probes seq = 0, 1, ... until the first miss, so every
	 * entry sharing this address pair with a higher seq must be
	 * re-keyed one step down to close the gap left by the deletion.
	 */
	encap_key_copy(&key, &ep->key);
	encap_key_inc(&key);
	while ((retep = thmap_del(emap, &key, sizeof(key))) != NULL) {
		void *pp;

		encap_key_dec(&retep->key);
		pp = thmap_put(emap, &retep->key, sizeof(retep->key), retep);
		KASSERT(retep == pp);

		encap_key_inc(&key);
	}

	/* Reclaim thmap garbage only after in-flight readers drain. */
	thgc = thmap_stage_gc(emap);
	pserialize_perform(encaptab.psz);
	thmap_gc(emap, thgc);
	/* Wait for psref holders before freeing the entry itself. */
	psref_target_destroy(&target->psref, encaptab.elem_class);
	kmem_free(target->addrpack, target->addrpack->sa_len);
	kmem_free(target, sizeof(*target));

	return 0;
}
1167
/*
 * Detach the tunnel identified by "cookie" (the pointer returned by one
 * of the encap_attach*() routines) and free it.  Caller must hold the
 * encap writer lock (encap_lock_enter()).
 */
int
encap_detach(const struct encaptab *cookie)
{
	const struct encaptab *ep = cookie;
	struct encaptab *p;
	int error;

	KASSERT(encap_lock_held());

	/* encap_attach_addr() entries live in the thmap, not the list. */
	if (ep->flag & IP_ENCAP_ADDR_ENABLE)
		return encap_detach_addr(ep);

	PSLIST_WRITER_FOREACH(p, &encap_table, struct encaptab, chain) {
		if (p == ep) {
			error = encap_remove(p);
			if (error)
				return error;
			else
				break;
		}
	}
	if (p == NULL)
		return ENOENT;

	/* Wait for lookups in flight, then for psref holders, then free. */
	pserialize_perform(encaptab.psz);
	psref_target_destroy(&p->psref,
	    encaptab.elem_class);
	if (!ep->func) {
		/* addr/mask entries own the packed addr and mask sockaddrs */
		kmem_free(p->addrpack, ep->addrpack->sa_len);
		kmem_free(p->maskpack, ep->maskpack->sa_len);
	}
	kmem_free(p, sizeof(*p));

	return 0;
}
1203
1204 #ifdef USE_RADIX
1205 static struct radix_node_head *
1206 encap_rnh(int af)
1207 {
1208
1209 switch (af) {
1210 case AF_INET:
1211 return encap_head[0];
1212 #ifdef INET6
1213 case AF_INET6:
1214 return encap_head[1];
1215 #endif
1216 default:
1217 return NULL;
1218 }
1219 }
1220
1221 static int
1222 mask_matchlen(const struct sockaddr *sa)
1223 {
1224 const char *p, *ep;
1225 int l;
1226
1227 p = (const char *)sa;
1228 ep = p + sa->sa_len;
1229 p += 2; /* sa_len + sa_family */
1230
1231 l = 0;
1232 while (p < ep) {
1233 l += (*p ? 8 : 0); /* estimate */
1234 p++;
1235 }
1236 return l;
1237 }
1238 #endif
1239
1240 #ifndef USE_RADIX
1241 static int
1242 mask_match(const struct encaptab *ep,
1243 const struct sockaddr *sp,
1244 const struct sockaddr *dp)
1245 {
1246 struct sockaddr_storage s;
1247 struct sockaddr_storage d;
1248 int i;
1249 const u_int8_t *p, *q;
1250 u_int8_t *r;
1251 int matchlen;
1252
1253 KASSERTMSG(ep->func == NULL, "wrong encaptab passed to mask_match");
1254
1255 if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d))
1256 return 0;
1257 if (sp->sa_family != ep->af || dp->sa_family != ep->af)
1258 return 0;
1259 if (sp->sa_len != ep->src->sa_len || dp->sa_len != ep->dst->sa_len)
1260 return 0;
1261
1262 matchlen = 0;
1263
1264 p = (const u_int8_t *)sp;
1265 q = (const u_int8_t *)ep->srcmask;
1266 r = (u_int8_t *)&s;
1267 for (i = 0 ; i < sp->sa_len; i++) {
1268 r[i] = p[i] & q[i];
1269 /* XXX estimate */
1270 matchlen += (q[i] ? 8 : 0);
1271 }
1272
1273 p = (const u_int8_t *)dp;
1274 q = (const u_int8_t *)ep->dstmask;
1275 r = (u_int8_t *)&d;
1276 for (i = 0 ; i < dp->sa_len; i++) {
1277 r[i] = p[i] & q[i];
1278 /* XXX rough estimate */
1279 matchlen += (q[i] ? 8 : 0);
1280 }
1281
1282 /* need to overwrite len/family portion as we don't compare them */
1283 s.ss_len = sp->sa_len;
1284 s.ss_family = sp->sa_family;
1285 d.ss_len = dp->sa_len;
1286 d.ss_family = dp->sa_family;
1287
1288 if (memcmp(&s, ep->src, ep->src->sa_len) == 0 &&
1289 memcmp(&d, ep->dst, ep->dst->sa_len) == 0) {
1290 return matchlen;
1291 } else
1292 return 0;
1293 }
1294 #endif
1295
1296 int
1297 encap_lock_enter(void)
1298 {
1299 int error;
1300
1301 mutex_enter(&encap_whole.lock);
1302 while (encap_whole.busy != NULL) {
1303 error = cv_wait_sig(&encap_whole.cv, &encap_whole.lock);
1304 if (error) {
1305 mutex_exit(&encap_whole.lock);
1306 return error;
1307 }
1308 }
1309 KASSERT(encap_whole.busy == NULL);
1310 encap_whole.busy = curlwp;
1311 mutex_exit(&encap_whole.lock);
1312
1313 return 0;
1314 }
1315
/*
 * Release the whole-encap-table writer lock taken by encap_lock_enter()
 * and wake all waiters.
 */
void
encap_lock_exit(void)
{

	mutex_enter(&encap_whole.lock);
	/* Only the owning LWP may release the lock. */
	KASSERT(encap_whole.busy == curlwp);
	encap_whole.busy = NULL;
	cv_broadcast(&encap_whole.cv);
	mutex_exit(&encap_whole.lock);
}
1326
1327 bool
1328 encap_lock_held(void)
1329 {
1330
1331 return (encap_whole.busy == curlwp);
1332 }
1333