1 /*	$NetBSD: if_l2tp.c,v 1.15 2017/11/16 03:07:18 ozaki-r Exp $	*/
2
3 /*
4 * Copyright (c) 2017 Internet Initiative Japan Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * L2TPv3 kernel interface
31 */
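/*
 * Concurrent access scheme: each l2tp interface (struct l2tp_softc)
 * publishes its current configuration through a struct l2tp_variant
 * hung off sc->l2tp_var.  Readers take a psref(9) reference on the
 * variant (l2tp_getref_variant()/l2tp_lookup_session_ref()) inside a
 * pserialize(9) read section.  Writers build a replacement variant,
 * publish it with l2tp_variant_update() while holding sc->l2tp_lock,
 * wait out the readers, and only then free the old variant.
 */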
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.15 2017/11/16 03:07:18 ozaki-r Exp $");
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/mbuf.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/errno.h>
47 #include <sys/ioctl.h>
48 #include <sys/time.h>
49 #include <sys/syslog.h>
50 #include <sys/proc.h>
51 #include <sys/conf.h>
52 #include <sys/kauth.h>
53 #include <sys/cpu.h>
54 #include <sys/cprng.h>
55 #include <sys/intr.h>
56 #include <sys/kmem.h>
57 #include <sys/mutex.h>
58 #include <sys/atomic.h>
59 #include <sys/pserialize.h>
60 #include <sys/device.h>
61 #include <sys/module.h>
62
63 #include <net/if.h>
64 #include <net/if_dl.h>
65 #include <net/if_ether.h>
66 #include <net/if_types.h>
67 #include <net/netisr.h>
68 #include <net/route.h>
69 #include <net/bpf.h>
70 #include <net/if_vlanvar.h>
71
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip_encap.h>
76 #ifdef INET
77 #include <netinet/in_var.h>
78 #include <netinet/in_l2tp.h>
79 #endif /* INET */
80 #ifdef INET6
81 #include <netinet6/in6_l2tp.h>
82 #endif
83
84 #include <net/if_l2tp.h>
85
86 #include <net/if_vlanvar.h>
87
88 /* TODO: IP_TCPMSS support */
89 #undef IP_TCPMSS
90 #ifdef IP_TCPMSS
91 #include <netinet/ip_tcpmss.h>
92 #endif
93
94 #include <net/bpf.h>
95 #include <net/net_osdep.h>
96
97 /*
98 * l2tp global variable definitions
99 */
100 LIST_HEAD(l2tp_sclist, l2tp_softc);
101 static struct {
102 struct l2tp_sclist list;
103 kmutex_t lock;
104 } l2tp_softcs __cacheline_aligned;
105
106
107 #if !defined(L2TP_ID_HASH_SIZE)
108 #define L2TP_ID_HASH_SIZE 64
109 #endif
110 static struct {
111 kmutex_t lock;
112 struct pslist_head *lists;
113 u_long mask;
114 } l2tp_hash __cacheline_aligned = {
115 .lists = NULL,
116 };
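/*
 * Hash table mapping the local session ID (lv_my_sess_id) to its softc,
 * used by l2tp_lookup_session_ref() to find the interface for a received
 * session.  Writers modify the pslist(9) under l2tp_hash.lock; readers
 * traverse it under pserialize(9) only.
 */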
117
118 pserialize_t l2tp_psz __read_mostly;
119 struct psref_class *lv_psref_class __read_mostly;
120
121 static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
122 static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);
123
124 static int l2tp_clone_create(struct if_clone *, int);
125 static int l2tp_clone_destroy(struct ifnet *);
126
127 struct if_clone l2tp_cloner =
128 IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);
129
130 static int l2tp_output(struct ifnet *, struct mbuf *,
131 const struct sockaddr *, const struct rtentry *);
132 static void l2tpintr(struct l2tp_variant *);
133
134 static void l2tp_hash_init(void);
135 static int l2tp_hash_fini(void);
136
137 static void l2tp_start(struct ifnet *);
138 static int l2tp_transmit(struct ifnet *, struct mbuf *);
139
140 static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
141 struct sockaddr *);
142 static void l2tp_delete_tunnel(struct ifnet *);
143
144 static int id_hash_func(uint32_t, u_long);
145
146 static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
147 static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
148 static int l2tp_clear_session(struct l2tp_softc *);
149 static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
150 static void l2tp_clear_cookie(struct l2tp_softc *);
151 static void l2tp_set_state(struct l2tp_softc *, int);
152 static int l2tp_encap_attach(struct l2tp_variant *);
153 static int l2tp_encap_detach(struct l2tp_variant *);
154
155 #ifndef MAX_L2TP_NEST
156 /*
157  * This macro controls the upper limit on nesting of l2tp tunnels.
158  * Since setting a large value for this macro together with a careless
159  * configuration may crash the system, we don't allow any nesting by default.
160  * If you need to configure nested l2tp tunnels, you can define this macro
161  * in your kernel configuration file.  However, if you do so, please be
162  * careful to configure the tunnels so that they don't form a loop.
163 */
164 /*
165 * XXX
166  * Currently, a recursive call to in_l2tp_output() would try to acquire
167  * struct l2tp_ro->lr_lock, which it already holds, so nested l2tp tunnels are prohibited.
168 */
169 #define MAX_L2TP_NEST 0
170 #endif
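/*
 * For example, a kernel configuration that wants to allow one level of
 * nesting would add something along the lines of
 *
 *	options MAX_L2TP_NEST=1
 *
 * (the exact mechanism depends on how the option is declared for
 * config(1) in the relevant files.* fragment).
 */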
171
172 static int max_l2tp_nesting = MAX_L2TP_NEST;
173
174 /* ARGSUSED */
175 void
176 l2tpattach(int count)
177 {
178 /*
179 * Nothing to do here, initialization is handled by the
180  * module initialization code in l2tpinit() below.
181 */
182 }
183
184 static void
185 l2tpinit(void)
186 {
187
188 mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
189 LIST_INIT(&l2tp_softcs.list);
190
191 mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
192 l2tp_psz = pserialize_create();
193 lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
194 if_clone_attach(&l2tp_cloner);
195
196 l2tp_hash_init();
197 }
198
199 static int
200 l2tpdetach(void)
201 {
202 int error;
203
204 mutex_enter(&l2tp_softcs.lock);
205 if (!LIST_EMPTY(&l2tp_softcs.list)) {
206 mutex_exit(&l2tp_softcs.lock);
207 return EBUSY;
208 }
209 mutex_exit(&l2tp_softcs.lock);
210
211 error = l2tp_hash_fini();
212 if (error)
213 return error;
214
215 if_clone_detach(&l2tp_cloner);
216 psref_class_destroy(lv_psref_class);
217 pserialize_destroy(l2tp_psz);
218 mutex_destroy(&l2tp_hash.lock);
219
220 mutex_destroy(&l2tp_softcs.lock);
221
222 return error;
223 }
224
225 static int
226 l2tp_clone_create(struct if_clone *ifc, int unit)
227 {
228 struct l2tp_softc *sc;
229 struct l2tp_variant *var;
230 int rv;
231
232 sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
233 if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
234 rv = l2tpattach0(sc);
235 if (rv != 0) {
236 kmem_free(sc, sizeof(struct l2tp_softc));
237 return rv;
238 }
239
240 var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
241 var->lv_softc = sc;
242 var->lv_state = L2TP_STATE_DOWN;
243 var->lv_use_cookie = L2TP_COOKIE_OFF;
244 psref_target_init(&var->lv_psref, lv_psref_class);
245
246 sc->l2tp_var = var;
247 mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
248 PSLIST_ENTRY_INIT(sc, l2tp_hash);
249
250 sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
251 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);
252
253 mutex_enter(&l2tp_softcs.lock);
254 LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
255 mutex_exit(&l2tp_softcs.lock);
256
257 return (0);
258 }
259
260 int
261 l2tpattach0(struct l2tp_softc *sc)
262 {
263 int rv;
264
265 sc->l2tp_ec.ec_if.if_addrlen = 0;
266 sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
267 sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
268 sc->l2tp_ec.ec_if.if_extflags = IFEF_MPSAFE | IFEF_NO_LINK_STATE_CHANGE;
269 sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
270 sc->l2tp_ec.ec_if.if_output = l2tp_output;
271 sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
272 sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
273 sc->l2tp_ec.ec_if.if_start = l2tp_start;
274 sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
275 sc->l2tp_ec.ec_if._if_input = ether_input;
276 IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);
277 /* XXX
278 * It may improve performance to use if_initialize()/if_register()
279 * so that l2tp_input() calls if_input() instead of
280  * if_percpuq_enqueue(). However, that would cause softnet_lock to be
281  * taken recursively when NET_MPSAFE is not set.
282 */
283 rv = if_attach(&sc->l2tp_ec.ec_if);
284 if (rv != 0)
285 return rv;
286 if_alloc_sadl(&sc->l2tp_ec.ec_if);
287 bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));
288
289 return 0;
290 }
291
292 void
293 l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
294 {
295 struct l2tp_ro *lro = p;
296
297 mutex_init(&lro->lr_lock, MUTEX_DEFAULT, IPL_NONE);
298 }
299
300 void
301 l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
302 {
303 struct l2tp_ro *lro = p;
304
305 rtcache_free(&lro->lr_ro);
306
307 mutex_destroy(&lro->lr_lock);
308 }
309
310 static int
311 l2tp_clone_destroy(struct ifnet *ifp)
312 {
313 struct l2tp_variant *var;
314 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
315 l2tp_ec.ec_if);
316
317 l2tp_clear_session(sc);
318 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
319 /*
320  * Prevent l2tp_transmit() from accessing sc->l2tp_var after it is freed.
321 */
322 mutex_enter(&sc->l2tp_lock);
323 var = sc->l2tp_var;
324 l2tp_variant_update(sc, NULL);
325 mutex_exit(&sc->l2tp_lock);
326
327 mutex_enter(&l2tp_softcs.lock);
328 LIST_REMOVE(sc, l2tp_list);
329 mutex_exit(&l2tp_softcs.lock);
330
331 bpf_detach(ifp);
332
333 if_detach(ifp);
334
335 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
336 percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));
337
338 kmem_free(var, sizeof(struct l2tp_variant));
339 mutex_destroy(&sc->l2tp_lock);
340 kmem_free(sc, sizeof(struct l2tp_softc));
341
342 return 0;
343 }
344
345 static int
346 l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
347 const struct rtentry *rt)
348 {
349 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
350 l2tp_ec.ec_if);
351 struct l2tp_variant *var;
352 struct psref psref;
353 int error = 0;
354
355 var = l2tp_getref_variant(sc, &psref);
356 if (var == NULL) {
357 m_freem(m);
358 return ENETDOWN;
359 }
360
361 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
362
363 m->m_flags &= ~(M_BCAST|M_MCAST);
364
365 if ((ifp->if_flags & IFF_UP) == 0) {
366 m_freem(m);
367 error = ENETDOWN;
368 goto end;
369 }
370
371 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
372 m_freem(m);
373 error = ENETDOWN;
374 goto end;
375 }
376
377 /* XXX should we check if our outer source is legal? */
378
379 /* use DLT_NULL encapsulation here to pass inner af type */
380 M_PREPEND(m, sizeof(int), M_DONTWAIT);
381 	if (m == NULL) {
382 error = ENOBUFS;
383 goto end;
384 }
385 *mtod(m, int *) = dst->sa_family;
386
387 IFQ_ENQUEUE(&ifp->if_snd, m, error);
388 if (error)
389 goto end;
390
391 /*
392 * direct call to avoid infinite loop at l2tpintr()
393 */
394 l2tpintr(var);
395
396 error = 0;
397
398 end:
399 l2tp_putref_variant(var, &psref);
400 if (error)
401 ifp->if_oerrors++;
402
403 return error;
404 }
405
406 static void
407 l2tpintr(struct l2tp_variant *var)
408 {
409 struct l2tp_softc *sc;
410 struct ifnet *ifp;
411 struct mbuf *m;
412 int error;
413
414 KASSERT(psref_held(&var->lv_psref, lv_psref_class));
415
416 sc = var->lv_softc;
417 ifp = &sc->l2tp_ec.ec_if;
418
419 /* output processing */
420 if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
421 IFQ_PURGE(&ifp->if_snd);
422 return;
423 }
424
425 for (;;) {
426 IFQ_DEQUEUE(&ifp->if_snd, m);
427 if (m == NULL)
428 break;
429 m->m_flags &= ~(M_BCAST|M_MCAST);
430 bpf_mtap(ifp, m);
431 switch (var->lv_psrc->sa_family) {
432 #ifdef INET
433 case AF_INET:
434 error = in_l2tp_output(var, m);
435 break;
436 #endif
437 #ifdef INET6
438 case AF_INET6:
439 error = in6_l2tp_output(var, m);
440 break;
441 #endif
442 default:
443 m_freem(m);
444 error = ENETDOWN;
445 break;
446 }
447
448 if (error)
449 ifp->if_oerrors++;
450 else {
451 ifp->if_opackets++;
452 /*
453 * obytes is incremented at ether_output() or
454 * bridge_enqueue().
455 */
456 }
457 }
458
459 }
460
461 void
462 l2tp_input(struct mbuf *m, struct ifnet *ifp)
463 {
464
465 KASSERT(ifp != NULL);
466
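	/*
	 * The mbuf passed in starts with the inner Ethernet frame.  When
	 * its data pointer is 4-byte aligned, the IP header following the
	 * 14-byte Ethernet header ends up misaligned, so copy the first
	 * L2TP_COPY_LENGTH bytes into a fresh header mbuf whose data is
	 * offset by two bytes and chain the rest of the packet behind it.
	 */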
467 	if ((mtod(m, u_long) & 0x03) == 0) {
468 /* copy and align head of payload */
469 struct mbuf *m_head;
470 int copy_length;
471
472 #define L2TP_COPY_LENGTH 60
473 #define L2TP_LINK_HDR_ROOM (MHLEN - L2TP_COPY_LENGTH - 4/*round4(2)*/)
474
475 if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
476 copy_length = m->m_pkthdr.len;
477 } else {
478 copy_length = L2TP_COPY_LENGTH;
479 }
480
481 if (m->m_len < copy_length) {
482 m = m_pullup(m, copy_length);
483 if (m == NULL)
484 return;
485 }
486
487 MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
488 if (m_head == NULL) {
489 m_freem(m);
490 return;
491 }
492 M_COPY_PKTHDR(m_head, m);
493
494 m_head->m_data += 2 /* align */ + L2TP_LINK_HDR_ROOM;
495 memcpy(m_head->m_data, m->m_data, copy_length);
496 m_head->m_len = copy_length;
497 m->m_data += copy_length;
498 m->m_len -= copy_length;
499
500 /* construct chain */
501 if (m->m_len == 0) {
502 m_head->m_next = m_free(m); /* not m_freem */
503 } else {
504 /*
505  * The mtags were already copied to m_head by M_COPY_PKTHDR() above,
506  * so delete m's own tags before clearing its M_PKTHDR flag.
507 */
508 m_tag_delete_chain(m, NULL);
509 m->m_flags &= ~M_PKTHDR;
510 m_head->m_next = m;
511 }
512
513 /* override m */
514 m = m_head;
515 }
516
517 m_set_rcvif(m, ifp);
518
519 /*
520  * bpf_mtap() and ifp->if_ipackets++ are done in if_input()
521 *
522 * obytes is incremented at ether_output() or bridge_enqueue().
523 */
524 if_percpuq_enqueue(ifp->if_percpuq, m);
525 }
526
527 void
528 l2tp_start(struct ifnet *ifp)
529 {
530 struct psref psref;
531 struct l2tp_variant *var;
532 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
533 l2tp_ec.ec_if);
534
535 var = l2tp_getref_variant(sc, &psref);
536 if (var == NULL)
537 return;
538
539 	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		l2tp_putref_variant(var, &psref);
540 		return;
	}
541
542 l2tpintr(var);
543 l2tp_putref_variant(var, &psref);
544 }
545
546 int
547 l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
548 {
549 int error;
550 struct psref psref;
551 struct l2tp_variant *var;
552 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
553 l2tp_ec.ec_if);
554
555 var = l2tp_getref_variant(sc, &psref);
556 if (var == NULL) {
557 m_freem(m);
558 return ENETDOWN;
559 }
560
561 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
562 m_freem(m);
563 error = ENETDOWN;
564 goto out;
565 }
566
567 m->m_flags &= ~(M_BCAST|M_MCAST);
568 bpf_mtap(ifp, m);
569 switch (var->lv_psrc->sa_family) {
570 #ifdef INET
571 case AF_INET:
572 error = in_l2tp_output(var, m);
573 break;
574 #endif
575 #ifdef INET6
576 case AF_INET6:
577 error = in6_l2tp_output(var, m);
578 break;
579 #endif
580 default:
581 m_freem(m);
582 error = ENETDOWN;
583 break;
584 }
585
586 if (error)
587 ifp->if_oerrors++;
588 else {
589 ifp->if_opackets++;
590 /*
591 * obytes is incremented at ether_output() or bridge_enqueue().
592 */
593 }
594
595 out:
596 l2tp_putref_variant(var, &psref);
597 return error;
598 }
599
600 /* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
601 int
602 l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
603 {
604 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
605 l2tp_ec.ec_if);
606 struct l2tp_variant *var, *var_tmp;
607 struct ifreq *ifr = data;
608 int error = 0, size;
609 struct sockaddr *dst, *src;
610 struct l2tp_req l2tpr;
611 u_long mtu;
612 int bound;
613 struct psref psref;
614
615 switch (cmd) {
616 case SIOCSIFADDR:
617 ifp->if_flags |= IFF_UP;
618 break;
619
620 case SIOCSIFDSTADDR:
621 break;
622
623 case SIOCADDMULTI:
624 case SIOCDELMULTI:
625 switch (ifr->ifr_addr.sa_family) {
626 #ifdef INET
627 case AF_INET: /* IP supports Multicast */
628 break;
629 #endif /* INET */
630 #ifdef INET6
631 case AF_INET6: /* IP6 supports Multicast */
632 break;
633 #endif /* INET6 */
634 		default:	/* Other protocols don't support Multicast */
635 error = EAFNOSUPPORT;
636 break;
637 }
638 break;
639
640 case SIOCSIFMTU:
641 mtu = ifr->ifr_mtu;
642 if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
643 return (EINVAL);
644 ifp->if_mtu = mtu;
645 break;
646
647 #ifdef INET
648 case SIOCSIFPHYADDR:
649 src = (struct sockaddr *)
650 &(((struct in_aliasreq *)data)->ifra_addr);
651 dst = (struct sockaddr *)
652 &(((struct in_aliasreq *)data)->ifra_dstaddr);
653 if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
654 return EAFNOSUPPORT;
655 else if (src->sa_len != sizeof(struct sockaddr_in)
656 || dst->sa_len != sizeof(struct sockaddr_in))
657 return EINVAL;
658
659 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
660 break;
661
662 #endif /* INET */
663 #ifdef INET6
664 case SIOCSIFPHYADDR_IN6:
665 src = (struct sockaddr *)
666 &(((struct in6_aliasreq *)data)->ifra_addr);
667 dst = (struct sockaddr *)
668 &(((struct in6_aliasreq *)data)->ifra_dstaddr);
669 if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
670 return EAFNOSUPPORT;
671 else if (src->sa_len != sizeof(struct sockaddr_in6)
672 || dst->sa_len != sizeof(struct sockaddr_in6))
673 return EINVAL;
674
675 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
676 break;
677
678 #endif /* INET6 */
679 case SIOCSLIFPHYADDR:
680 src = (struct sockaddr *)
681 &(((struct if_laddrreq *)data)->addr);
682 dst = (struct sockaddr *)
683 &(((struct if_laddrreq *)data)->dstaddr);
684 if (src->sa_family != dst->sa_family)
685 return EINVAL;
686 else if (src->sa_family == AF_INET
687 && src->sa_len != sizeof(struct sockaddr_in))
688 return EINVAL;
689 else if (src->sa_family == AF_INET6
690 && src->sa_len != sizeof(struct sockaddr_in6))
691 return EINVAL;
692 else if (dst->sa_family == AF_INET
693 && dst->sa_len != sizeof(struct sockaddr_in))
694 return EINVAL;
695 else if (dst->sa_family == AF_INET6
696 && dst->sa_len != sizeof(struct sockaddr_in6))
697 return EINVAL;
698
699 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
700 break;
701
702 case SIOCDIFPHYADDR:
703 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
704 break;
705
706 case SIOCGIFPSRCADDR:
707 #ifdef INET6
708 case SIOCGIFPSRCADDR_IN6:
709 #endif /* INET6 */
710 bound = curlwp_bind();
711 var = l2tp_getref_variant(sc, &psref);
712 if (var == NULL) {
713 curlwp_bindx(bound);
714 error = EADDRNOTAVAIL;
715 goto bad;
716 }
717 if (var->lv_psrc == NULL) {
718 l2tp_putref_variant(var, &psref);
719 curlwp_bindx(bound);
720 error = EADDRNOTAVAIL;
721 goto bad;
722 }
723 src = var->lv_psrc;
724 switch (cmd) {
725 #ifdef INET
726 case SIOCGIFPSRCADDR:
727 dst = &ifr->ifr_addr;
728 size = sizeof(ifr->ifr_addr);
729 break;
730 #endif /* INET */
731 #ifdef INET6
732 case SIOCGIFPSRCADDR_IN6:
733 dst = (struct sockaddr *)
734 &(((struct in6_ifreq *)data)->ifr_addr);
735 size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
736 break;
737 #endif /* INET6 */
738 default:
739 l2tp_putref_variant(var, &psref);
740 curlwp_bindx(bound);
741 error = EADDRNOTAVAIL;
742 goto bad;
743 }
744 if (src->sa_len > size) {
745 l2tp_putref_variant(var, &psref);
746 curlwp_bindx(bound);
747 return EINVAL;
748 }
749 sockaddr_copy(dst, src->sa_len, src);
750 l2tp_putref_variant(var, &psref);
751 curlwp_bindx(bound);
752 break;
753
754 case SIOCGIFPDSTADDR:
755 #ifdef INET6
756 case SIOCGIFPDSTADDR_IN6:
757 #endif /* INET6 */
758 bound = curlwp_bind();
759 var = l2tp_getref_variant(sc, &psref);
760 if (var == NULL) {
761 curlwp_bindx(bound);
762 error = EADDRNOTAVAIL;
763 goto bad;
764 }
765 if (var->lv_pdst == NULL) {
766 l2tp_putref_variant(var, &psref);
767 curlwp_bindx(bound);
768 error = EADDRNOTAVAIL;
769 goto bad;
770 }
771 src = var->lv_pdst;
772 switch (cmd) {
773 #ifdef INET
774 case SIOCGIFPDSTADDR:
775 dst = &ifr->ifr_addr;
776 size = sizeof(ifr->ifr_addr);
777 break;
778 #endif /* INET */
779 #ifdef INET6
780 case SIOCGIFPDSTADDR_IN6:
781 dst = (struct sockaddr *)
782 &(((struct in6_ifreq *)data)->ifr_addr);
783 size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
784 break;
785 #endif /* INET6 */
786 default:
787 l2tp_putref_variant(var, &psref);
788 curlwp_bindx(bound);
789 error = EADDRNOTAVAIL;
790 goto bad;
791 }
792 if (src->sa_len > size) {
793 l2tp_putref_variant(var, &psref);
794 curlwp_bindx(bound);
795 return EINVAL;
796 }
797 sockaddr_copy(dst, src->sa_len, src);
798 l2tp_putref_variant(var, &psref);
799 curlwp_bindx(bound);
800 break;
801
802 case SIOCGLIFPHYADDR:
803 bound = curlwp_bind();
804 var = l2tp_getref_variant(sc, &psref);
805 if (var == NULL) {
806 curlwp_bindx(bound);
807 error = EADDRNOTAVAIL;
808 goto bad;
809 }
810 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
811 l2tp_putref_variant(var, &psref);
812 curlwp_bindx(bound);
813 error = EADDRNOTAVAIL;
814 goto bad;
815 }
816
817 /* copy src */
818 src = var->lv_psrc;
819 dst = (struct sockaddr *)
820 &(((struct if_laddrreq *)data)->addr);
821 size = sizeof(((struct if_laddrreq *)data)->addr);
822 if (src->sa_len > size) {
823 l2tp_putref_variant(var, &psref);
824 curlwp_bindx(bound);
825 return EINVAL;
826 }
827 sockaddr_copy(dst, src->sa_len, src);
828
829 /* copy dst */
830 src = var->lv_pdst;
831 dst = (struct sockaddr *)
832 &(((struct if_laddrreq *)data)->dstaddr);
833 size = sizeof(((struct if_laddrreq *)data)->dstaddr);
834 if (src->sa_len > size) {
835 l2tp_putref_variant(var, &psref);
836 curlwp_bindx(bound);
837 return EINVAL;
838 }
839 sockaddr_copy(dst, src->sa_len, src);
840 l2tp_putref_variant(var, &psref);
841 curlwp_bindx(bound);
842 break;
843
844 case SIOCSL2TPSESSION:
845 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
846 break;
847
848 		/* session IDs must not be zero */
849 if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
850 return EINVAL;
851
852 bound = curlwp_bind();
853 var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
854 if (var_tmp != NULL) {
855 /* duplicate session id */
856 log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
857 sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
858 var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
859 psref_release(&psref, &var_tmp->lv_psref,
860 lv_psref_class);
861 curlwp_bindx(bound);
862 return EINVAL;
863 }
864 curlwp_bindx(bound);
865
866 error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
867 break;
868 case SIOCDL2TPSESSION:
869 l2tp_clear_session(sc);
870 break;
871 case SIOCSL2TPCOOKIE:
872 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
873 break;
874
875 error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
876 l2tpr.peer_cookie, l2tpr.peer_cookie_len);
877 break;
878 case SIOCDL2TPCOOKIE:
879 l2tp_clear_cookie(sc);
880 break;
881 case SIOCSL2TPSTATE:
882 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
883 break;
884
885 l2tp_set_state(sc, l2tpr.state);
886 break;
887 case SIOCGL2TP:
888 		/* get L2TPv3 session info */
889 memset(&l2tpr, 0, sizeof(l2tpr));
890
891 bound = curlwp_bind();
892 var = l2tp_getref_variant(sc, &psref);
893 if (var == NULL) {
894 curlwp_bindx(bound);
895 error = EADDRNOTAVAIL;
896 goto bad;
897 }
898
899 l2tpr.state = var->lv_state;
900 l2tpr.my_sess_id = var->lv_my_sess_id;
901 l2tpr.peer_sess_id = var->lv_peer_sess_id;
902 l2tpr.my_cookie = var->lv_my_cookie;
903 l2tpr.my_cookie_len = var->lv_my_cookie_len;
904 l2tpr.peer_cookie = var->lv_peer_cookie;
905 l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
906 l2tp_putref_variant(var, &psref);
907 curlwp_bindx(bound);
908
909 error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
910 break;
911
912 default:
913 error = ifioctl_common(ifp, cmd, data);
914 break;
915 }
916 bad:
917 return error;
918 }
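/*
 * These ioctls are normally issued from userland by ifconfig(8): the
 * outer tunnel addresses arrive via SIOCSIFPHYADDR/SIOCSIFPHYADDR_IN6/
 * SIOCSLIFPHYADDR, and the l2tp-specific session, cookie and state
 * parameters via the SIOCSL2TP* commands handled above.
 */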
919
920 static int
921 l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
922 {
923 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
924 l2tp_ec.ec_if);
925 struct sockaddr *osrc, *odst;
926 struct sockaddr *nsrc, *ndst;
927 struct l2tp_variant *ovar, *nvar;
928 int error;
929
930 nsrc = sockaddr_dup(src, M_WAITOK);
931 ndst = sockaddr_dup(dst, M_WAITOK);
932
933 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
934
935 error = encap_lock_enter();
936 if (error)
937 goto error;
938
939 mutex_enter(&sc->l2tp_lock);
940
941 ovar = sc->l2tp_var;
942 osrc = ovar->lv_psrc;
943 odst = ovar->lv_pdst;
944 *nvar = *ovar;
945 psref_target_init(&nvar->lv_psref, lv_psref_class);
946 nvar->lv_psrc = nsrc;
947 nvar->lv_pdst = ndst;
948 error = l2tp_encap_attach(nvar);
949 if (error) {
950 mutex_exit(&sc->l2tp_lock);
951 encap_lock_exit();
952 goto error;
953 }
954 membar_producer();
955 l2tp_variant_update(sc, nvar);
956
957 mutex_exit(&sc->l2tp_lock);
958
959 (void)l2tp_encap_detach(ovar);
960 encap_lock_exit();
961
962 if (osrc)
963 sockaddr_free(osrc);
964 if (odst)
965 sockaddr_free(odst);
966 kmem_free(ovar, sizeof(*ovar));
967
968 return 0;
969
970 error:
971 sockaddr_free(nsrc);
972 sockaddr_free(ndst);
973 kmem_free(nvar, sizeof(*nvar));
974
975 return error;
976 }
977
978 static void
979 l2tp_delete_tunnel(struct ifnet *ifp)
980 {
981 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
982 l2tp_ec.ec_if);
983 struct sockaddr *osrc, *odst;
984 struct l2tp_variant *ovar, *nvar;
985 int error;
986
987 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
988
989 error = encap_lock_enter();
990 if (error) {
991 kmem_free(nvar, sizeof(*nvar));
992 return;
993 }
994 mutex_enter(&sc->l2tp_lock);
995
996 ovar = sc->l2tp_var;
997 osrc = ovar->lv_psrc;
998 odst = ovar->lv_pdst;
999 *nvar = *ovar;
1000 psref_target_init(&nvar->lv_psref, lv_psref_class);
1001 nvar->lv_psrc = NULL;
1002 nvar->lv_pdst = NULL;
1003 membar_producer();
1004 l2tp_variant_update(sc, nvar);
1005
1006 mutex_exit(&sc->l2tp_lock);
1007
1008 (void)l2tp_encap_detach(ovar);
1009 encap_lock_exit();
1010
1011 if (osrc)
1012 sockaddr_free(osrc);
1013 if (odst)
1014 sockaddr_free(odst);
1015 kmem_free(ovar, sizeof(*ovar));
1016 }
1017
1018 static int
1019 id_hash_func(uint32_t id, u_long mask)
1020 {
1021 uint32_t hash;
1022
1023 hash = (id >> 16) ^ id;
1024 hash = (hash >> 4) ^ hash;
1025
1026 return hash & mask;
1027 }
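/*
 * id_hash_func() folds the 32-bit session ID before masking it into a
 * bucket index: the upper half is XORed into the lower half, and the
 * result is XORed with itself shifted right by four, so IDs differing
 * only in their upper bits still spread across the table.
 */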
1028
1029 static void
1030 l2tp_hash_init(void)
1031 {
1032
1033 l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
1034 &l2tp_hash.mask);
1035 }
1036
1037 static int
1038 l2tp_hash_fini(void)
1039 {
1040 int i;
1041
1042 mutex_enter(&l2tp_hash.lock);
1043
1044 for (i = 0; i < l2tp_hash.mask + 1; i++) {
1045 if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
1046 l2tp_hash) != NULL) {
1047 mutex_exit(&l2tp_hash.lock);
1048 return EBUSY;
1049 }
1050 }
1051 for (i = 0; i < l2tp_hash.mask + 1; i++)
1052 PSLIST_DESTROY(&l2tp_hash.lists[i]);
1053
1054 mutex_exit(&l2tp_hash.lock);
1055
1056 hashdone(l2tp_hash.lists, HASH_PSLIST, l2tp_hash.mask);
1057
1058 return 0;
1059 }
1060
1061 static int
1062 l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
1063 uint32_t peer_sess_id)
1064 {
1065 uint32_t idx;
1066 struct l2tp_variant *nvar;
1067 struct l2tp_variant *ovar;
1068 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1069
1070 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1071
1072 mutex_enter(&sc->l2tp_lock);
1073 ovar = sc->l2tp_var;
1074 *nvar = *ovar;
1075 psref_target_init(&nvar->lv_psref, lv_psref_class);
1076 nvar->lv_my_sess_id = my_sess_id;
1077 nvar->lv_peer_sess_id = peer_sess_id;
1078 membar_producer();
1079
1080 mutex_enter(&l2tp_hash.lock);
1081 if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
1082 PSLIST_WRITER_REMOVE(sc, l2tp_hash);
1083 pserialize_perform(l2tp_psz);
1084 }
1085 mutex_exit(&l2tp_hash.lock);
1086 PSLIST_ENTRY_DESTROY(sc, l2tp_hash);
1087
1088 l2tp_variant_update(sc, nvar);
1089 mutex_exit(&sc->l2tp_lock);
1090
1091 idx = id_hash_func(nvar->lv_my_sess_id, l2tp_hash.mask);
1092 if ((ifp->if_flags & IFF_DEBUG) != 0)
1093 log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
1094 sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);
1095
1096 PSLIST_ENTRY_INIT(sc, l2tp_hash);
1097 mutex_enter(&l2tp_hash.lock);
1098 PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
1099 mutex_exit(&l2tp_hash.lock);
1100
1101 kmem_free(ovar, sizeof(*ovar));
1102 return 0;
1103 }
1104
1105 static int
1106 l2tp_clear_session(struct l2tp_softc *sc)
1107 {
1108 struct l2tp_variant *nvar;
1109 struct l2tp_variant *ovar;
1110
1111 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1112
1113 mutex_enter(&sc->l2tp_lock);
1114 ovar = sc->l2tp_var;
1115 *nvar = *ovar;
1116 psref_target_init(&nvar->lv_psref, lv_psref_class);
1117 nvar->lv_my_sess_id = 0;
1118 nvar->lv_peer_sess_id = 0;
1119 membar_producer();
1120
1121 mutex_enter(&l2tp_hash.lock);
1122 if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
1123 PSLIST_WRITER_REMOVE(sc, l2tp_hash);
1124 pserialize_perform(l2tp_psz);
1125 }
1126 mutex_exit(&l2tp_hash.lock);
1127
1128 l2tp_variant_update(sc, nvar);
1129 mutex_exit(&sc->l2tp_lock);
1130 kmem_free(ovar, sizeof(*ovar));
1131 return 0;
1132 }
1133
1134 struct l2tp_variant *
1135 l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
1136 {
1137 int idx;
1138 int s;
1139 struct l2tp_softc *sc;
1140
1141 idx = id_hash_func(id, l2tp_hash.mask);
1142
1143 s = pserialize_read_enter();
1144 PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
1145 l2tp_hash) {
1146 struct l2tp_variant *var = sc->l2tp_var;
1147 if (var == NULL)
1148 continue;
1149 if (var->lv_my_sess_id != id)
1150 continue;
1151 psref_acquire(psref, &var->lv_psref, lv_psref_class);
1152 pserialize_read_exit(s);
1153 return var;
1154 }
1155 pserialize_read_exit(s);
1156 return NULL;
1157 }
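/*
 * Callers of l2tp_lookup_session_ref() follow the usual psref(9) pattern,
 * as in the SIOCSL2TPSESSION handler above:
 *
 *	bound = curlwp_bind();
 *	var = l2tp_lookup_session_ref(id, &psref);
 *	if (var != NULL) {
 *		...
 *		psref_release(&psref, &var->lv_psref, lv_psref_class);
 *	}
 *	curlwp_bindx(bound);
 */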
1158
1159 /*
1160 * l2tp_variant update API.
1161 *
1162 * Assumption:
1163  * the reader side dereferences sc->l2tp_var only inside a reader critical
1164  * section; that is, no reader keeps using sc->l2tp_var after
1165  * pserialize_perform() has returned.
1166 */
1167 static void
1168 l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
1169 {
1170 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1171 struct l2tp_variant *ovar = sc->l2tp_var;
1172
1173 KASSERT(mutex_owned(&sc->l2tp_lock));
1174
1175 sc->l2tp_var = nvar;
1176 pserialize_perform(l2tp_psz);
1177 psref_target_destroy(&ovar->lv_psref, lv_psref_class);
1178
1179 /*
1180  * The atomic_swap_ptr(3) manual does not say whether the second argument
1181  * may be modified, so use sc->l2tp_var here instead of nvar.
1182 */
1183 if (sc->l2tp_var != NULL) {
1184 if (sc->l2tp_var->lv_psrc != NULL
1185 && sc->l2tp_var->lv_pdst != NULL)
1186 ifp->if_flags |= IFF_RUNNING;
1187 else
1188 ifp->if_flags &= ~IFF_RUNNING;
1189 }
1190 }
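/*
 * The l2tp_set_*()/l2tp_clear_*() writers all follow the same sequence
 * around l2tp_variant_update():
 *
 *	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
 *	mutex_enter(&sc->l2tp_lock);
 *	*nvar = *sc->l2tp_var;
 *	psref_target_init(&nvar->lv_psref, lv_psref_class);
 *	... modify nvar ...
 *	membar_producer();
 *	l2tp_variant_update(sc, nvar);
 *	mutex_exit(&sc->l2tp_lock);
 *
 * l2tp_variant_update() publishes nvar and waits until no reader can
 * still see the old variant, after which the caller may free it (see
 * l2tp_set_session()).
 */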
1191
1192 static int
1193 l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
1194 uint64_t peer_cookie, u_int peer_cookie_len)
1195 {
1196 struct l2tp_variant *nvar;
1197
1198 if (my_cookie == 0 || peer_cookie == 0)
1199 return EINVAL;
1200
1201 	if ((my_cookie_len != 4 && my_cookie_len != 8)
1202 	    || (peer_cookie_len != 4 && peer_cookie_len != 8))
1203 return EINVAL;
1204
1205 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1206
1207 mutex_enter(&sc->l2tp_lock);
1208
1209 *nvar = *sc->l2tp_var;
1210 psref_target_init(&nvar->lv_psref, lv_psref_class);
1211 nvar->lv_my_cookie = my_cookie;
1212 nvar->lv_my_cookie_len = my_cookie_len;
1213 nvar->lv_peer_cookie = peer_cookie;
1214 nvar->lv_peer_cookie_len = peer_cookie_len;
1215 nvar->lv_use_cookie = L2TP_COOKIE_ON;
1216 membar_producer();
1217 l2tp_variant_update(sc, nvar);
1218
1219 mutex_exit(&sc->l2tp_lock);
1220
1221 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1222 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1223 log(LOG_DEBUG,
1224 "%s: set cookie: "
1225 "local cookie_len=%u local cookie=%" PRIu64 ", "
1226 "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
1227 ifp->if_xname, my_cookie_len, my_cookie,
1228 peer_cookie_len, peer_cookie);
1229 }
1230
1231 return 0;
1232 }
1233
1234 static void
1235 l2tp_clear_cookie(struct l2tp_softc *sc)
1236 {
1237 struct l2tp_variant *nvar;
1238
1239 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1240
1241 mutex_enter(&sc->l2tp_lock);
1242
1243 *nvar = *sc->l2tp_var;
1244 psref_target_init(&nvar->lv_psref, lv_psref_class);
1245 nvar->lv_my_cookie = 0;
1246 nvar->lv_my_cookie_len = 0;
1247 nvar->lv_peer_cookie = 0;
1248 nvar->lv_peer_cookie_len = 0;
1249 nvar->lv_use_cookie = L2TP_COOKIE_OFF;
1250 membar_producer();
1251 l2tp_variant_update(sc, nvar);
1252
1253 mutex_exit(&sc->l2tp_lock);
1254 }
1255
1256 static void
1257 l2tp_set_state(struct l2tp_softc *sc, int state)
1258 {
1259 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1260 struct l2tp_variant *nvar;
1261
1262 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1263
1264 mutex_enter(&sc->l2tp_lock);
1265
1266 *nvar = *sc->l2tp_var;
1267 psref_target_init(&nvar->lv_psref, lv_psref_class);
1268 nvar->lv_state = state;
1269 membar_producer();
1270 l2tp_variant_update(sc, nvar);
1271
1272 if (nvar->lv_state == L2TP_STATE_UP) {
1273 ifp->if_link_state = LINK_STATE_UP;
1274 } else {
1275 ifp->if_link_state = LINK_STATE_DOWN;
1276 }
1277
1278 mutex_exit(&sc->l2tp_lock);
1279
1280 #ifdef NOTYET
1281 vlan_linkstate_notify(ifp, ifp->if_link_state);
1282 #endif
1283 }
1284
1285 static int
1286 l2tp_encap_attach(struct l2tp_variant *var)
1287 {
1288 int error;
1289
1290 if (var == NULL || var->lv_psrc == NULL)
1291 return EINVAL;
1292
1293 switch (var->lv_psrc->sa_family) {
1294 #ifdef INET
1295 case AF_INET:
1296 error = in_l2tp_attach(var);
1297 break;
1298 #endif
1299 #ifdef INET6
1300 case AF_INET6:
1301 error = in6_l2tp_attach(var);
1302 break;
1303 #endif
1304 default:
1305 error = EINVAL;
1306 break;
1307 }
1308
1309 return error;
1310 }
1311
1312 static int
1313 l2tp_encap_detach(struct l2tp_variant *var)
1314 {
1315 int error;
1316
1317 if (var == NULL || var->lv_psrc == NULL)
1318 return EINVAL;
1319
1320 switch (var->lv_psrc->sa_family) {
1321 #ifdef INET
1322 case AF_INET:
1323 error = in_l2tp_detach(var);
1324 break;
1325 #endif
1326 #ifdef INET6
1327 case AF_INET6:
1328 error = in6_l2tp_detach(var);
1329 break;
1330 #endif
1331 default:
1332 error = EINVAL;
1333 break;
1334 }
1335
1336 return error;
1337 }
1338
1339 /*
1340 * TODO:
1341 * unify with gif_check_nesting().
1342 */
1343 int
1344 l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
1345 {
1346 struct m_tag *mtag;
1347 int *count;
1348
1349 mtag = m_tag_find(m, PACKET_TAG_TUNNEL_INFO, NULL);
1350 if (mtag != NULL) {
1351 count = (int *)(mtag + 1);
1352 if (++(*count) > max_l2tp_nesting) {
1353 log(LOG_NOTICE,
1354 		    "%s: recursively called too many times (%d)\n",
1355 if_name(ifp),
1356 *count);
1357 return EIO;
1358 }
1359 } else {
1360 mtag = m_tag_get(PACKET_TAG_TUNNEL_INFO, sizeof(*count),
1361 M_NOWAIT);
1362 if (mtag != NULL) {
1363 m_tag_prepend(m, mtag);
1364 count = (int *)(mtag + 1);
1365 *count = 0;
1366 }
1367 #ifdef L2TP_DEBUG
1368 else {
1369 log(LOG_DEBUG,
1370 "%s: m_tag_get() failed, recursion calls are not prevented.\n",
1371 if_name(ifp));
1372 }
1373 #endif
1374 }
1375
1376 return 0;
1377 }
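/*
 * The PACKET_TAG_TUNNEL_INFO tag attached above travels with the mbuf as
 * it is re-encapsulated, so the counter accumulates across stacked tunnel
 * interfaces; once it exceeds max_l2tp_nesting the packet is rejected with
 * EIO rather than looping indefinitely.
 */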
1378
1379 /*
1380 * Module infrastructure
1381 */
1382 #include "if_module.h"
1383
1384 IF_MODULE(MODULE_CLASS_DRIVER, l2tp, "")
1385
1386
1387 /* TODO: IP_TCPMSS support */
1388 #ifdef IP_TCPMSS
1389 static int l2tp_need_tcpmss_clamp(struct ifnet *);
1390 #ifdef INET
1391 static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
1392 #endif
1393 #ifdef INET6
1394 static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
1395 #endif
1396
1397 struct mbuf *
1398 l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
1399 {
1400
1401 if (l2tp_need_tcpmss_clamp(ifp)) {
1402 struct ether_header *eh;
1403 struct ether_vlan_header evh;
1404
1405 /* save ether header */
1406 m_copydata(m, 0, sizeof(evh), (void *)&evh);
1407 eh = (struct ether_header *)&evh;
1408
1409 switch (ntohs(eh->ether_type)) {
1410 case ETHERTYPE_VLAN: /* Ether + VLAN */
1411 if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
1412 break;
1413 m_adj(m, sizeof(struct ether_vlan_header));
1414 switch (ntohs(evh.evl_proto)) {
1415 #ifdef INET
1416 case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
1417 m = l2tp_tcpmss4_clamp(ifp, m);
1418 if (m == NULL)
1419 return NULL;
1420 break;
1421 #endif /* INET */
1422 #ifdef INET6
1423 case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
1424 m = l2tp_tcpmss6_clamp(ifp, m);
1425 if (m == NULL)
1426 return NULL;
1427 break;
1428 #endif /* INET6 */
1429 default:
1430 break;
1431 }
1432 /* restore ether header */
1433 M_PREPEND(m, sizeof(struct ether_vlan_header),
1434 M_DONTWAIT);
1435 if (m == NULL)
1436 return NULL;
1437 *mtod(m, struct ether_vlan_header *) = evh;
1438 break;
1439 #ifdef INET
1440 case ETHERTYPE_IP: /* Ether + IPv4 */
1441 if (m->m_pkthdr.len <= sizeof(struct ether_header))
1442 break;
1443 m_adj(m, sizeof(struct ether_header));
1444 m = l2tp_tcpmss4_clamp(ifp, m);
1445 if (m == NULL)
1446 return NULL;
1447 /* restore ether header */
1448 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
1449 if (m == NULL)
1450 return NULL;
1451 *mtod(m, struct ether_header *) = *eh;
1452 break;
1453 #endif /* INET */
1454 #ifdef INET6
1455 case ETHERTYPE_IPV6: /* Ether + IPv6 */
1456 if (m->m_pkthdr.len <= sizeof(struct ether_header))
1457 break;
1458 m_adj(m, sizeof(struct ether_header));
1459 m = l2tp_tcpmss6_clamp(ifp, m);
1460 if (m == NULL)
1461 return NULL;
1462 /* restore ether header */
1463 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
1464 if (m == NULL)
1465 return NULL;
1466 *mtod(m, struct ether_header *) = *eh;
1467 break;
1468 #endif /* INET6 */
1469 default:
1470 break;
1471 }
1472 }
1473
1474 return m;
1475 }
1476
1477 static int
1478 l2tp_need_tcpmss_clamp(struct ifnet *ifp)
1479 {
1480 int ret = 0;
1481
1482 #ifdef INET
1483 if (ifp->if_tcpmss != 0)
1484 ret = 1;
1485 #endif /* INET */
1486
1487 #ifdef INET6
1488 if (ifp->if_tcpmss6 != 0)
1489 ret = 1;
1490 #endif /* INET6 */
1491
1492 return ret;
1493 }
1494
1495 #ifdef INET
1496 static struct mbuf *
1497 l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
1498 {
1499
1500 if (ifp->if_tcpmss != 0) {
1501 return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
1502 ifp->if_mtu - IP_TCPMSS_EXTLEN :
1503 ifp->if_tcpmss);
1504 }
1505 return m;
1506 }
1507 #endif /* INET */
1508
1509 #ifdef INET6
1510 static struct mbuf *
1511 l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
1512 {
1513 int ip6hdrlen;
1514
1515 if (ifp->if_tcpmss6 != 0 &&
1516 ip6_tcpmss_applicable(m, &ip6hdrlen)) {
1517 return ip6_tcpmss(m, ip6hdrlen,
1518 (ifp->if_tcpmss6 < 0) ?
1519 ifp->if_mtu - IP6_TCPMSS_EXTLEN :
1520 ifp->if_tcpmss6);
1521 }
1522 return m;
1523 }
1524 #endif /* INET6 */
1525
1526 #endif /* IP_TCPMSS */
1527