1 /* $NetBSD: if_l2tp.c,v 1.2 2017/03/30 06:42:05 knakahara Exp $ */
2
3 /*
4 * Copyright (c) 2017 Internet Initiative Japan Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * L2TPv3 kernel interface
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.2 2017/03/30 06:42:05 knakahara Exp $");
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/mbuf.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/errno.h>
47 #include <sys/ioctl.h>
48 #include <sys/time.h>
49 #include <sys/syslog.h>
50 #include <sys/proc.h>
51 #include <sys/conf.h>
52 #include <sys/kauth.h>
53 #include <sys/cpu.h>
54 #include <sys/cprng.h>
55 #include <sys/intr.h>
56 #include <sys/kmem.h>
57 #include <sys/mutex.h>
58 #include <sys/atomic.h>
59 #include <sys/pserialize.h>
60 #include <sys/device.h>
61 #include <sys/module.h>
62
63 #include <net/if.h>
64 #include <net/if_dl.h>
65 #include <net/if_ether.h>
66 #include <net/if_types.h>
67 #include <net/netisr.h>
68 #include <net/route.h>
69 #include <net/bpf.h>
70 #include <net/if_vlanvar.h>
71
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip_encap.h>
76 #ifdef INET
77 #include <netinet/in_var.h>
78 #include <netinet/in_l2tp.h>
79 #endif /* INET */
80 #ifdef INET6
81 #include <netinet6/in6_l2tp.h>
82 #endif
83
84 #include <net/if_l2tp.h>
85
86 #if NVLAN > 0
87 #include <net/if_vlanvar.h>
88 #endif
89
90 /* TODO: IP_TCPMSS support */
91 #undef IP_TCPMSS
92 #ifdef IP_TCPMSS
93 #include <netinet/ip_tcpmss.h>
94 #endif
95
96 #include <net/bpf.h>
97 #include <net/net_osdep.h>
98
99 /*
100 * l2tp global variable definitions
101 */
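/*
 * Locking notes for the globals below:
 *  - l2tp_softcs.lock serializes insertion/removal on the global softc
 *    list (see l2tp_clone_create() and l2tp_clone_destroy()).
 *  - l2tp_hash.lock serializes writer-side updates of the session-id
 *    hash lists; readers walk them under pserialize (l2tp_psz) and take
 *    a psref (lv_psref_class) on the l2tp_variant they return.
 */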
102 LIST_HEAD(l2tp_sclist, l2tp_softc);
103 static struct {
104 struct l2tp_sclist list;
105 kmutex_t lock;
106 } l2tp_softcs __cacheline_aligned;
107
108
109 #if !defined(L2TP_ID_HASH_SIZE)
110 #define L2TP_ID_HASH_SIZE 64
111 #endif
112 static struct {
113 kmutex_t lock;
114 struct pslist_head *lists;
115 } l2tp_hash __cacheline_aligned = {
116 .lists = NULL,
117 };
118
119 pserialize_t l2tp_psz __read_mostly;
120 struct psref_class *lv_psref_class __read_mostly;
121
122 static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
123 static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);
124
125 static int l2tp_clone_create(struct if_clone *, int);
126 static int l2tp_clone_destroy(struct ifnet *);
127
128 struct if_clone l2tp_cloner =
129 IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);
130
131 static int l2tp_output(struct ifnet *, struct mbuf *,
132 const struct sockaddr *, const struct rtentry *);
133 static void l2tpintr(struct l2tp_variant *);
134
135 static void l2tp_hash_init(void);
136 static int l2tp_hash_fini(void);
137
138 static void l2tp_start(struct ifnet *);
139 static int l2tp_transmit(struct ifnet *, struct mbuf *);
140
141 static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
142 struct sockaddr *);
143 static void l2tp_delete_tunnel(struct ifnet *);
144
145 static int id_hash_func(uint32_t);
146
147 static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
148 static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
149 static int l2tp_clear_session(struct l2tp_softc *);
150 static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
151 static void l2tp_clear_cookie(struct l2tp_softc *);
152 static void l2tp_set_state(struct l2tp_softc *, int);
153 static int l2tp_encap_attach(struct l2tp_variant *);
154 static int l2tp_encap_detach(struct l2tp_variant *);
155
156 #ifndef MAX_L2TP_NEST
157 /*
158 * This macro controls the upper limit on nesting of l2tp tunnels.
159 * Since setting a large value here together with a careless configuration
160 * may crash the system, we do not allow any nesting by default.
161 * If you need nested l2tp tunnels, you can define this macro in your
162 * kernel configuration file. However, if you do so, please be careful
163 * to configure the tunnels so that they do not form a loop.
164 */
165 /*
166 * XXX
167 * Currently, if in_l2tp_output() is called recursively, it tries to take
168 * struct l2tp_ro->lr_lock, which it already holds. So nested l2tp tunnels are prohibited.
169 */
170 #define MAX_L2TP_NEST 0
171 #endif
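/*
 * Example (illustrative only; check your port's config(5) syntax): a kernel
 * configuration line such as
 *	options MAX_L2TP_NEST=1
 * would override the default above, subject to the locking caveat noted in
 * the XXX comment.
 */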
172
173 static int max_l2tp_nesting = MAX_L2TP_NEST;
174
175 /* ARGSUSED */
176 void
177 l2tpattach(int count)
178 {
179 /*
180 * Nothing to do here; initialization is handled by the
181 * module initialization code in l2tpinit() below.
182 */
183 }
184
185 static void
186 l2tpinit(void)
187 {
188
189 mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
190 LIST_INIT(&l2tp_softcs.list);
191
192 mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
193 l2tp_psz = pserialize_create();
194 lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
195 if_clone_attach(&l2tp_cloner);
196
197 l2tp_hash_init();
198 }
199
200 static int
201 l2tpdetach(void)
202 {
203 int error;
204
205 mutex_enter(&l2tp_softcs.lock);
206 if (!LIST_EMPTY(&l2tp_softcs.list)) {
207 mutex_exit(&l2tp_softcs.lock);
208 return EBUSY;
209 }
210 mutex_exit(&l2tp_softcs.lock);
211
212 error = l2tp_hash_fini();
213 if (error)
214 return error;
215
216 if_clone_detach(&l2tp_cloner);
217 psref_class_destroy(lv_psref_class);
218 pserialize_destroy(l2tp_psz);
219 mutex_destroy(&l2tp_hash.lock);
220
221 return error;
222 }
223
224 static int
225 l2tp_clone_create(struct if_clone *ifc, int unit)
226 {
227 struct l2tp_softc *sc;
228 struct l2tp_variant *var;
229
230 sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
231 var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
232
233 var->lv_softc = sc;
234 var->lv_state = L2TP_STATE_DOWN;
235 var->lv_use_cookie = L2TP_COOKIE_OFF;
236 psref_target_init(&var->lv_psref, lv_psref_class);
237
238 sc->l2tp_var = var;
239 mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
240 PSLIST_ENTRY_INIT(sc, l2tp_hash);
241
242 if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
243
244 l2tpattach0(sc);
245
246 sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
247 KASSERTMSG(sc->l2tp_ro_percpu != NULL,
248 "failed to allocate sc->l2tp_ro_percpu");
249 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);
250
251 mutex_enter(&l2tp_softcs.lock);
252 LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
253 mutex_exit(&l2tp_softcs.lock);
254
255 return (0);
256 }
257
258 void
259 l2tpattach0(struct l2tp_softc *sc)
260 {
261
262 sc->l2tp_ec.ec_if.if_addrlen = 0;
263 sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
264 sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
265 sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
266 sc->l2tp_ec.ec_if.if_output = l2tp_output;
267 sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
268 sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
269 sc->l2tp_ec.ec_if.if_start = l2tp_start;
270 sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
271 sc->l2tp_ec.ec_if._if_input = ether_input;
272 IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);
273 if_attach(&sc->l2tp_ec.ec_if);
274 if_alloc_sadl(&sc->l2tp_ec.ec_if);
275 bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));
276 }
277
278 void
279 l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
280 {
281 struct l2tp_ro *lro = p;
282
283 mutex_init(&lro->lr_lock, MUTEX_DEFAULT, IPL_NONE);
284 }
285
286 void
287 l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
288 {
289 struct l2tp_ro *lro = p;
290
291 rtcache_free(&lro->lr_ro);
292
293 mutex_destroy(&lro->lr_lock);
294 }
295
296 static int
297 l2tp_clone_destroy(struct ifnet *ifp)
298 {
299 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
300 l2tp_ec.ec_if);
301
302 l2tp_clear_session(sc);
303 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
304
305 mutex_enter(&l2tp_softcs.lock);
306 LIST_REMOVE(sc, l2tp_list);
307 mutex_exit(&l2tp_softcs.lock);
308
309 bpf_detach(ifp);
310
311 if_detach(ifp);
312
313 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
314 percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));
315
316 kmem_free(sc->l2tp_var, sizeof(struct l2tp_variant));
317 mutex_destroy(&sc->l2tp_lock);
318 kmem_free(sc, sizeof(struct l2tp_softc));
319
320 return 0;
321 }
322
323 static int
324 l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
325 const struct rtentry *rt)
326 {
327 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
328 l2tp_ec.ec_if);
329 struct l2tp_variant *var;
330 struct psref psref;
331 int error = 0;
332
333 var = l2tp_getref_variant(sc, &psref);
334 if (var == NULL) {
335 m_freem(m);
336 return ENETDOWN;
337 }
338
339 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
340
341 m->m_flags &= ~(M_BCAST|M_MCAST);
342
343 if ((ifp->if_flags & IFF_UP) == 0) {
344 m_freem(m);
345 error = ENETDOWN;
346 goto end;
347 }
348
349 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
350 m_freem(m);
351 error = ENETDOWN;
352 goto end;
353 }
354
355 /* XXX should we check if our outer source is legal? */
356
357 /* use DLT_NULL encapsulation here to pass inner af type */
358 M_PREPEND(m, sizeof(int), M_DONTWAIT);
359 if (!m) {
360 error = ENOBUFS;
361 goto end;
362 }
363 *mtod(m, int *) = dst->sa_family;
364
365 IFQ_ENQUEUE(&ifp->if_snd, m, error);
366 if (error)
367 goto end;
368
369 /*
370 * direct call to avoid infinite loop at l2tpintr()
371 */
372 l2tpintr(var);
373
374 error = 0;
375
376 end:
377 l2tp_putref_variant(var, &psref);
378 if (error)
379 ifp->if_oerrors++;
380
381 return error;
382 }
383
384 static void
385 l2tpintr(struct l2tp_variant *var)
386 {
387 struct l2tp_softc *sc;
388 struct ifnet *ifp;
389 struct mbuf *m;
390 int error;
391
392 KASSERT(psref_held(&var->lv_psref, lv_psref_class));
393
394 sc = var->lv_softc;
395 ifp = &sc->l2tp_ec.ec_if;
396
397 /* output processing */
398 if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
399 IFQ_PURGE(&ifp->if_snd);
400 return;
401 }
402
403 for (;;) {
404 IFQ_DEQUEUE(&ifp->if_snd, m);
405 if (m == NULL)
406 break;
407 m->m_flags &= ~(M_BCAST|M_MCAST);
408 bpf_mtap(ifp, m);
409 switch (var->lv_psrc->sa_family) {
410 #ifdef INET
411 case AF_INET:
412 error = in_l2tp_output(var, m);
413 break;
414 #endif
415 #ifdef INET6
416 case AF_INET6:
417 error = in6_l2tp_output(var, m);
418 break;
419 #endif
420 default:
421 m_freem(m);
422 error = ENETDOWN;
423 break;
424 }
425
426 if (error)
427 ifp->if_oerrors++;
428 else {
429 ifp->if_opackets++;
430 /*
431 * obytes is incremented at ether_output() or
432 * bridge_enqueue().
433 */
434 }
435 }
436
437 }
438
439 void
440 l2tp_input(struct mbuf *m, struct ifnet *ifp)
441 {
442
443 KASSERT(ifp != NULL);
444
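/*
 * The payload starts with an inner Ethernet header (14 bytes), so it
 * should begin at an address that is 2 modulo 4 for the encapsulated
 * IP header to end up 4-byte aligned. If the data pointer is 4-byte
 * aligned here, re-align by copying the head of the payload into a
 * freshly allocated mbuf below.
 */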
445 if (0 == (mtod(m, u_long) & 0x03)) {
446 /* copy and align head of payload */
447 struct mbuf *m_head;
448 int copy_length;
449
450 #define L2TP_COPY_LENGTH 60
451 #define L2TP_LINK_HDR_ROOM (MHLEN - L2TP_COPY_LENGTH - 4/*round4(2)*/)
452
453 if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
454 copy_length = m->m_pkthdr.len;
455 } else {
456 copy_length = L2TP_COPY_LENGTH;
457 }
458
459 if (m->m_len < copy_length) {
460 m = m_pullup(m, copy_length);
461 if (m == NULL)
462 return;
463 }
464
465 MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
466 if (m_head == NULL) {
467 m_freem(m);
468 return;
469 }
470 M_COPY_PKTHDR(m_head, m);
471
472 m_head->m_data += 2 /* align */ + L2TP_LINK_HDR_ROOM;
473 memcpy(m_head->m_data, m->m_data, copy_length);
474 m_head->m_len = copy_length;
475 m->m_data += copy_length;
476 m->m_len -= copy_length;
477
478 /* construct chain */
479 if (m->m_len == 0) {
480 m_head->m_next = m_free(m); /* not m_freem */
481 } else {
482 /*
483 * M_COPY_PKTHDR above copied the mtags to m_head; clearing the
484 * M_PKTHDR flag alone would not free them, so delete them here.
485 */
486 m_tag_delete_chain(m, NULL);
487 m->m_flags &= ~M_PKTHDR;
488 m_head->m_next = m;
489 }
490
491 /* override m */
492 m = m_head;
493 }
494
495 m_set_rcvif(m, ifp);
496
497 /*
498 * bpf_mtap() and ifp->if_ipackets++ are done in if_input()
499 *
500 * obytes is incremented at ether_output() or bridge_enqueue().
501 */
502 if_percpuq_enqueue(ifp->if_percpuq, m);
503 }
504
505 void
506 l2tp_start(struct ifnet *ifp)
507 {
508 struct psref psref;
509 struct l2tp_variant *var;
510 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
511 l2tp_ec.ec_if);
512
513 var = l2tp_getref_variant(sc, &psref);
514 if (var == NULL)
515 return;
516
517 if (var->lv_psrc == NULL || var->lv_pdst == NULL)
518 return;
519
520 l2tpintr(var);
521 l2tp_putref_variant(var, &psref);
522 }
523
524 int
525 l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
526 {
527 int error;
528 struct psref psref;
529 struct l2tp_variant *var;
530 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
531 l2tp_ec.ec_if);
532
533 var = l2tp_getref_variant(sc, &psref);
534 if (var == NULL) {
535 m_freem(m);
536 return ENETDOWN;
537 }
538
539 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
540 m_freem(m);
541 error = ENETDOWN;
542 goto out;
543 }
544
545 m->m_flags &= ~(M_BCAST|M_MCAST);
546 bpf_mtap(ifp, m);
547 switch (var->lv_psrc->sa_family) {
548 #ifdef INET
549 case AF_INET:
550 error = in_l2tp_output(var, m);
551 break;
552 #endif
553 #ifdef INET6
554 case AF_INET6:
555 error = in6_l2tp_output(var, m);
556 break;
557 #endif
558 default:
559 m_freem(m);
560 error = ENETDOWN;
561 break;
562 }
563
564 if (error)
565 ifp->if_oerrors++;
566 else {
567 ifp->if_opackets++;
568 /*
569 * obytes is incremented at ether_output() or bridge_enqueue().
570 */
571 }
572
573 out:
574 l2tp_putref_variant(var, &psref);
575 return error;
576 }
577
578 /* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
579 int
580 l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
581 {
582 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
583 l2tp_ec.ec_if);
584 struct l2tp_variant *var, *var_tmp;
585 struct ifreq *ifr = data;
586 int error = 0, size;
587 struct sockaddr *dst, *src;
588 struct l2tp_req l2tpr;
589 u_long mtu;
590 int bound;
591 struct psref psref;
592
593 switch (cmd) {
594 case SIOCSIFADDR:
595 ifp->if_flags |= IFF_UP;
596 break;
597
598 case SIOCSIFDSTADDR:
599 break;
600
601 case SIOCADDMULTI:
602 case SIOCDELMULTI:
603 switch (ifr->ifr_addr.sa_family) {
604 #ifdef INET
605 case AF_INET: /* IP supports Multicast */
606 break;
607 #endif /* INET */
608 #ifdef INET6
609 case AF_INET6: /* IP6 supports Multicast */
610 break;
611 #endif /* INET6 */
612 default: /* Other protocols don't support Multicast */
613 error = EAFNOSUPPORT;
614 break;
615 }
616 break;
617
618 case SIOCSIFMTU:
619 mtu = ifr->ifr_mtu;
620 if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
621 return (EINVAL);
622 ifp->if_mtu = mtu;
623 break;
624
625 #ifdef INET
626 case SIOCSIFPHYADDR:
627 src = (struct sockaddr *)
628 &(((struct in_aliasreq *)data)->ifra_addr);
629 dst = (struct sockaddr *)
630 &(((struct in_aliasreq *)data)->ifra_dstaddr);
631 if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
632 return EAFNOSUPPORT;
633 else if (src->sa_len != sizeof(struct sockaddr_in)
634 || dst->sa_len != sizeof(struct sockaddr_in))
635 return EINVAL;
636
637 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
638 break;
639
640 #endif /* INET */
641 #ifdef INET6
642 case SIOCSIFPHYADDR_IN6:
643 src = (struct sockaddr *)
644 &(((struct in6_aliasreq *)data)->ifra_addr);
645 dst = (struct sockaddr *)
646 &(((struct in6_aliasreq *)data)->ifra_dstaddr);
647 if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
648 return EAFNOSUPPORT;
649 else if (src->sa_len != sizeof(struct sockaddr_in6)
650 || dst->sa_len != sizeof(struct sockaddr_in6))
651 return EINVAL;
652
653 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
654 break;
655
656 #endif /* INET6 */
657 case SIOCSLIFPHYADDR:
658 src = (struct sockaddr *)
659 &(((struct if_laddrreq *)data)->addr);
660 dst = (struct sockaddr *)
661 &(((struct if_laddrreq *)data)->dstaddr);
662 if (src->sa_family != dst->sa_family)
663 return EINVAL;
664 else if (src->sa_family == AF_INET
665 && src->sa_len != sizeof(struct sockaddr_in))
666 return EINVAL;
667 else if (src->sa_family == AF_INET6
668 && src->sa_len != sizeof(struct sockaddr_in6))
669 return EINVAL;
670 else if (dst->sa_family == AF_INET
671 && dst->sa_len != sizeof(struct sockaddr_in))
672 return EINVAL;
673 else if (dst->sa_family == AF_INET6
674 && dst->sa_len != sizeof(struct sockaddr_in6))
675 return EINVAL;
676
677 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
678 break;
679
680 case SIOCDIFPHYADDR:
681 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
682 break;
683
684 case SIOCGIFPSRCADDR:
685 #ifdef INET6
686 case SIOCGIFPSRCADDR_IN6:
687 #endif /* INET6 */
688 bound = curlwp_bind();
689 var = l2tp_getref_variant(sc, &psref);
690 if (var == NULL) {
691 curlwp_bindx(bound);
692 error = EADDRNOTAVAIL;
693 goto bad;
694 }
695 if (var->lv_psrc == NULL) {
696 l2tp_putref_variant(var, &psref);
697 curlwp_bindx(bound);
698 error = EADDRNOTAVAIL;
699 goto bad;
700 }
701 src = var->lv_psrc;
702 switch (cmd) {
703 #ifdef INET
704 case SIOCGIFPSRCADDR:
705 dst = &ifr->ifr_addr;
706 size = sizeof(ifr->ifr_addr);
707 break;
708 #endif /* INET */
709 #ifdef INET6
710 case SIOCGIFPSRCADDR_IN6:
711 dst = (struct sockaddr *)
712 &(((struct in6_ifreq *)data)->ifr_addr);
713 size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
714 break;
715 #endif /* INET6 */
716 default:
717 l2tp_putref_variant(var, &psref);
718 curlwp_bindx(bound);
719 error = EADDRNOTAVAIL;
720 goto bad;
721 }
722 if (src->sa_len > size) {
723 l2tp_putref_variant(var, &psref);
724 curlwp_bindx(bound);
725 return EINVAL;
726 }
727 sockaddr_copy(dst, src->sa_len, src);
728 l2tp_putref_variant(var, &psref);
729 curlwp_bindx(bound);
730 break;
731
732 case SIOCGIFPDSTADDR:
733 #ifdef INET6
734 case SIOCGIFPDSTADDR_IN6:
735 #endif /* INET6 */
736 bound = curlwp_bind();
737 var = l2tp_getref_variant(sc, &psref);
738 if (var == NULL) {
739 curlwp_bindx(bound);
740 error = EADDRNOTAVAIL;
741 goto bad;
742 }
743 if (var->lv_pdst == NULL) {
744 l2tp_putref_variant(var, &psref);
745 curlwp_bindx(bound);
746 error = EADDRNOTAVAIL;
747 goto bad;
748 }
749 src = var->lv_pdst;
750 switch (cmd) {
751 #ifdef INET
752 case SIOCGIFPDSTADDR:
753 dst = &ifr->ifr_addr;
754 size = sizeof(ifr->ifr_addr);
755 break;
756 #endif /* INET */
757 #ifdef INET6
758 case SIOCGIFPDSTADDR_IN6:
759 dst = (struct sockaddr *)
760 &(((struct in6_ifreq *)data)->ifr_addr);
761 size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
762 break;
763 #endif /* INET6 */
764 default:
765 l2tp_putref_variant(var, &psref);
766 curlwp_bindx(bound);
767 error = EADDRNOTAVAIL;
768 goto bad;
769 }
770 if (src->sa_len > size) {
771 l2tp_putref_variant(var, &psref);
772 curlwp_bindx(bound);
773 return EINVAL;
774 }
775 sockaddr_copy(dst, src->sa_len, src);
776 l2tp_putref_variant(var, &psref);
777 curlwp_bindx(bound);
778 break;
779
780 case SIOCGLIFPHYADDR:
781 bound = curlwp_bind();
782 var = l2tp_getref_variant(sc, &psref);
783 if (var == NULL) {
784 curlwp_bindx(bound);
785 error = EADDRNOTAVAIL;
786 goto bad;
787 }
788 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
789 l2tp_putref_variant(var, &psref);
790 curlwp_bindx(bound);
791 error = EADDRNOTAVAIL;
792 goto bad;
793 }
794
795 /* copy src */
796 src = var->lv_psrc;
797 dst = (struct sockaddr *)
798 &(((struct if_laddrreq *)data)->addr);
799 size = sizeof(((struct if_laddrreq *)data)->addr);
800 if (src->sa_len > size) {
801 l2tp_putref_variant(var, &psref);
802 curlwp_bindx(bound);
803 return EINVAL;
804 }
805 sockaddr_copy(dst, src->sa_len, src);
806
807 /* copy dst */
808 src = var->lv_pdst;
809 dst = (struct sockaddr *)
810 &(((struct if_laddrreq *)data)->dstaddr);
811 size = sizeof(((struct if_laddrreq *)data)->dstaddr);
812 if (src->sa_len > size) {
813 l2tp_putref_variant(var, &psref);
814 curlwp_bindx(bound);
815 return EINVAL;
816 }
817 sockaddr_copy(dst, src->sa_len, src);
818 l2tp_putref_variant(var, &psref);
819 curlwp_bindx(bound);
820 break;
821
822 case SIOCSL2TPSESSION:
823 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
824 break;
825
826 /* session IDs must not be zero */
827 if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
828 return EINVAL;
829
830 bound = curlwp_bind();
831 var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
832 if (var_tmp != NULL) {
833 /* duplicate session id */
834 log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
835 sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
836 var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
837 psref_release(&psref, &var_tmp->lv_psref,
838 lv_psref_class);
839 curlwp_bindx(bound);
840 return EINVAL;
841 }
842 curlwp_bindx(bound);
843
844 error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
845 break;
846 case SIOCDL2TPSESSION:
847 l2tp_clear_session(sc);
848 break;
849 case SIOCSL2TPCOOKIE:
850 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
851 break;
852
853 error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
854 l2tpr.peer_cookie, l2tpr.peer_cookie_len);
855 break;
856 case SIOCDL2TPCOOKIE:
857 l2tp_clear_cookie(sc);
858 break;
859 case SIOCSL2TPSTATE:
860 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
861 break;
862
863 l2tp_set_state(sc, l2tpr.state);
864 break;
865 case SIOCGL2TP:
866 /* get L2TPV3 session info */
867 memset(&l2tpr, 0, sizeof(l2tpr));
868
869 bound = curlwp_bind();
870 var = l2tp_getref_variant(sc, &psref);
871 if (var == NULL) {
872 curlwp_bindx(bound);
873 error = EADDRNOTAVAIL;
874 goto bad;
875 }
876
877 l2tpr.state = var->lv_state;
878 l2tpr.my_sess_id = var->lv_my_sess_id;
879 l2tpr.peer_sess_id = var->lv_peer_sess_id;
880 l2tpr.my_cookie = var->lv_my_cookie;
881 l2tpr.my_cookie_len = var->lv_my_cookie_len;
882 l2tpr.peer_cookie = var->lv_peer_cookie;
883 l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
884 l2tp_putref_variant(var, &psref);
885 curlwp_bindx(bound);
886
887 error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
888 break;
889
890 default:
891 error = ifioctl_common(ifp, cmd, data);
892 break;
893 }
894 bad:
895 return error;
896 }
897
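/*
 * Replace the tunnel endpoints. A new l2tp_variant is published while
 * holding encap_lock and sc->l2tp_lock; the old variant's encap entry is
 * detached and its sockaddrs are freed only after l2tp_variant_update()
 * has waited out all readers.
 */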
898 static int
899 l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
900 {
901 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
902 l2tp_ec.ec_if);
903 struct sockaddr *osrc, *odst;
904 struct sockaddr *nsrc, *ndst;
905 struct l2tp_variant *ovar, *nvar;
906 int error;
907
908 nsrc = sockaddr_dup(src, M_WAITOK);
909 ndst = sockaddr_dup(dst, M_WAITOK);
910
911 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
912
913 error = encap_lock_enter();
914 if (error)
915 goto error;
916
917 mutex_enter(&sc->l2tp_lock);
918
919 ovar = sc->l2tp_var;
920 osrc = ovar->lv_psrc;
921 odst = ovar->lv_pdst;
922 *nvar = *ovar;
923 psref_target_init(&nvar->lv_psref, lv_psref_class);
924 nvar->lv_psrc = nsrc;
925 nvar->lv_pdst = ndst;
926 error = l2tp_encap_attach(nvar);
927 if (error) {
928 mutex_exit(&sc->l2tp_lock);
929 encap_lock_exit();
930 goto error;
931 }
932 membar_producer();
933 l2tp_variant_update(sc, nvar);
934
935 mutex_exit(&sc->l2tp_lock);
936
937 (void)l2tp_encap_detach(ovar);
938 encap_lock_exit();
939
940 if (osrc)
941 sockaddr_free(osrc);
942 if (odst)
943 sockaddr_free(odst);
944 kmem_free(ovar, sizeof(*ovar));
945
946 return 0;
947
948 error:
949 sockaddr_free(nsrc);
950 sockaddr_free(ndst);
951 kmem_free(nvar, sizeof(*nvar));
952
953 return error;
954 }
955
956 static void
957 l2tp_delete_tunnel(struct ifnet *ifp)
958 {
959 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
960 l2tp_ec.ec_if);
961 struct sockaddr *osrc, *odst;
962 struct l2tp_variant *ovar, *nvar;
963 int error;
964
965 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
966
967 error = encap_lock_enter();
968 if (error) {
969 kmem_free(nvar, sizeof(*nvar));
970 return;
971 }
972 mutex_enter(&sc->l2tp_lock);
973
974 ovar = sc->l2tp_var;
975 osrc = ovar->lv_psrc;
976 odst = ovar->lv_pdst;
977 *nvar = *ovar;
978 psref_target_init(&nvar->lv_psref, lv_psref_class);
979 nvar->lv_psrc = NULL;
980 nvar->lv_pdst = NULL;
981 membar_producer();
982 l2tp_variant_update(sc, nvar);
983
984 mutex_exit(&sc->l2tp_lock);
985
986 (void)l2tp_encap_detach(ovar);
987 encap_lock_exit();
988
989 if (osrc)
990 sockaddr_free(osrc);
991 if (odst)
992 sockaddr_free(odst);
993 kmem_free(ovar, sizeof(*ovar));
994 }
995
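/*
 * Map a 32-bit session id to a hash bucket: fold the upper 16 bits into the
 * lower ones, mix once more, then mask with L2TP_ID_HASH_SIZE - 1
 * (L2TP_ID_HASH_SIZE must be a power of two).
 */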
996 static int
997 id_hash_func(uint32_t id)
998 {
999 uint32_t hash;
1000
1001 hash = (id >> 16) ^ id;
1002 hash = (hash >> 4) ^ hash;
1003
1004 return hash & (L2TP_ID_HASH_SIZE - 1);
1005 }
1006
1007 static void
1008 l2tp_hash_init(void)
1009 {
1010 u_long mask;
1011
1012 l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
1013 &mask);
1014 KASSERT(mask == (L2TP_ID_HASH_SIZE - 1));
1015 }
1016
1017 static int
1018 l2tp_hash_fini(void)
1019 {
1020 int i;
1021
1022 mutex_enter(&l2tp_hash.lock);
1023
1024 for (i = 0; i < L2TP_ID_HASH_SIZE; i++) {
1025 if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
1026 l2tp_hash) != NULL) {
1027 mutex_exit(&l2tp_hash.lock);
1028 return EBUSY;
1029 }
1030 }
1031 for (i = 0; i < L2TP_ID_HASH_SIZE; i++)
1032 PSLIST_DESTROY(&l2tp_hash.lists[i]);
1033
1034 mutex_exit(&l2tp_hash.lock);
1035
1036 hashdone(l2tp_hash.lists, HASH_PSLIST, L2TP_ID_HASH_SIZE - 1);
1037
1038 return 0;
1039 }
1040
1041 static int
1042 l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
1043 uint32_t peer_sess_id)
1044 {
1045 uint32_t idx;
1046 struct l2tp_variant *nvar;
1047 struct l2tp_variant *ovar;
1048 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1049
1050 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1051
1052 mutex_enter(&sc->l2tp_lock);
1053 ovar = sc->l2tp_var;
1054 *nvar = *ovar;
1055 psref_target_init(&nvar->lv_psref, lv_psref_class);
1056 nvar->lv_my_sess_id = my_sess_id;
1057 nvar->lv_peer_sess_id = peer_sess_id;
1058 membar_producer();
1059
1060 mutex_enter(&l2tp_hash.lock);
1061 if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
1062 PSLIST_WRITER_REMOVE(sc, l2tp_hash);
1063 pserialize_perform(l2tp_psz);
1064 }
1065 mutex_exit(&l2tp_hash.lock);
1066
1067 l2tp_variant_update(sc, nvar);
1068 mutex_exit(&sc->l2tp_lock);
1069
1070 idx = id_hash_func(nvar->lv_my_sess_id);
1071 if ((ifp->if_flags & IFF_DEBUG) != 0)
1072 log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
1073 sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);
1074
1075 mutex_enter(&l2tp_hash.lock);
1076 PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
1077 mutex_exit(&l2tp_hash.lock);
1078
1079 kmem_free(ovar, sizeof(*ovar));
1080 return 0;
1081 }
1082
1083 static int
1084 l2tp_clear_session(struct l2tp_softc *sc)
1085 {
1086 struct l2tp_variant *nvar;
1087 struct l2tp_variant *ovar;
1088
1089 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1090
1091 mutex_enter(&sc->l2tp_lock);
1092 ovar = sc->l2tp_var;
1093 *nvar = *ovar;
1094 psref_target_init(&nvar->lv_psref, lv_psref_class);
1095 nvar->lv_my_sess_id = 0;
1096 nvar->lv_peer_sess_id = 0;
1097 membar_producer();
1098
1099 mutex_enter(&l2tp_hash.lock);
1100 if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
1101 PSLIST_WRITER_REMOVE(sc, l2tp_hash);
1102 pserialize_perform(l2tp_psz);
1103 }
1104 mutex_exit(&l2tp_hash.lock);
1105
1106 l2tp_variant_update(sc, nvar);
1107 mutex_exit(&sc->l2tp_lock);
1108 kmem_free(ovar, sizeof(*ovar));
1109 return 0;
1110 }
1111
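/*
 * Look up the l2tp_variant bound to the given session id and acquire a psref
 * on it. The caller is expected to be bound to the current CPU
 * (curlwp_bind(), as in the SIOCSL2TPSESSION handler of l2tp_ioctl()) across
 * acquire/release, and must drop the reference with psref_release().
 */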
1112 struct l2tp_variant *
1113 l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
1114 {
1115 int idx;
1116 int s;
1117 struct l2tp_softc *sc;
1118
1119 idx = id_hash_func(id);
1120
1121 s = pserialize_read_enter();
1122 PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
1123 l2tp_hash) {
1124 struct l2tp_variant *var = sc->l2tp_var;
1125 if (var == NULL)
1126 continue;
1127 if (var->lv_my_sess_id != id)
1128 continue;
1129 psref_acquire(psref, &var->lv_psref, lv_psref_class);
1130 pserialize_read_exit(s);
1131 return var;
1132 }
1133 pserialize_read_exit(s);
1134 return NULL;
1135 }
1136
1137 /*
1138 * l2tp_variant update API.
1139 *
1140 * Assumption:
1141 * the reader side dereferences sc->l2tp_var only inside a reader critical
1142 * section, that is, no reader still refers to sc->l2tp_var after
1143 * pserialize_perform() returns.
1144 */
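/*
 * Writer-side sketch, as used by l2tp_set_session(), l2tp_set_cookie(),
 * l2tp_set_state() and friends in this file:
 *
 *	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
 *	mutex_enter(&sc->l2tp_lock);
 *	ovar = sc->l2tp_var;
 *	*nvar = *ovar;
 *	psref_target_init(&nvar->lv_psref, lv_psref_class);
 *	... modify nvar ...
 *	membar_producer();
 *	l2tp_variant_update(sc, nvar);	(waits for readers, retires ovar)
 *	mutex_exit(&sc->l2tp_lock);
 *	kmem_free(ovar, sizeof(*ovar));
 */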
1145 static void
1146 l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
1147 {
1148 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1149 struct l2tp_variant *ovar = sc->l2tp_var;
1150
1151 KASSERT(mutex_owned(&sc->l2tp_lock));
1152
1153 sc->l2tp_var = nvar;
1154 pserialize_perform(l2tp_psz);
1155 psref_target_destroy(&ovar->lv_psref, lv_psref_class);
1156
1157 /*
1158 * The atomic_swap_ptr(3) manual does not say whether the 2nd argument
1159 * may be modified, so use sc->l2tp_var here instead of nvar.
1160 */
1161 if (sc->l2tp_var->lv_psrc != NULL && sc->l2tp_var->lv_pdst != NULL)
1162 ifp->if_flags |= IFF_RUNNING;
1163 else
1164 ifp->if_flags &= ~IFF_RUNNING;
1165 }
1166
1167 static int
1168 l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
1169 uint64_t peer_cookie, u_int peer_cookie_len)
1170 {
1171 struct l2tp_variant *nvar;
1172
1173 if (my_cookie == 0 || peer_cookie == 0)
1174 return EINVAL;
1175
1176 if ((my_cookie_len != 4 && my_cookie_len != 8)
1177 || (peer_cookie_len != 4 && peer_cookie_len != 8))
1178 return EINVAL;
1179
1180 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1181
1182 mutex_enter(&sc->l2tp_lock);
1183
1184 *nvar = *sc->l2tp_var;
1185 psref_target_init(&nvar->lv_psref, lv_psref_class);
1186 nvar->lv_my_cookie = my_cookie;
1187 nvar->lv_my_cookie_len = my_cookie_len;
1188 nvar->lv_peer_cookie = peer_cookie;
1189 nvar->lv_peer_cookie_len = peer_cookie_len;
1190 nvar->lv_use_cookie = L2TP_COOKIE_ON;
1191 membar_producer();
1192 l2tp_variant_update(sc, nvar);
1193
1194 mutex_exit(&sc->l2tp_lock);
1195
1196 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1197 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1198 log(LOG_DEBUG,
1199 "%s: set cookie: "
1200 "local cookie_len=%u local cookie=%" PRIu64 ", "
1201 "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
1202 ifp->if_xname, my_cookie_len, my_cookie,
1203 peer_cookie_len, peer_cookie);
1204 }
1205
1206 return 0;
1207 }
1208
1209 static void
1210 l2tp_clear_cookie(struct l2tp_softc *sc)
1211 {
1212 struct l2tp_variant *nvar;
1213
1214 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1215
1216 mutex_enter(&sc->l2tp_lock);
1217
1218 *nvar = *sc->l2tp_var;
1219 psref_target_init(&nvar->lv_psref, lv_psref_class);
1220 nvar->lv_my_cookie = 0;
1221 nvar->lv_my_cookie_len = 0;
1222 nvar->lv_peer_cookie = 0;
1223 nvar->lv_peer_cookie_len = 0;
1224 nvar->lv_use_cookie = L2TP_COOKIE_OFF;
1225 membar_producer();
1226 l2tp_variant_update(sc, nvar);
1227
1228 mutex_exit(&sc->l2tp_lock);
1229 }
1230
1231 static void
1232 l2tp_set_state(struct l2tp_softc *sc, int state)
1233 {
1234 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1235 struct l2tp_variant *nvar;
1236
1237 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1238
1239 mutex_enter(&sc->l2tp_lock);
1240
1241 *nvar = *sc->l2tp_var;
1242 psref_target_init(&nvar->lv_psref, lv_psref_class);
1243 nvar->lv_state = state;
1244 membar_producer();
1245 l2tp_variant_update(sc, nvar);
1246
1247 if (nvar->lv_state == L2TP_STATE_UP) {
1248 ifp->if_link_state = LINK_STATE_UP;
1249 } else {
1250 ifp->if_link_state = LINK_STATE_DOWN;
1251 }
1252
1253 mutex_exit(&sc->l2tp_lock);
1254
1255 #ifdef NOTYET
1256 #if NVLAN > 0
1257 vlan_linkstate_notify(ifp, ifp->if_link_state);
1258 #endif
1259 #endif
1260 }
1261
1262 static int
1263 l2tp_encap_attach(struct l2tp_variant *var)
1264 {
1265 int error;
1266
1267 if (var == NULL || var->lv_psrc == NULL)
1268 return EINVAL;
1269
1270 switch (var->lv_psrc->sa_family) {
1271 #ifdef INET
1272 case AF_INET:
1273 error = in_l2tp_attach(var);
1274 break;
1275 #endif
1276 #ifdef INET6
1277 case AF_INET6:
1278 error = in6_l2tp_attach(var);
1279 break;
1280 #endif
1281 default:
1282 error = EINVAL;
1283 break;
1284 }
1285
1286 return error;
1287 }
1288
1289 static int
1290 l2tp_encap_detach(struct l2tp_variant *var)
1291 {
1292 int error;
1293
1294 if (var == NULL || var->lv_psrc == NULL)
1295 return EINVAL;
1296
1297 switch (var->lv_psrc->sa_family) {
1298 #ifdef INET
1299 case AF_INET:
1300 error = in_l2tp_detach(var);
1301 break;
1302 #endif
1303 #ifdef INET6
1304 case AF_INET6:
1305 error = in6_l2tp_detach(var);
1306 break;
1307 #endif
1308 default:
1309 error = EINVAL;
1310 break;
1311 }
1312
1313 return error;
1314 }
1315
1316 /*
1317 * TODO:
1318 * unify with gif_check_nesting().
1319 */
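/*
 * Limit tunnel nesting: a PACKET_TAG_TUNNEL_INFO mtag carries an int counter
 * that is created on the first pass through a tunnel interface and
 * incremented on later ones; EIO is returned once it exceeds
 * max_l2tp_nesting.
 */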
1320 int
1321 l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
1322 {
1323 struct m_tag *mtag;
1324 int *count;
1325
1326 mtag = m_tag_find(m, PACKET_TAG_TUNNEL_INFO, NULL);
1327 if (mtag != NULL) {
1328 count = (int *)(mtag + 1);
1329 if (++(*count) > max_l2tp_nesting) {
1330 log(LOG_NOTICE,
1331 "%s: recursively called too many times (%d)\n",
1332 if_name(ifp),
1333 *count);
1334 return EIO;
1335 }
1336 } else {
1337 mtag = m_tag_get(PACKET_TAG_TUNNEL_INFO, sizeof(*count),
1338 M_NOWAIT);
1339 if (mtag != NULL) {
1340 m_tag_prepend(m, mtag);
1341 count = (int *)(mtag + 1);
1342 *count = 0;
1343 }
1344 #ifdef L2TP_DEBUG
1345 else {
1346 log(LOG_DEBUG,
1347 "%s: m_tag_get() failed, recursive calls are not prevented.\n",
1348 if_name(ifp));
1349 }
1350 #endif
1351 }
1352
1353 return 0;
1354 }
1355
1356 /*
1357 * Module infrastructure
1358 */
1359 #include "if_module.h"
1360
1361 IF_MODULE(MODULE_CLASS_DRIVER, l2tp, "")
1362
1363
1364 /* TODO: IP_TCPMSS support */
1365 #ifdef IP_TCPMSS
1366 static int l2tp_need_tcpmss_clamp(struct ifnet *);
1367 #ifdef INET
1368 static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
1369 #endif
1370 #ifdef INET6
1371 static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
1372 #endif
1373
1374 struct mbuf *
1375 l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
1376 {
1377
1378 if (l2tp_need_tcpmss_clamp(ifp)) {
1379 struct ether_header *eh;
1380 struct ether_vlan_header evh;
1381
1382 /* save ether header */
1383 m_copydata(m, 0, sizeof(evh), (void *)&evh);
1384 eh = (struct ether_header *)&evh;
1385
1386 switch (ntohs(eh->ether_type)) {
1387 case ETHERTYPE_VLAN: /* Ether + VLAN */
1388 if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
1389 break;
1390 m_adj(m, sizeof(struct ether_vlan_header));
1391 switch (ntohs(evh.evl_proto)) {
1392 #ifdef INET
1393 case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
1394 m = l2tp_tcpmss4_clamp(ifp, m);
1395 if (m == NULL)
1396 return NULL;
1397 break;
1398 #endif /* INET */
1399 #ifdef INET6
1400 case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
1401 m = l2tp_tcpmss6_clamp(ifp, m);
1402 if (m == NULL)
1403 return NULL;
1404 break;
1405 #endif /* INET6 */
1406 default:
1407 break;
1408 }
1409 /* restore ether header */
1410 M_PREPEND(m, sizeof(struct ether_vlan_header),
1411 M_DONTWAIT);
1412 if (m == NULL)
1413 return NULL;
1414 *mtod(m, struct ether_vlan_header *) = evh;
1415 break;
1416 #ifdef INET
1417 case ETHERTYPE_IP: /* Ether + IPv4 */
1418 if (m->m_pkthdr.len <= sizeof(struct ether_header))
1419 break;
1420 m_adj(m, sizeof(struct ether_header));
1421 m = l2tp_tcpmss4_clamp(ifp, m);
1422 if (m == NULL)
1423 return NULL;
1424 /* restore ether header */
1425 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
1426 if (m == NULL)
1427 return NULL;
1428 *mtod(m, struct ether_header *) = *eh;
1429 break;
1430 #endif /* INET */
1431 #ifdef INET6
1432 case ETHERTYPE_IPV6: /* Ether + IPv6 */
1433 if (m->m_pkthdr.len <= sizeof(struct ether_header))
1434 break;
1435 m_adj(m, sizeof(struct ether_header));
1436 m = l2tp_tcpmss6_clamp(ifp, m);
1437 if (m == NULL)
1438 return NULL;
1439 /* restore ether header */
1440 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
1441 if (m == NULL)
1442 return NULL;
1443 *mtod(m, struct ether_header *) = *eh;
1444 break;
1445 #endif /* INET6 */
1446 default:
1447 break;
1448 }
1449 }
1450
1451 return m;
1452 }
1453
1454 static int
1455 l2tp_need_tcpmss_clamp(struct ifnet *ifp)
1456 {
1457 int ret = 0;
1458
1459 #ifdef INET
1460 if (ifp->if_tcpmss != 0)
1461 ret = 1;
1462 #endif /* INET */
1463
1464 #ifdef INET6
1465 if (ifp->if_tcpmss6 != 0)
1466 ret = 1;
1467 #endif /* INET6 */
1468
1469 return ret;
1470 }
1471
1472 #ifdef INET
1473 static struct mbuf *
1474 l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
1475 {
1476
1477 if (ifp->if_tcpmss != 0) {
1478 return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
1479 ifp->if_mtu - IP_TCPMSS_EXTLEN :
1480 ifp->if_tcpmss);
1481 }
1482 return m;
1483 }
1484 #endif /* INET */
1485
1486 #ifdef INET6
1487 static struct mbuf *
1488 l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
1489 {
1490 int ip6hdrlen;
1491
1492 if (ifp->if_tcpmss6 != 0 &&
1493 ip6_tcpmss_applicable(m, &ip6hdrlen)) {
1494 return ip6_tcpmss(m, ip6hdrlen,
1495 (ifp->if_tcpmss6 < 0) ?
1496 ifp->if_mtu - IP6_TCPMSS_EXTLEN :
1497 ifp->if_tcpmss6);
1498 }
1499 return m;
1500 }
1501 #endif /* INET6 */
1502
1503 #endif /* IP_TCPMSS */
1504