1 /* $NetBSD: if_l2tp.c,v 1.1.2.2 2017/03/20 06:57:50 pgoyette Exp $ */
2
3 /*
4 * Copyright (c) 2017 Internet Initiative Japan Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * L2TPv3 kernel interface
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.1.2.2 2017/03/20 06:57:50 pgoyette Exp $");
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/mbuf.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/errno.h>
47 #include <sys/ioctl.h>
48 #include <sys/time.h>
49 #include <sys/syslog.h>
50 #include <sys/proc.h>
51 #include <sys/conf.h>
52 #include <sys/kauth.h>
53 #include <sys/cpu.h>
54 #include <sys/cprng.h>
55 #include <sys/intr.h>
56 #include <sys/kmem.h>
57 #include <sys/mutex.h>
58 #include <sys/atomic.h>
59 #include <sys/pserialize.h>
60 #include <sys/device.h>
61 #include <sys/module.h>
62
63 #include <net/if.h>
64 #include <net/if_dl.h>
65 #include <net/if_ether.h>
66 #include <net/if_types.h>
67 #include <net/netisr.h>
68 #include <net/route.h>
69 #include <net/bpf.h>
70 #include <net/if_vlanvar.h>
71
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip_encap.h>
76 #ifdef INET
77 #include <netinet/in_var.h>
78 #include <netinet/in_l2tp.h>
79 #endif /* INET */
80 #ifdef INET6
81 #include <netinet6/in6_l2tp.h>
82 #endif
83
84 #include <net/if_l2tp.h>
85
86 #if NVLAN > 0
87 #include <net/if_vlanvar.h>
88 #endif
89
90 /* TODO: IP_TCPMSS support */
91 #undef IP_TCPMSS
92 #ifdef IP_TCPMSS
93 #include <netinet/ip_tcpmss.h>
94 #endif
95
96 #include <net/bpf.h>
97 #include <net/net_osdep.h>
98
99 /*
100 * l2tp global variable definitions
101 */
102 LIST_HEAD(l2tp_sclist, l2tp_softc);
103 static struct {
104 struct l2tp_sclist list;
105 kmutex_t lock;
106 } l2tp_softcs __cacheline_aligned;
107
108
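/*
 * Session-ID hash table.  Writers (set/clear session) take l2tp_hash.lock
 * and use the PSLIST writer primitives; readers walk the lists inside a
 * pserialize read section and take a psref on the variant before leaving
 * it (see l2tp_lookup_session_ref() below).
 */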
109 #if !defined(L2TP_ID_HASH_SIZE)
110 #define L2TP_ID_HASH_SIZE 64
111 #endif
112 static struct {
113 kmutex_t lock;
114 struct pslist_head *lists;
115 } l2tp_hash __cacheline_aligned = {
116 .lists = NULL,
117 };
118
119 pserialize_t l2tp_psz __read_mostly;
120 struct psref_class *lv_psref_class __read_mostly;
121
122 static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
123 static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);
124
125 static int l2tp_clone_create(struct if_clone *, int);
126 static int l2tp_clone_destroy(struct ifnet *);
127
128 struct if_clone l2tp_cloner =
129 IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);
130
131 static int l2tp_output(struct ifnet *, struct mbuf *,
132 const struct sockaddr *, const struct rtentry *);
133 static void l2tpintr(struct l2tp_variant *);
134
135 static void l2tp_hash_init(void);
136 static int l2tp_hash_fini(void);
137
138 static void l2tp_start(struct ifnet *);
139 static int l2tp_transmit(struct ifnet *, struct mbuf *);
140
141 static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
142 struct sockaddr *);
143 static void l2tp_delete_tunnel(struct ifnet *);
144
145 static int id_hash_func(uint32_t);
146
147 static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
148 static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
149 static int l2tp_clear_session(struct l2tp_softc *);
150 static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
151 static void l2tp_clear_cookie(struct l2tp_softc *);
152 static void l2tp_set_state(struct l2tp_softc *, int);
153 static int l2tp_encap_attach(struct l2tp_variant *);
154 static int l2tp_encap_detach(struct l2tp_variant *);
155
156 #ifndef MAX_L2TP_NEST
157 /*
158 * This macro controls the upper limit on nesting of l2tp tunnels.
159 * Since a careless configuration with a large value here may crash the
160 * system, we don't allow any nesting by default.
161 * If you need to configure nested l2tp tunnels, you can define this macro
162 * in your kernel configuration file. However, if you do so, please be
163 * careful to configure the tunnels so that they do not form a loop.
164 */
165 /*
166 * XXX
167 * Currently, if in_l2tp_output() is called recursively, it deadlocks on
168 * its own struct l2tp_ro->lr_lock. So, nested l2tp tunnels are prohibited.
169 */
170 #define MAX_L2TP_NEST 0
171 #endif
172
173 static int max_l2tp_nesting = MAX_L2TP_NEST;
174
175 /* ARGSUSED */
176 void
177 l2tpattach(int count)
178 {
179 /*
180 * Nothing to do here; initialization is handled by the
181 * module initialization code in l2tpinit() below.
182 */
183 }
184
185 static void
186 l2tpinit(void)
187 {
188
189 mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
190 LIST_INIT(&l2tp_softcs.list);
191
192 mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
193 l2tp_psz = pserialize_create();
194 lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
195 if_clone_attach(&l2tp_cloner);
196
197 l2tp_hash_init();
198 }
199
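/*
 * Module detach helper: refuse to unload while any l2tp interface still
 * exists or any session is still registered in the hash table.
 */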
200 static int
201 l2tpdetach(void)
202 {
203 int error;
204
205 mutex_enter(&l2tp_softcs.lock);
206 if (!LIST_EMPTY(&l2tp_softcs.list)) {
207 mutex_exit(&l2tp_softcs.lock);
208 return EBUSY;
209 }
210 mutex_exit(&l2tp_softcs.lock);
211
212 error = l2tp_hash_fini();
213 if (error)
214 return error;
215
216 if_clone_detach(&l2tp_cloner);
217 psref_class_destroy(lv_psref_class);
218 pserialize_destroy(l2tp_psz);
219 mutex_destroy(&l2tp_hash.lock);
220
221 return error;
222 }
223
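/*
 * Create an l2tp(4) interface instance: allocate the softc and its initial
 * variant (session down, cookies off), attach the interface and a per-CPU
 * route cache, and link the softc onto the global list.
 */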
224 static int
225 l2tp_clone_create(struct if_clone *ifc, int unit)
226 {
227 struct l2tp_softc *sc;
228 struct l2tp_variant *var;
229
230 sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
231 var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
232
233 var->lv_softc = sc;
234 var->lv_state = L2TP_STATE_DOWN;
235 var->lv_use_cookie = L2TP_COOKIE_OFF;
236 psref_target_init(&var->lv_psref, lv_psref_class);
237
238 sc->l2tp_var = var;
239 mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
240 PSLIST_ENTRY_INIT(sc, l2tp_hash);
241
242 if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
243
244 l2tpattach0(sc);
245
246 sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
247 KASSERTMSG(sc->l2tp_ro_percpu != NULL,
248 "failed to allocate sc->l2tp_ro_percpu");
249 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);
250
251 mutex_enter(&l2tp_softcs.lock);
252 LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
253 mutex_exit(&l2tp_softcs.lock);
254
255 return (0);
256 }
257
258 void
259 l2tpattach0(struct l2tp_softc *sc)
260 {
261
262 sc->l2tp_ec.ec_if.if_addrlen = 0;
263 sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
264 sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
265 sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
266 sc->l2tp_ec.ec_if.if_output = l2tp_output;
267 sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
268 sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
269 sc->l2tp_ec.ec_if.if_start = l2tp_start;
270 sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
271 sc->l2tp_ec.ec_if._if_input = ether_input;
272 IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);
273 if_attach(&sc->l2tp_ec.ec_if);
274 if_alloc_sadl(&sc->l2tp_ec.ec_if);
275 bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));
276 }
277
278 void
279 l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
280 {
281 struct l2tp_ro *lro = p;
282
283 mutex_init(&lro->lr_lock, MUTEX_DEFAULT, IPL_NONE);
284 }
285
286 void
287 l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
288 {
289 struct l2tp_ro *lro = p;
290
291 rtcache_free(&lro->lr_ro);
292
293 mutex_destroy(&lro->lr_lock);
294 }
295
296 static int
297 l2tp_clone_destroy(struct ifnet *ifp)
298 {
299 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
300 l2tp_ec.ec_if);
301
302 l2tp_clear_session(sc);
303 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
304
305 mutex_enter(&l2tp_softcs.lock);
306 LIST_REMOVE(sc, l2tp_list);
307 mutex_exit(&l2tp_softcs.lock);
308
309 bpf_detach(ifp);
310
311 if_detach(ifp);
312
313 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
314 percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));
315
316 kmem_free(sc->l2tp_var, sizeof(struct l2tp_variant));
317 mutex_destroy(&sc->l2tp_lock);
318 kmem_free(sc, sizeof(struct l2tp_softc));
319
320 return 0;
321 }
322
323 static int
324 l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
325 const struct rtentry *rt)
326 {
327 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
328 l2tp_ec.ec_if);
329 struct l2tp_variant *var;
330 struct psref psref;
331 int error = 0;
332
333 var = l2tp_getref_variant(sc, &psref);
334 if (var == NULL) {
335 m_freem(m);
336 return ENETDOWN;
337 }
338
339 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
340
341 m->m_flags &= ~(M_BCAST|M_MCAST);
342
343 if ((ifp->if_flags & IFF_UP) == 0) {
344 m_freem(m);
345 error = ENETDOWN;
346 goto end;
347 }
348
349 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
350 m_freem(m);
351 error = ENETDOWN;
352 goto end;
353 }
354
355 /* XXX should we check if our outer source is legal? */
356
357 /* use DLT_NULL encapsulation here to pass inner af type */
358 M_PREPEND(m, sizeof(int), M_DONTWAIT);
359 if (m == NULL) {
360 error = ENOBUFS;
361 goto end;
362 }
363 *mtod(m, int *) = dst->sa_family;
364
365 IFQ_ENQUEUE(&ifp->if_snd, m, error);
366 if (error)
367 goto end;
368
369 /*
370 * direct call to avoid infinite loop at l2tpintr()
371 */
372 l2tpintr(var);
373
374 error = 0;
375
376 end:
377 l2tp_putref_variant(var, &psref);
378 if (error)
379 ifp->if_oerrors++;
380
381 return error;
382 }
383
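/*
 * Drain the interface send queue.  If no session is configured the queue
 * is simply purged; otherwise each frame is handed to in_l2tp_output() or
 * in6_l2tp_output() according to the address family of the outer source.
 */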
384 static void
385 l2tpintr(struct l2tp_variant *var)
386 {
387 struct l2tp_softc *sc;
388 struct ifnet *ifp;
389 struct mbuf *m;
390 int error;
391
392 KASSERT(psref_held(&var->lv_psref, lv_psref_class));
393
394 sc = var->lv_softc;
395 ifp = &sc->l2tp_ec.ec_if;
396
397 /* output processing */
398 if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
399 IFQ_PURGE(&ifp->if_snd);
400 return;
401 }
402
403 for (;;) {
404 IFQ_DEQUEUE(&ifp->if_snd, m);
405 if (m == NULL)
406 break;
407 m->m_flags &= ~(M_BCAST|M_MCAST);
408 bpf_mtap(ifp, m);
409 switch (var->lv_psrc->sa_family) {
410 #ifdef INET
411 case AF_INET:
412 error = in_l2tp_output(var, m);
413 break;
414 #endif
415 #ifdef INET6
416 case AF_INET6:
417 error = in6_l2tp_output(var, m);
418 break;
419 #endif
420 default:
421 m_freem(m);
422 error = ENETDOWN;
423 break;
424 }
425
426 if (error)
427 ifp->if_oerrors++;
428 else {
429 ifp->if_opackets++;
430 /*
431 * obytes is incremented at ether_output() or
432 * bridge_enqueue().
433 */
434 }
435 }
436
437 }
438
439 void
440 l2tp_input(struct mbuf *m, struct ifnet *ifp)
441 {
442
443 KASSERT(ifp != NULL);
444
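/*
 * The payload is an inner Ethernet frame, so if its head is 4-byte
 * aligned the IP header following the 14-byte Ethernet header is not.
 * In that case re-copy the head into a fresh mbuf offset by 2 bytes so
 * that the inner IP header ends up properly aligned for if_input().
 */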
445 if (0 == (mtod(m, u_long) & 0x03)) {
446 /* copy and align head of payload */
447 struct mbuf *m_head;
448 int copy_length;
449
450 #define L2TP_COPY_LENGTH 60
451 #define L2TP_LINK_HDR_ROOM (MHLEN - L2TP_COPY_LENGTH - 4/*round4(2)*/)
452
453 if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
454 copy_length = m->m_pkthdr.len;
455 } else {
456 copy_length = L2TP_COPY_LENGTH;
457 }
458
459 if (m->m_len < copy_length) {
460 m = m_pullup(m, copy_length);
461 if (m == NULL)
462 return;
463 }
464
465 MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
466 if (m_head == NULL) {
467 m_freem(m);
468 return;
469 }
470 M_COPY_PKTHDR(m_head, m);
471
472 m_head->m_data += 2 /* align */ + L2TP_LINK_HDR_ROOM;
473 memcpy(m_head->m_data, m->m_data, copy_length);
474 m_head->m_len = copy_length;
475 m->m_data += copy_length;
476 m->m_len -= copy_length;
477
478 /* construct chain */
479 if (m->m_len == 0) {
480 m_head->m_next = m_free(m); /* not m_freem */
481 } else {
482 /*
483 * The mtags were copied to m_head by the M_COPY_PKTHDR() call above;
484 * delete them from m here, since clearing M_PKTHDR does not free them.
485 */
486 m_tag_delete_chain(m, NULL);
487 m->m_flags &= ~M_PKTHDR;
488 m_head->m_next = m;
489 }
490
491 /* override m */
492 m = m_head;
493 }
494
495 m_set_rcvif(m, ifp);
496
497 /*
498 * bpf_mtap() and ifp->if_ipackets++ are done in if_input()
499 *
500 * obytes is incremented at ether_output() or bridge_enqueue().
501 */
502 if_percpuq_enqueue(ifp->if_percpuq, m);
503 }
504
505 void
506 l2tp_start(struct ifnet *ifp)
507 {
508 struct psref psref;
509 struct l2tp_variant *var;
510 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
511 l2tp_ec.ec_if);
512
513 var = l2tp_getref_variant(sc, &psref);
514 if (var == NULL)
515 return;
516
517 if (var->lv_psrc == NULL || var->lv_pdst == NULL)
518 goto out;
519 l2tpintr(var);
520 out:
521 l2tp_putref_variant(var, &psref);
522 }
523
524 int
525 l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
526 {
527 int error;
528 struct psref psref;
529 struct l2tp_variant *var;
530 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
531 l2tp_ec.ec_if);
532
533 var = l2tp_getref_variant(sc, &psref);
534 if (var == NULL) {
535 m_freem(m);
536 return ENETDOWN;
537 }
538
539 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
540 m_freem(m);
541 error = ENETDOWN;
542 goto out;
543 }
544
545 m->m_flags &= ~(M_BCAST|M_MCAST);
546 bpf_mtap(ifp, m);
547 switch (var->lv_psrc->sa_family) {
548 #ifdef INET
549 case AF_INET:
550 error = in_l2tp_output(var, m);
551 break;
552 #endif
553 #ifdef INET6
554 case AF_INET6:
555 error = in6_l2tp_output(var, m);
556 break;
557 #endif
558 default:
559 m_freem(m);
560 error = ENETDOWN;
561 break;
562 }
563
564 if (error)
565 ifp->if_oerrors++;
566 else {
567 ifp->if_opackets++;
568 /*
569 * obytes is incremented at ether_output() or bridge_enqueue().
570 */
571 }
572
573 out:
574 l2tp_putref_variant(var, &psref);
575 return error;
576 }
577
578 /* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
579 int
580 l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
581 {
582 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
583 l2tp_ec.ec_if);
584 struct l2tp_variant *var, *var_tmp;
585 struct ifreq *ifr = data;
586 int error = 0, size;
587 struct sockaddr *dst, *src;
588 struct l2tp_req l2tpr;
589 u_long mtu;
590 int bound;
591 struct psref psref;
592
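	/*
	 * Besides the generic interface ioctls, this handles the l2tp(4)-
	 * specific requests SIOC[SD]L2TPSESSION, SIOC[SD]L2TPCOOKIE,
	 * SIOCSL2TPSTATE and SIOCGL2TP, most of which exchange a
	 * struct l2tp_req via ifr->ifr_data.
	 */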
593 switch (cmd) {
594 case SIOCSIFADDR:
595 ifp->if_flags |= IFF_UP;
596 break;
597
598 case SIOCSIFDSTADDR:
599 break;
600
601 case SIOCADDMULTI:
602 case SIOCDELMULTI:
603 switch (ifr->ifr_addr.sa_family) {
604 #ifdef INET
605 case AF_INET: /* IP supports Multicast */
606 break;
607 #endif /* INET */
608 #ifdef INET6
609 case AF_INET6: /* IP6 supports Multicast */
610 break;
611 #endif /* INET6 */
612 default: /* Other protocols don't support Multicast */
613 error = EAFNOSUPPORT;
614 break;
615 }
616 break;
617
618 case SIOCSIFMTU:
619 mtu = ifr->ifr_mtu;
620 if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
621 return (EINVAL);
622 ifp->if_mtu = mtu;
623 break;
624
625 #ifdef INET
626 case SIOCSIFPHYADDR:
627 src = (struct sockaddr *)
628 &(((struct in_aliasreq *)data)->ifra_addr);
629 dst = (struct sockaddr *)
630 &(((struct in_aliasreq *)data)->ifra_dstaddr);
631 if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
632 return EAFNOSUPPORT;
633 else if (src->sa_len != sizeof(struct sockaddr_in)
634 || dst->sa_len != sizeof(struct sockaddr_in))
635 return EINVAL;
636
637 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
638 break;
639
640 #endif /* INET */
641 #ifdef INET6
642 case SIOCSIFPHYADDR_IN6:
643 src = (struct sockaddr *)
644 &(((struct in6_aliasreq *)data)->ifra_addr);
645 dst = (struct sockaddr *)
646 &(((struct in6_aliasreq *)data)->ifra_dstaddr);
647 if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
648 return EAFNOSUPPORT;
649 else if (src->sa_len != sizeof(struct sockaddr_in6)
650 || dst->sa_len != sizeof(struct sockaddr_in6))
651 return EINVAL;
652
653 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
654 break;
655
656 #endif /* INET6 */
657 case SIOCSLIFPHYADDR:
658 src = (struct sockaddr *)
659 &(((struct if_laddrreq *)data)->addr);
660 dst = (struct sockaddr *)
661 &(((struct if_laddrreq *)data)->dstaddr);
662 if (src->sa_family != dst->sa_family)
663 return EINVAL;
664 else if (src->sa_family == AF_INET
665 && src->sa_len != sizeof(struct sockaddr_in))
666 return EINVAL;
667 else if (src->sa_family == AF_INET6
668 && src->sa_len != sizeof(struct sockaddr_in6))
669 return EINVAL;
670 else if (dst->sa_family == AF_INET
671 && dst->sa_len != sizeof(struct sockaddr_in))
672 return EINVAL;
673 else if (dst->sa_family == AF_INET6
674 && dst->sa_len != sizeof(struct sockaddr_in6))
675 return EINVAL;
676
677 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
678 break;
679
680 case SIOCDIFPHYADDR:
681 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
682 break;
683
684 case SIOCGIFPSRCADDR:
685 #ifdef INET6
686 case SIOCGIFPSRCADDR_IN6:
687 #endif /* INET6 */
688 bound = curlwp_bind();
689 var = l2tp_getref_variant(sc, &psref);
690 if (var == NULL) {
691 curlwp_bindx(bound);
692 error = EADDRNOTAVAIL;
693 goto bad;
694 }
695 if (var->lv_psrc == NULL) {
696 l2tp_putref_variant(var, &psref);
697 curlwp_bindx(bound);
698 error = EADDRNOTAVAIL;
699 goto bad;
700 }
701 src = var->lv_psrc;
702 switch (cmd) {
703 #ifdef INET
704 case SIOCGIFPSRCADDR:
705 dst = &ifr->ifr_addr;
706 size = sizeof(ifr->ifr_addr);
707 break;
708 #endif /* INET */
709 #ifdef INET6
710 case SIOCGIFPSRCADDR_IN6:
711 dst = (struct sockaddr *)
712 &(((struct in6_ifreq *)data)->ifr_addr);
713 size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
714 break;
715 #endif /* INET6 */
716 default:
717 l2tp_putref_variant(var, &psref);
718 curlwp_bindx(bound);
719 error = EADDRNOTAVAIL;
720 goto bad;
721 }
722 if (src->sa_len > size) {
723 l2tp_putref_variant(var, &psref);
724 curlwp_bindx(bound);
725 return EINVAL;
726 }
727 sockaddr_copy(dst, src->sa_len, src);
728 l2tp_putref_variant(var, &psref);
729 curlwp_bindx(bound);
730 break;
731
732 case SIOCGIFPDSTADDR:
733 #ifdef INET6
734 case SIOCGIFPDSTADDR_IN6:
735 #endif /* INET6 */
736 bound = curlwp_bind();
737 var = l2tp_getref_variant(sc, &psref);
738 if (var == NULL) {
739 curlwp_bindx(bound);
740 error = EADDRNOTAVAIL;
741 goto bad;
742 }
743 if (var->lv_pdst == NULL) {
744 l2tp_putref_variant(var, &psref);
745 curlwp_bindx(bound);
746 error = EADDRNOTAVAIL;
747 goto bad;
748 }
749 src = var->lv_pdst;
750 switch (cmd) {
751 #ifdef INET
752 case SIOCGIFPDSTADDR:
753 dst = &ifr->ifr_addr;
754 size = sizeof(ifr->ifr_addr);
755 break;
756 #endif /* INET */
757 #ifdef INET6
758 case SIOCGIFPDSTADDR_IN6:
759 dst = (struct sockaddr *)
760 &(((struct in6_ifreq *)data)->ifr_addr);
761 size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
762 break;
763 #endif /* INET6 */
764 default:
765 l2tp_putref_variant(var, &psref);
766 curlwp_bindx(bound);
767 error = EADDRNOTAVAIL;
768 goto bad;
769 }
770 if (src->sa_len > size) {
771 l2tp_putref_variant(var, &psref);
772 curlwp_bindx(bound);
773 return EINVAL;
774 }
775 sockaddr_copy(dst, src->sa_len, src);
776 l2tp_putref_variant(var, &psref);
777 curlwp_bindx(bound);
778 break;
779
780 case SIOCGLIFPHYADDR:
781 bound = curlwp_bind();
782 var = l2tp_getref_variant(sc, &psref);
783 if (var == NULL) {
784 curlwp_bindx(bound);
785 error = EADDRNOTAVAIL;
786 goto bad;
787 }
788 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
789 l2tp_putref_variant(var, &psref);
790 curlwp_bindx(bound);
791 error = EADDRNOTAVAIL;
792 goto bad;
793 }
794
795 /* copy src */
796 src = var->lv_psrc;
797 dst = (struct sockaddr *)
798 &(((struct if_laddrreq *)data)->addr);
799 size = sizeof(((struct if_laddrreq *)data)->addr);
800 if (src->sa_len > size) {
801 l2tp_putref_variant(var, &psref);
802 curlwp_bindx(bound);
803 return EINVAL;
804 }
805 sockaddr_copy(dst, src->sa_len, src);
806
807 /* copy dst */
808 src = var->lv_pdst;
809 dst = (struct sockaddr *)
810 &(((struct if_laddrreq *)data)->dstaddr);
811 size = sizeof(((struct if_laddrreq *)data)->dstaddr);
812 if (src->sa_len > size) {
813 l2tp_putref_variant(var, &psref);
814 curlwp_bindx(bound);
815 return EINVAL;
816 }
817 sockaddr_copy(dst, src->sa_len, src);
818 l2tp_putref_variant(var, &psref);
819 curlwp_bindx(bound);
820 break;
821
822 case SIOCSL2TPSESSION:
823 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
824 break;
825
826 /* session id must not be zero */
827 if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
828 return EINVAL;
829
830 bound = curlwp_bind();
831 var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
832 if (var_tmp != NULL) {
833 /* duplicate session id */
834 log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
835 sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
836 var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
837 psref_release(&psref, &var_tmp->lv_psref,
838 lv_psref_class);
839 curlwp_bindx(bound);
840 return EINVAL;
841 }
842 curlwp_bindx(bound);
843
844 error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
845 break;
846 case SIOCDL2TPSESSION:
847 l2tp_clear_session(sc);
848 break;
849 case SIOCSL2TPCOOKIE:
850 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
851 break;
852
853 error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
854 l2tpr.peer_cookie, l2tpr.peer_cookie_len);
855 break;
856 case SIOCDL2TPCOOKIE:
857 l2tp_clear_cookie(sc);
858 break;
859 case SIOCSL2TPSTATE:
860 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
861 break;
862
863 l2tp_set_state(sc, l2tpr.state);
864 break;
865 case SIOCGL2TP:
866 /* get L2TPV3 session info */
867 memset(&l2tpr, 0, sizeof(l2tpr));
868
869 bound = curlwp_bind();
870 var = l2tp_getref_variant(sc, &psref);
871 if (var == NULL) {
872 curlwp_bindx(bound);
873 error = EADDRNOTAVAIL;
874 goto bad;
875 }
876
877 l2tpr.state = var->lv_state;
878 l2tpr.my_sess_id = var->lv_my_sess_id;
879 l2tpr.peer_sess_id = var->lv_peer_sess_id;
880 l2tpr.my_cookie = var->lv_my_cookie;
881 l2tpr.my_cookie_len = var->lv_my_cookie_len;
882 l2tpr.peer_cookie = var->lv_peer_cookie;
883 l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
884 l2tp_putref_variant(var, &psref);
885 curlwp_bindx(bound);
886
887 error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
888 break;
889
890 default:
891 error = ifioctl_common(ifp, cmd, data);
892 break;
893 }
894 bad:
895 return error;
896 }
897
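/*
 * Tunnel (outer address) configuration follows a copy-and-swap pattern:
 * allocate a new variant, copy the current one, install the new addresses
 * and encap attachment, publish it with l2tp_variant_update(), and only
 * then free the old variant and addresses after dropping sc->l2tp_lock.
 */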
898 static int
899 l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
900 {
901 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
902 l2tp_ec.ec_if);
903 struct sockaddr *osrc, *odst;
904 struct sockaddr *nsrc, *ndst;
905 struct l2tp_variant *ovar, *nvar;
906 int error;
907
908 nsrc = sockaddr_dup(src, M_WAITOK);
909 ndst = sockaddr_dup(dst, M_WAITOK);
910
911 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
912
913 error = encap_lock_enter();
914 if (error)
915 goto error;
916
917 mutex_enter(&sc->l2tp_lock);
918
919 ovar = sc->l2tp_var;
920 osrc = ovar->lv_psrc;
921 odst = ovar->lv_pdst;
922 *nvar = *ovar;
923 psref_target_init(&nvar->lv_psref, lv_psref_class);
924 nvar->lv_psrc = nsrc;
925 nvar->lv_pdst = ndst;
926 error = l2tp_encap_attach(nvar);
927 if (error) {
928 mutex_exit(&sc->l2tp_lock);
929 encap_lock_exit();
930 goto error;
931 }
932 membar_producer();
933 l2tp_variant_update(sc, nvar);
934
935 mutex_exit(&sc->l2tp_lock);
936
937 (void)l2tp_encap_detach(ovar);
938 encap_lock_exit();
939
940 if (osrc)
941 sockaddr_free(osrc);
942 if (odst)
943 sockaddr_free(odst);
944 kmem_free(ovar, sizeof(*ovar));
945
946 return 0;
947
948 error:
949 sockaddr_free(nsrc);
950 sockaddr_free(ndst);
951 kmem_free(nvar, sizeof(*nvar));
952
953 return error;
954 }
955
956 static void
957 l2tp_delete_tunnel(struct ifnet *ifp)
958 {
959 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
960 l2tp_ec.ec_if);
961 struct sockaddr *osrc, *odst;
962 struct l2tp_variant *ovar, *nvar;
963 int error;
964
965 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
966
967 error = encap_lock_enter();
968 if (error) {
969 kmem_free(nvar, sizeof(*nvar));
970 return;
971 }
972 mutex_enter(&sc->l2tp_lock);
973
974 ovar = sc->l2tp_var;
975 osrc = ovar->lv_psrc;
976 odst = ovar->lv_pdst;
977 *nvar = *ovar;
978 psref_target_init(&nvar->lv_psref, lv_psref_class);
979 nvar->lv_psrc = NULL;
980 nvar->lv_pdst = NULL;
981 membar_producer();
982 l2tp_variant_update(sc, nvar);
983
984 mutex_exit(&sc->l2tp_lock);
985
986 (void)l2tp_encap_detach(ovar);
987 encap_lock_exit();
988
989 if (osrc)
990 sockaddr_free(osrc);
991 if (odst)
992 sockaddr_free(odst);
993 kmem_free(ovar, sizeof(*ovar));
994 }
995
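/*
 * Fold the 32-bit session id down to a hash bucket index by xor-ing its
 * halves together; L2TP_ID_HASH_SIZE must therefore be a power of two.
 */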
996 static int id_hash_func(uint32_t id)
997 {
998 uint32_t hash;
999
1000 hash = (id >> 16) ^ id;
1001 hash = (hash >> 4) ^ hash;
1002
1003 return hash & (L2TP_ID_HASH_SIZE - 1);
1004 }
1005
1006 static void
1007 l2tp_hash_init(void)
1008 {
1009 u_long mask;
1010
1011 l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
1012 &mask);
1013 KASSERT(mask == (L2TP_ID_HASH_SIZE - 1));
1014 }
1015
1016 static int
1017 l2tp_hash_fini(void)
1018 {
1019 int i;
1020
1021 mutex_enter(&l2tp_hash.lock);
1022
1023 for (i = 0; i < L2TP_ID_HASH_SIZE; i++) {
1024 if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
1025 l2tp_hash) != NULL) {
1026 mutex_exit(&l2tp_hash.lock);
1027 return EBUSY;
1028 }
1029 }
1030 for (i = 0; i < L2TP_ID_HASH_SIZE; i++)
1031 PSLIST_DESTROY(&l2tp_hash.lists[i]);
1032
1033 mutex_exit(&l2tp_hash.lock);
1034
1035 hashdone(l2tp_hash.lists, HASH_PSLIST, L2TP_ID_HASH_SIZE - 1);
1036
1037 return 0;
1038 }
1039
1040 static int
1041 l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
1042 uint32_t peer_sess_id)
1043 {
1044 uint32_t idx;
1045 struct l2tp_variant *nvar;
1046 struct l2tp_variant *ovar;
1047 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1048
1049 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1050
1051 mutex_enter(&sc->l2tp_lock);
1052 ovar = sc->l2tp_var;
1053 *nvar = *ovar;
1054 psref_target_init(&nvar->lv_psref, lv_psref_class);
1055 nvar->lv_my_sess_id = my_sess_id;
1056 nvar->lv_peer_sess_id = peer_sess_id;
1057 membar_producer();
1058
1059 mutex_enter(&l2tp_hash.lock);
1060 if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
1061 PSLIST_WRITER_REMOVE(sc, l2tp_hash);
1062 pserialize_perform(l2tp_psz);
1063 }
1064 mutex_exit(&l2tp_hash.lock);
1065
1066 l2tp_variant_update(sc, nvar);
1067 mutex_exit(&sc->l2tp_lock);
1068
1069 idx = id_hash_func(nvar->lv_my_sess_id);
1070 if ((ifp->if_flags & IFF_DEBUG) != 0)
1071 log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
1072 sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);
1073
1074 mutex_enter(&l2tp_hash.lock);
1075 PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
1076 mutex_exit(&l2tp_hash.lock);
1077
1078 kmem_free(ovar, sizeof(*ovar));
1079 return 0;
1080 }
1081
1082 static int
1083 l2tp_clear_session(struct l2tp_softc *sc)
1084 {
1085 struct l2tp_variant *nvar;
1086 struct l2tp_variant *ovar;
1087
1088 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1089
1090 mutex_enter(&sc->l2tp_lock);
1091 ovar = sc->l2tp_var;
1092 *nvar = *ovar;
1093 psref_target_init(&nvar->lv_psref, lv_psref_class);
1094 nvar->lv_my_sess_id = 0;
1095 nvar->lv_peer_sess_id = 0;
1096 membar_producer();
1097
1098 mutex_enter(&l2tp_hash.lock);
1099 if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
1100 PSLIST_WRITER_REMOVE(sc, l2tp_hash);
1101 pserialize_perform(l2tp_psz);
1102 }
1103 mutex_exit(&l2tp_hash.lock);
1104
1105 l2tp_variant_update(sc, nvar);
1106 mutex_exit(&sc->l2tp_lock);
1107 kmem_free(ovar, sizeof(*ovar));
1108 return 0;
1109 }
1110
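/*
 * Look up the variant whose local session id matches "id".  The list is
 * walked inside a pserialize read section and a psref is acquired on the
 * variant before the section is left, so the caller must release it with
 * psref_release() (or l2tp_putref_variant()) when done.
 */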
1111 struct l2tp_variant *
1112 l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
1113 {
1114 int idx;
1115 int s;
1116 struct l2tp_softc *sc;
1117
1118 idx = id_hash_func(id);
1119
1120 s = pserialize_read_enter();
1121 PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
1122 l2tp_hash) {
1123 struct l2tp_variant *var = sc->l2tp_var;
1124 if (var == NULL)
1125 continue;
1126 if (var->lv_my_sess_id != id)
1127 continue;
1128 psref_acquire(psref, &var->lv_psref, lv_psref_class);
1129 pserialize_read_exit(s);
1130 return var;
1131 }
1132 pserialize_read_exit(s);
1133 return NULL;
1134 }
1135
1136 /*
1137 * l2tp_variant update API.
1138 *
1139 * Assumption:
1140 * The reader side dereferences sc->l2tp_var only inside a reader critical
1141 * section; that is, no reader dereferences sc->l2tp_var after
1142 * pserialize_perform() has returned.
1143 */
1144 static void
1145 l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
1146 {
1147 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1148 struct l2tp_variant *ovar = sc->l2tp_var;
1149
1150 KASSERT(mutex_owned(&sc->l2tp_lock));
1151
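	/*
	 * Publish the new variant, wait for readers still using the old
	 * pointer to leave their pserialize read sections, and then wait
	 * for any psref holders of the old variant to drain before the
	 * caller frees it.
	 */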
1152 sc->l2tp_var = nvar;
1153 pserialize_perform(l2tp_psz);
1154 psref_target_destroy(&ovar->lv_psref, lv_psref_class);
1155
1156 /*
1157 * The manual of atomic_swap_ptr(3) does not say whether the 2nd
1158 * argument may be modified or not. So, use sc->l2tp_var instead of nvar.
1159 */
1160 if (sc->l2tp_var->lv_psrc != NULL && sc->l2tp_var->lv_pdst != NULL)
1161 ifp->if_flags |= IFF_RUNNING;
1162 else
1163 ifp->if_flags &= ~IFF_RUNNING;
1164 }
1165
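/*
 * Configure the L2TPv3 cookies.  Each cookie, when present, is either 4 or
 * 8 bytes long (cf. RFC 3931); setting them marks the variant as using
 * cookies (L2TP_COOKIE_ON).
 */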
1166 static int
1167 l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
1168 uint64_t peer_cookie, u_int peer_cookie_len)
1169 {
1170 struct l2tp_variant *nvar;
1171
1172 if (my_cookie == 0 || peer_cookie == 0)
1173 return EINVAL;
1174
1175 if ((my_cookie_len != 4 && my_cookie_len != 8)
1176 || (peer_cookie_len != 4 && peer_cookie_len != 8))
1177 return EINVAL;
1178
1179 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1180
1181 mutex_enter(&sc->l2tp_lock);
1182
1183 *nvar = *sc->l2tp_var;
1184 psref_target_init(&nvar->lv_psref, lv_psref_class);
1185 nvar->lv_my_cookie = my_cookie;
1186 nvar->lv_my_cookie_len = my_cookie_len;
1187 nvar->lv_peer_cookie = peer_cookie;
1188 nvar->lv_peer_cookie_len = peer_cookie_len;
1189 nvar->lv_use_cookie = L2TP_COOKIE_ON;
1190 membar_producer();
1191 l2tp_variant_update(sc, nvar);
1192
1193 mutex_exit(&sc->l2tp_lock);
1194
1195 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1196 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1197 log(LOG_DEBUG,
1198 "%s: set cookie: "
1199 "local cookie_len=%u local cookie=%" PRIu64 ", "
1200 "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
1201 ifp->if_xname, my_cookie_len, my_cookie,
1202 peer_cookie_len, peer_cookie);
1203 }
1204
1205 return 0;
1206 }
1207
1208 static void
1209 l2tp_clear_cookie(struct l2tp_softc *sc)
1210 {
1211 struct l2tp_variant *nvar;
1212
1213 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1214
1215 mutex_enter(&sc->l2tp_lock);
1216
1217 *nvar = *sc->l2tp_var;
1218 psref_target_init(&nvar->lv_psref, lv_psref_class);
1219 nvar->lv_my_cookie = 0;
1220 nvar->lv_my_cookie_len = 0;
1221 nvar->lv_peer_cookie = 0;
1222 nvar->lv_peer_cookie_len = 0;
1223 nvar->lv_use_cookie = L2TP_COOKIE_OFF;
1224 membar_producer();
1225 l2tp_variant_update(sc, nvar);
1226
1227 mutex_exit(&sc->l2tp_lock);
1228 }
1229
1230 static void
1231 l2tp_set_state(struct l2tp_softc *sc, int state)
1232 {
1233 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1234 struct l2tp_variant *nvar;
1235
1236 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1237
1238 mutex_enter(&sc->l2tp_lock);
1239
1240 *nvar = *sc->l2tp_var;
1241 psref_target_init(&nvar->lv_psref, lv_psref_class);
1242 nvar->lv_state = state;
1243 membar_producer();
1244 l2tp_variant_update(sc, nvar);
1245
1246 if (nvar->lv_state == L2TP_STATE_UP) {
1247 ifp->if_link_state = LINK_STATE_UP;
1248 } else {
1249 ifp->if_link_state = LINK_STATE_DOWN;
1250 }
1251
1252 mutex_exit(&sc->l2tp_lock);
1253
1254 #ifdef NOTYET
1255 #if NVLAN > 0
1256 vlan_linkstate_notify(ifp, ifp->if_link_state);
1257 #endif
1258 #endif
1259 }
1260
1261 static int
1262 l2tp_encap_attach(struct l2tp_variant *var)
1263 {
1264 int error;
1265
1266 if (var == NULL || var->lv_psrc == NULL)
1267 return EINVAL;
1268
1269 switch (var->lv_psrc->sa_family) {
1270 #ifdef INET
1271 case AF_INET:
1272 error = in_l2tp_attach(var);
1273 break;
1274 #endif
1275 #ifdef INET6
1276 case AF_INET6:
1277 error = in6_l2tp_attach(var);
1278 break;
1279 #endif
1280 default:
1281 error = EINVAL;
1282 break;
1283 }
1284
1285 return error;
1286 }
1287
1288 static int
1289 l2tp_encap_detach(struct l2tp_variant *var)
1290 {
1291 int error;
1292
1293 if (var == NULL || var->lv_psrc == NULL)
1294 return EINVAL;
1295
1296 switch (var->lv_psrc->sa_family) {
1297 #ifdef INET
1298 case AF_INET:
1299 error = in_l2tp_detach(var);
1300 break;
1301 #endif
1302 #ifdef INET6
1303 case AF_INET6:
1304 error = in6_l2tp_detach(var);
1305 break;
1306 #endif
1307 default:
1308 error = EINVAL;
1309 break;
1310 }
1311
1312 return error;
1313 }
1314
1315 /*
1316 * TODO:
1317 * unify with gif_check_nesting().
1318 */
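/*
 * Nesting is tracked with a PACKET_TAG_TUNNEL_INFO mbuf tag: the counter
 * stored after the tag is incremented every time the packet re-enters a
 * tunnel, and the packet is rejected once it exceeds max_l2tp_nesting.
 */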
1319 int
1320 l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
1321 {
1322 struct m_tag *mtag;
1323 int *count;
1324
1325 mtag = m_tag_find(m, PACKET_TAG_TUNNEL_INFO, NULL);
1326 if (mtag != NULL) {
1327 count = (int *)(mtag + 1);
1328 if (++(*count) > max_l2tp_nesting) {
1329 log(LOG_NOTICE,
1330 "%s: recursively called too many times(%d)\n",
1331 if_name(ifp),
1332 *count);
1333 return EIO;
1334 }
1335 } else {
1336 mtag = m_tag_get(PACKET_TAG_TUNNEL_INFO, sizeof(*count),
1337 M_NOWAIT);
1338 if (mtag != NULL) {
1339 m_tag_prepend(m, mtag);
1340 count = (int *)(mtag + 1);
1341 *count = 0;
1342 }
1343 #ifdef L2TP_DEBUG
1344 else {
1345 log(LOG_DEBUG,
1346 "%s: m_tag_get() failed, recursion calls are not prevented.\n",
1347 if_name(ifp));
1348 }
1349 #endif
1350 }
1351
1352 return 0;
1353 }
1354
1355 /*
1356 * Module infrastructure
1357 */
1358 #include "if_module.h"
1359
1360 IF_MODULE(MODULE_CLASS_DRIVER, l2tp, "")
1361
1362
1363 /* TODO: IP_TCPMSS support */
1364 #ifdef IP_TCPMSS
1365 static int l2tp_need_tcpmss_clamp(struct ifnet *);
1366 #ifdef INET
1367 static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
1368 #endif
1369 #ifdef INET6
1370 static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
1371 #endif
1372
1373 struct mbuf *
1374 l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
1375 {
1376
1377 if (l2tp_need_tcpmss_clamp(ifp)) {
1378 struct ether_header *eh;
1379 struct ether_vlan_header evh;
1380
1381 /* save ether header */
1382 m_copydata(m, 0, sizeof(evh), (void *)&evh);
1383 eh = (struct ether_header *)&evh;
1384
1385 switch (ntohs(eh->ether_type)) {
1386 case ETHERTYPE_VLAN: /* Ether + VLAN */
1387 if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
1388 break;
1389 m_adj(m, sizeof(struct ether_vlan_header));
1390 switch (ntohs(evh.evl_proto)) {
1391 #ifdef INET
1392 case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
1393 m = l2tp_tcpmss4_clamp(ifp, m);
1394 if (m == NULL)
1395 return NULL;
1396 break;
1397 #endif /* INET */
1398 #ifdef INET6
1399 case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
1400 m = l2tp_tcpmss6_clamp(ifp, m);
1401 if (m == NULL)
1402 return NULL;
1403 break;
1404 #endif /* INET6 */
1405 default:
1406 break;
1407 }
1408 /* restore ether header */
1409 M_PREPEND(m, sizeof(struct ether_vlan_header),
1410 M_DONTWAIT);
1411 if (m == NULL)
1412 return NULL;
1413 *mtod(m, struct ether_vlan_header *) = evh;
1414 break;
1415 #ifdef INET
1416 case ETHERTYPE_IP: /* Ether + IPv4 */
1417 if (m->m_pkthdr.len <= sizeof(struct ether_header))
1418 break;
1419 m_adj(m, sizeof(struct ether_header));
1420 m = l2tp_tcpmss4_clamp(ifp, m);
1421 if (m == NULL)
1422 return NULL;
1423 /* restore ether header */
1424 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
1425 if (m == NULL)
1426 return NULL;
1427 *mtod(m, struct ether_header *) = *eh;
1428 break;
1429 #endif /* INET */
1430 #ifdef INET6
1431 case ETHERTYPE_IPV6: /* Ether + IPv6 */
1432 if (m->m_pkthdr.len <= sizeof(struct ether_header))
1433 break;
1434 m_adj(m, sizeof(struct ether_header));
1435 m = l2tp_tcpmss6_clamp(ifp, m);
1436 if (m == NULL)
1437 return NULL;
1438 /* restore ether header */
1439 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
1440 if (m == NULL)
1441 return NULL;
1442 *mtod(m, struct ether_header *) = *eh;
1443 break;
1444 #endif /* INET6 */
1445 default:
1446 break;
1447 }
1448 }
1449
1450 return m;
1451 }
1452
1453 static int
1454 l2tp_need_tcpmss_clamp(struct ifnet *ifp)
1455 {
1456 int ret = 0;
1457
1458 #ifdef INET
1459 if (ifp->if_tcpmss != 0)
1460 ret = 1;
1461 #endif /* INET */
1462
1463 #ifdef INET6
1464 if (ifp->if_tcpmss6 != 0)
1465 ret = 1;
1466 #endif /* INET6 */
1467
1468 return ret;
1469 }
1470
1471 #ifdef INET
1472 static struct mbuf *
1473 l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
1474 {
1475
1476 if (ifp->if_tcpmss != 0) {
1477 return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
1478 ifp->if_mtu - IP_TCPMSS_EXTLEN :
1479 ifp->if_tcpmss);
1480 }
1481 return m;
1482 }
1483 #endif /* INET */
1484
1485 #ifdef INET6
1486 static struct mbuf *
1487 l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
1488 {
1489 int ip6hdrlen;
1490
1491 if (ifp->if_tcpmss6 != 0 &&
1492 ip6_tcpmss_applicable(m, &ip6hdrlen)) {
1493 return ip6_tcpmss(m, ip6hdrlen,
1494 (ifp->if_tcpmss6 < 0) ?
1495 ifp->if_mtu - IP6_TCPMSS_EXTLEN :
1496 ifp->if_tcpmss6);
1497 }
1498 return m;
1499 }
1500 #endif /* INET6 */
1501
1502 #endif /* IP_TCPMSS */
1503