if_l2tp.c revision 1.3 1 /* $NetBSD: if_l2tp.c,v 1.3 2017/04/03 10:08:24 knakahara Exp $ */
2
3 /*
4 * Copyright (c) 2017 Internet Initiative Japan Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * L2TPv3 kernel interface
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.3 2017/04/03 10:08:24 knakahara Exp $");
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/mbuf.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/errno.h>
47 #include <sys/ioctl.h>
48 #include <sys/time.h>
49 #include <sys/syslog.h>
50 #include <sys/proc.h>
51 #include <sys/conf.h>
52 #include <sys/kauth.h>
53 #include <sys/cpu.h>
54 #include <sys/cprng.h>
55 #include <sys/intr.h>
56 #include <sys/kmem.h>
57 #include <sys/mutex.h>
58 #include <sys/atomic.h>
59 #include <sys/pserialize.h>
60 #include <sys/device.h>
61 #include <sys/module.h>
62
63 #include <net/if.h>
64 #include <net/if_dl.h>
65 #include <net/if_ether.h>
66 #include <net/if_types.h>
67 #include <net/netisr.h>
68 #include <net/route.h>
69 #include <net/bpf.h>
70 #include <net/if_vlanvar.h>
71
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip_encap.h>
76 #ifdef INET
77 #include <netinet/in_var.h>
78 #include <netinet/in_l2tp.h>
79 #endif /* INET */
80 #ifdef INET6
81 #include <netinet6/in6_l2tp.h>
82 #endif
83
84 #include <net/if_l2tp.h>
85
86 #if NVLAN > 0
87 #include <net/if_vlanvar.h>
88 #endif
89
90 /* TODO: IP_TCPMSS support */
91 #undef IP_TCPMSS
92 #ifdef IP_TCPMSS
93 #include <netinet/ip_tcpmss.h>
94 #endif
95
96 #include <net/bpf.h>
97 #include <net/net_osdep.h>
98
/*
 * l2tp global variable definitions
 */
LIST_HEAD(l2tp_sclist, l2tp_softc);
/* Global list of every l2tp softc; "lock" protects list membership only. */
static struct {
	struct l2tp_sclist list;
	kmutex_t lock;
} l2tp_softcs __cacheline_aligned;


#if !defined(L2TP_ID_HASH_SIZE)
#define L2TP_ID_HASH_SIZE 64	/* must stay a power of two: id_hash_func() masks with (size - 1) */
#endif
/*
 * Hash table mapping local session ids to softcs.  Readers traverse the
 * pslists under pserialize read sections; writers hold "lock".
 */
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
} l2tp_hash __cacheline_aligned = {
	.lists = NULL,
};

/* Passive-serialization domain and psref class used for l2tp_variant. */
pserialize_t l2tp_psz __read_mostly;
struct psref_class *lv_psref_class __read_mostly;

static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);

static int l2tp_clone_create(struct if_clone *, int);
static int l2tp_clone_destroy(struct ifnet *);

struct if_clone l2tp_cloner =
    IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);

static int l2tp_output(struct ifnet *, struct mbuf *,
    const struct sockaddr *, const struct rtentry *);
static void l2tpintr(struct l2tp_variant *);

static void l2tp_hash_init(void);
static int l2tp_hash_fini(void);

static void l2tp_start(struct ifnet *);
static int l2tp_transmit(struct ifnet *, struct mbuf *);

static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
    struct sockaddr *);
static void l2tp_delete_tunnel(struct ifnet *);

static int id_hash_func(uint32_t);

static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
static int l2tp_clear_session(struct l2tp_softc *);
static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
static void l2tp_clear_cookie(struct l2tp_softc *);
static void l2tp_set_state(struct l2tp_softc *, int);
static int l2tp_encap_attach(struct l2tp_variant *);
static int l2tp_encap_detach(struct l2tp_variant *);

#ifndef MAX_L2TP_NEST
/*
 * This macro controls the upper limitation on nesting of l2tp tunnels.
 * Since, setting a large value to this macro with a careless configuration
 * may introduce system crash, we don't allow any nestings by default.
 * If you need to configure nested l2tp tunnels, you can define this macro
 * in your kernel configuration file. However, if you do so, please be
 * careful to configure the tunnels so that it won't make a loop.
 */
/*
 * XXX
 * Currently, if in_l2tp_output recursively calls, it causes locking against
 * myself of struct l2tp_ro->lr_lock. So, nested l2tp tunnels is prohibited.
 */
#define MAX_L2TP_NEST 0
#endif

static int max_l2tp_nesting = MAX_L2TP_NEST;
174
/*
 * Legacy pseudo-device attach entry point.  Intentionally empty: all
 * real setup is performed by l2tpinit() through the module framework.
 */
/* ARGSUSED */
void
l2tpattach(int count)
{
	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in l2tpinit() below).
	 */
}
184
/*
 * One-time module initialization: set up the global softc list, the
 * pserialize/psref machinery used by lockless readers, the session-id
 * hash table, and register the "l2tp" interface cloner.
 */
static void
l2tpinit(void)
{

	mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&l2tp_softcs.list);

	mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	l2tp_psz = pserialize_create();
	/* IPL_SOFTNET: variants are referenced from softint context. */
	lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
	if_clone_attach(&l2tp_cloner);

	l2tp_hash_init();
}
199
200 static int
201 l2tpdetach(void)
202 {
203 int error;
204
205 mutex_enter(&l2tp_softcs.lock);
206 if (!LIST_EMPTY(&l2tp_softcs.list)) {
207 mutex_exit(&l2tp_softcs.lock);
208 return EBUSY;
209 }
210 mutex_exit(&l2tp_softcs.lock);
211
212 error = l2tp_hash_fini();
213 if (error)
214 return error;
215
216 if_clone_detach(&l2tp_cloner);
217 psref_class_destroy(lv_psref_class);
218 pserialize_destroy(l2tp_psz);
219 mutex_destroy(&l2tp_hash.lock);
220
221 return error;
222 }
223
/*
 * Create a new l2tp(4) instance ("l2tpN").  Allocates the softc and
 * its initial variant (state DOWN, cookies off), attaches the network
 * interface, sets up the per-CPU cached routes and links the softc
 * onto the global list.  Always succeeds (KM_SLEEP allocations).
 */
static int
l2tp_clone_create(struct if_clone *ifc, int unit)
{
	struct l2tp_softc *sc;
	struct l2tp_variant *var;

	/* KM_SLEEP allocations cannot fail; they may sleep. */
	sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
	var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);

	var->lv_softc = sc;
	var->lv_state = L2TP_STATE_DOWN;
	var->lv_use_cookie = L2TP_COOKIE_OFF;
	psref_target_init(&var->lv_psref, lv_psref_class);

	sc->l2tp_var = var;
	mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
	PSLIST_ENTRY_INIT(sc, l2tp_hash);

	if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);

	l2tpattach0(sc);

	/* Per-CPU cached route toward the tunnel peer. */
	sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
	KASSERTMSG(sc->l2tp_ro_percpu != NULL,
	    "failed to allocate sc->l2tp_ro_percpu");
	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);

	mutex_enter(&l2tp_softcs.lock);
	LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	return (0);
}
257
258 void
259 l2tpattach0(struct l2tp_softc *sc)
260 {
261
262 sc->l2tp_ec.ec_if.if_addrlen = 0;
263 sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
264 sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
265 sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
266 sc->l2tp_ec.ec_if.if_output = l2tp_output;
267 sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
268 sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
269 sc->l2tp_ec.ec_if.if_start = l2tp_start;
270 sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
271 sc->l2tp_ec.ec_if._if_input = ether_input;
272 IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);
273 if_attach(&sc->l2tp_ec.ec_if);
274 if_alloc_sadl(&sc->l2tp_ec.ec_if);
275 bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));
276 }
277
/*
 * percpu_foreach() callback: initialize one per-CPU cached-route slot.
 */
void
l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct l2tp_ro *lro = p;

	mutex_init(&lro->lr_lock, MUTEX_DEFAULT, IPL_NONE);
}
285
/*
 * percpu_foreach() callback: release one per-CPU cached-route slot
 * (drop the cached route, then destroy its lock).
 */
void
l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct l2tp_ro *lro = p;

	rtcache_free(&lro->lr_ro);

	mutex_destroy(&lro->lr_lock);
}
295
296 static int
297 l2tp_clone_destroy(struct ifnet *ifp)
298 {
299 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
300 l2tp_ec.ec_if);
301
302 l2tp_clear_session(sc);
303 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
304 /*
305 * To avoid for l2tp_transmit() to access sc->l2tp_var after free it.
306 */
307 mutex_enter(&sc->l2tp_lock);
308 l2tp_variant_update(sc, NULL);
309 mutex_exit(&sc->l2tp_lock);
310
311 mutex_enter(&l2tp_softcs.lock);
312 LIST_REMOVE(sc, l2tp_list);
313 mutex_exit(&l2tp_softcs.lock);
314
315 bpf_detach(ifp);
316
317 if_detach(ifp);
318
319 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
320 percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));
321
322 kmem_free(sc->l2tp_var, sizeof(struct l2tp_variant));
323 mutex_destroy(&sc->l2tp_lock);
324 kmem_free(sc, sizeof(struct l2tp_softc));
325
326 return 0;
327 }
328
/*
 * if_output handler.  Prepend a DLT_NULL-style address-family word,
 * enqueue the packet on if_snd, then drain the queue synchronously via
 * l2tpintr().  A psref on the current variant is held for the whole
 * call so the tunnel state cannot be freed underneath us.
 */
static int
l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var;
	struct psref psref;
	int error = 0;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		/* Interface is being destroyed. */
		m_freem(m);
		return ENETDOWN;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* Outer tunnel endpoints must be configured before we can send. */
	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* XXX should we check if our outer source is legal? */

	/* use DLT_NULL encapsulation here to pass inner af type */
	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (!m) {
		/* M_PREPEND freed the chain on failure. */
		error = ENOBUFS;
		goto end;
	}
	*mtod(m, int *) = dst->sa_family;

	IFQ_ENQUEUE(&ifp->if_snd, m, error);
	if (error)
		goto end;

	/*
	 * direct call to avoid infinite loop at l2tpintr()
	 */
	l2tpintr(var);

	error = 0;

end:
	l2tp_putref_variant(var, &psref);
	if (error)
		ifp->if_oerrors++;

	return error;
}
389
/*
 * Drain the interface send queue, encapsulating each queued frame into
 * the tunnel's outer protocol (IPv4 or IPv6 depending on lv_psrc).
 * The queue is purged wholesale when no session is established.
 * Caller must hold a psref on "var".
 */
static void
l2tpintr(struct l2tp_variant *var)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* output processing */
	if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
		/* No session: drop everything that was queued. */
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		m->m_flags &= ~(M_BCAST|M_MCAST);
		bpf_mtap(ifp, m);
		/* in_l2tp_output()/in6_l2tp_output() consume the mbuf. */
		switch (var->lv_psrc->sa_family) {
#ifdef INET
		case AF_INET:
			error = in_l2tp_output(var, m);
			break;
#endif
#ifdef INET6
		case AF_INET6:
			error = in6_l2tp_output(var, m);
			break;
#endif
		default:
			m_freem(m);
			error = ENETDOWN;
			break;
		}

		if (error)
			ifp->if_oerrors++;
		else {
			ifp->if_opackets++;
			/*
			 * obytes is incremented at ether_output() or
			 * bridge_enqueue().
			 */
		}
	}

}
444
/*
 * Input path: hand a decapsulated inner Ethernet frame up the stack.
 *
 * Alignment trick: if the payload currently starts 4-byte aligned, the
 * inner IP header (which follows the 14-byte Ethernet header) would be
 * misaligned.  In that case the head of the payload is copied into a
 * fresh mbuf at a 2-byte offset so the IP header lands on a 4-byte
 * boundary, and the remainder of the original chain is linked behind.
 */
void
l2tp_input(struct mbuf *m, struct ifnet *ifp)
{

	KASSERT(ifp != NULL);

	if (0 == (mtod(m, u_long) & 0x03)) {
		/* copy and align head of payload */
		struct mbuf *m_head;
		int copy_length;

#define L2TP_COPY_LENGTH 60
#define L2TP_LINK_HDR_ROOM (MHLEN - L2TP_COPY_LENGTH - 4/*round4(2)*/)

		/* Copy at most L2TP_COPY_LENGTH bytes into the new head. */
		if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
			copy_length = m->m_pkthdr.len;
		} else {
			copy_length = L2TP_COPY_LENGTH;
		}

		if (m->m_len < copy_length) {
			m = m_pullup(m, copy_length);
			if (m == NULL)
				return;
		}

		MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
		if (m_head == NULL) {
			m_freem(m);
			return;
		}
		M_COPY_PKTHDR(m_head, m);

		/* 2-byte shift so the inner IP header is 4-byte aligned. */
		m_head->m_data += 2 /* align */ + L2TP_LINK_HDR_ROOM;
		memcpy(m_head->m_data, m->m_data, copy_length);
		m_head->m_len = copy_length;
		m->m_data += copy_length;
		m->m_len -= copy_length;

		/* construct chain */
		if (m->m_len == 0) {
			/* Entire payload was copied: free only the first mbuf
			 * and keep any remaining chain linked behind. */
			m_head->m_next = m_free(m); /* not m_freem */
		} else {
			/*
			 * copyed mtag in previous call M_COPY_PKTHDR
			 * but don't delete mtag in case cutt of M_PKTHDR flag
			 */
			m_tag_delete_chain(m, NULL);
			m->m_flags &= ~M_PKTHDR;
			m_head->m_next = m;
		}

		/* override m */
		m = m_head;
	}

	m_set_rcvif(m, ifp);

	/*
	 * bpf_mtap() and ifp->if_ipackets++ is done in if_input()
	 *
	 * obytes is incremented at ether_output() or bridge_enqueue().
	 */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}
510
511 void
512 l2tp_start(struct ifnet *ifp)
513 {
514 struct psref psref;
515 struct l2tp_variant *var;
516 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
517 l2tp_ec.ec_if);
518
519 var = l2tp_getref_variant(sc, &psref);
520 if (var == NULL)
521 return;
522
523 if (var->lv_psrc == NULL || var->lv_pdst == NULL)
524 return;
525
526 l2tpintr(var);
527 l2tp_putref_variant(var, &psref);
528 }
529
530 int
531 l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
532 {
533 int error;
534 struct psref psref;
535 struct l2tp_variant *var;
536 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
537 l2tp_ec.ec_if);
538
539 var = l2tp_getref_variant(sc, &psref);
540 if (var == NULL) {
541 m_freem(m);
542 return ENETDOWN;
543 }
544
545 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
546 m_freem(m);
547 error = ENETDOWN;
548 goto out;
549 }
550
551 m->m_flags &= ~(M_BCAST|M_MCAST);
552 bpf_mtap(ifp, m);
553 switch (var->lv_psrc->sa_family) {
554 #ifdef INET
555 case AF_INET:
556 error = in_l2tp_output(var, m);
557 break;
558 #endif
559 #ifdef INET6
560 case AF_INET6:
561 error = in6_l2tp_output(var, m);
562 break;
563 #endif
564 default:
565 m_freem(m);
566 error = ENETDOWN;
567 break;
568 }
569
570 if (error)
571 ifp->if_oerrors++;
572 else {
573 ifp->if_opackets++;
574 /*
575 * obytes is incremented at ether_output() or bridge_enqueue().
576 */
577 }
578
579 out:
580 l2tp_putref_variant(var, &psref);
581 return error;
582 }
583
/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
/*
 * ioctl handler for l2tp(4) interfaces.
 *
 * Handles interface configuration (addresses, MTU, multicast), the
 * tunnel outer-address ioctls (SIOC[SGD]IFPHYADDR*, SIOC[SG]LIFPHYADDR)
 * and the l2tp-specific session/cookie/state ioctls (SIOC[SGD]L2TP*).
 * Read paths bind the LWP to the CPU (curlwp_bind) and take a psref on
 * the current variant so the variant cannot be freed while its fields
 * are being copied out.
 */
int
l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var, *var_tmp;
	struct ifreq *ifr = data;
	int error = 0, size;
	struct sockaddr *dst, *src;
	struct l2tp_req l2tpr;
	u_long mtu;
	int bound;
	struct psref psref;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		break;

	case SIOCSIFDSTADDR:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:	/* IP supports Multicast */
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif /* INET6 */
		default:  /* Other protocols doesn't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
			return (EINVAL);
		ifp->if_mtu = mtu;
		break;

#ifdef INET
	case SIOCSIFPHYADDR:
		/* Set IPv4 outer src/dst; both must be AF_INET. */
		src = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in)
		    || dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET */
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
		/* Set IPv6 outer src/dst; both must be AF_INET6. */
		src = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in6)
		    || dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET6 */
	case SIOCSLIFPHYADDR:
		/* AF-generic outer address pair; families must match. */
		src = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_family != dst->sa_family)
			return EINVAL;
		else if (src->sa_family == AF_INET
		    && src->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (src->sa_family == AF_INET6
		    && src->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		else if (dst->sa_family == AF_INET
		    && dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (dst->sa_family == AF_INET6
		    && dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

	case SIOCDIFPHYADDR:
		l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
		break;

	case SIOCGIFPSRCADDR:
#ifdef INET6
	case SIOCGIFPSRCADDR_IN6:
#endif /* INET6 */
		/* Copy out the outer source address, if configured. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_psrc;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPSRCADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPSRCADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			/* Stored address does not fit the caller's buffer. */
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGIFPDSTADDR:
#ifdef INET6
	case SIOCGIFPDSTADDR_IN6:
#endif /* INET6 */
		/* Copy out the outer destination address, if configured. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_pdst;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPDSTADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPDSTADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGLIFPHYADDR:
		/* Copy out both outer addresses at once. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		/* copy src */
		src = var->lv_psrc;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		size = sizeof(((struct if_laddrreq *)data)->addr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);

		/* copy dst */
		src = var->lv_pdst;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		size = sizeof(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCSL2TPSESSION:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		/* session id must not zero */
		if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
			return EINVAL;

		/* Reject a local session id already claimed by another
		 * interface; the hash lookup requires bind+psref. */
		bound = curlwp_bind();
		var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
		if (var_tmp != NULL) {
			/* duplicate session id */
			log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
			    sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
			    var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
			psref_release(&psref, &var_tmp->lv_psref,
			    lv_psref_class);
			curlwp_bindx(bound);
			return EINVAL;
		}
		curlwp_bindx(bound);

		error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
		break;
	case SIOCDL2TPSESSION:
		l2tp_clear_session(sc);
		break;
	case SIOCSL2TPCOOKIE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
		    l2tpr.peer_cookie, l2tpr.peer_cookie_len);
		break;
	case SIOCDL2TPCOOKIE:
		l2tp_clear_cookie(sc);
		break;
	case SIOCSL2TPSTATE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		l2tp_set_state(sc, l2tpr.state);
		break;
	case SIOCGL2TP:
		/* get L2TPV3 session info */
		memset(&l2tpr, 0, sizeof(l2tpr));

		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		l2tpr.state = var->lv_state;
		l2tpr.my_sess_id = var->lv_my_sess_id;
		l2tpr.peer_sess_id = var->lv_peer_sess_id;
		l2tpr.my_cookie = var->lv_my_cookie;
		l2tpr.my_cookie_len = var->lv_my_cookie_len;
		l2tpr.peer_cookie = var->lv_peer_cookie;
		l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);

		error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
		break;
	}
bad:
	return error;
}
903
/*
 * Configure the outer (tunnel) source/destination addresses.  A new
 * variant carrying duplicated addresses is built and published via
 * l2tp_variant_update(), so lockless readers never observe a
 * half-updated address pair; the old variant and its addresses are
 * freed only after the pserialize grace period.
 *
 * Lock order: encap_lock -> sc->l2tp_lock.
 */
static int
l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct sockaddr *nsrc, *ndst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	/* Allocate everything that can sleep before taking any lock. */
	nsrc = sockaddr_dup(src, M_WAITOK);
	ndst = sockaddr_dup(dst, M_WAITOK);

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error)
		goto error;

	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = nsrc;
	nvar->lv_pdst = ndst;
	/* Register the new addresses with the encap framework first. */
	error = l2tp_encap_attach(nvar);
	if (error) {
		mutex_exit(&sc->l2tp_lock);
		encap_lock_exit();
		goto error;
	}
	/* Make nvar's fields globally visible before publishing it. */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	/* Old variant is unreachable now; free its resources. */
	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));

	return 0;

error:
	sockaddr_free(nsrc);
	sockaddr_free(ndst);
	kmem_free(nvar, sizeof(*nvar));

	return error;
}
961
/*
 * Remove the outer (tunnel) addresses: publish a new variant with NULL
 * endpoints, detach the old variant from the encap framework and free
 * its addresses after the pserialize grace period.
 *
 * Lock order: encap_lock -> sc->l2tp_lock.
 */
static void
l2tp_delete_tunnel(struct ifnet *ifp)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error) {
		/* Could not take the encap lock; leave state unchanged. */
		kmem_free(nvar, sizeof(*nvar));
		return;
	}
	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = NULL;
	nvar->lv_pdst = NULL;
	/* Make nvar's fields globally visible before publishing it. */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	/* Old variant is unreachable now; free its resources. */
	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));
}
1001
1002 static int
1003 id_hash_func(uint32_t id)
1004 {
1005 uint32_t hash;
1006
1007 hash = (id >> 16) ^ id;
1008 hash = (hash >> 4) ^ hash;
1009
1010 return hash & (L2TP_ID_HASH_SIZE - 1);
1011 }
1012
/*
 * Allocate the session-id hash table (L2TP_ID_HASH_SIZE pslist heads).
 */
static void
l2tp_hash_init(void)
{
	u_long mask;

	l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
	    &mask);
	/* hashinit() rounds to a power of two; size already is one. */
	KASSERT(mask == (L2TP_ID_HASH_SIZE - 1));
}
1022
/*
 * Free the session-id hash table.  Returns EBUSY without freeing
 * anything if any bucket is still populated (a session still exists).
 */
static int
l2tp_hash_fini(void)
{
	int i;

	mutex_enter(&l2tp_hash.lock);

	for (i = 0; i < L2TP_ID_HASH_SIZE; i++) {
		if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
			l2tp_hash) != NULL) {
			mutex_exit(&l2tp_hash.lock);
			return EBUSY;
		}
	}
	for (i = 0; i < L2TP_ID_HASH_SIZE; i++)
		PSLIST_DESTROY(&l2tp_hash.lists[i]);

	mutex_exit(&l2tp_hash.lock);

	/* Third argument of hashdone() is the hash mask (size - 1). */
	hashdone(l2tp_hash.lists, HASH_PSLIST, L2TP_ID_HASH_SIZE - 1);

	return 0;
}
1046
/*
 * Establish (or re-establish) the L2TPv3 session ids.  Builds a new
 * variant with the given ids, removes the softc from its old hash
 * bucket if a session was already configured, publishes the new
 * variant and inserts the softc into the bucket for the new local id.
 * Caller (l2tp_ioctl) guarantees both ids are non-zero and that the
 * local id is not already in use by another interface.
 */
static int
l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
    uint32_t peer_sess_id)
{
	uint32_t idx;
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = my_sess_id;
	nvar->lv_peer_sess_id = peer_sess_id;
	/* Make nvar's fields globally visible before publishing it. */
	membar_producer();

	/* Unhash the old session (if any) and wait out its readers. */
	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);

	idx = id_hash_func(nvar->lv_my_sess_id);
	if ((ifp->if_flags & IFF_DEBUG) != 0)
		log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
		    sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);

	mutex_enter(&l2tp_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
	mutex_exit(&l2tp_hash.lock);

	kmem_free(ovar, sizeof(*ovar));
	return 0;
}
1088
/*
 * Tear down the L2TPv3 session: publish a new variant with zeroed
 * session ids and remove the softc from the session-id hash.  Safe to
 * call when no session is configured (the hash removal is skipped).
 */
static int
l2tp_clear_session(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = 0;
	nvar->lv_peer_sess_id = 0;
	/* Make nvar's fields globally visible before publishing it. */
	membar_producer();

	/* Unhash the session (if configured) and wait out readers. */
	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);
	kmem_free(ovar, sizeof(*ovar));
	return 0;
}
1117
/*
 * Look up the variant whose local session id equals "id" and acquire a
 * psref on it inside a pserialize read section.  Returns NULL if no
 * match.  The caller must be curlwp-bound and must release the psref.
 */
struct l2tp_variant *
l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
{
	int idx;
	int s;
	struct l2tp_softc *sc;

	idx = id_hash_func(id);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
	    l2tp_hash) {
		struct l2tp_variant *var = sc->l2tp_var;
		if (var == NULL)
			continue;
		if (var->lv_my_sess_id != id)
			continue;
		/* Pin the variant before leaving the read section. */
		psref_acquire(psref, &var->lv_psref, lv_psref_class);
		pserialize_read_exit(s);
		return var;
	}
	pserialize_read_exit(s);
	return NULL;
}
1142
1143 /*
1144 * l2tp_variant update API.
1145 *
1146 * Assumption:
1147 * reader side dereferences sc->l2tp_var in reader critical section only,
1148 * that is, all of reader sides do not reader the sc->l2tp_var after
1149 * pserialize_perform().
1150 */
1151 static void
1152 l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
1153 {
1154 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1155 struct l2tp_variant *ovar = sc->l2tp_var;
1156
1157 KASSERT(mutex_owned(&sc->l2tp_lock));
1158
1159 sc->l2tp_var = nvar;
1160 pserialize_perform(l2tp_psz);
1161 psref_target_destroy(&ovar->lv_psref, lv_psref_class);
1162
1163 /*
1164 * In the manual of atomic_swap_ptr(3), there is no mention if 2nd
1165 * argument is rewrite or not. So, use sc->l2tp_var instead of nvar.
1166 */
1167 if (sc->l2tp_var->lv_psrc != NULL && sc->l2tp_var->lv_pdst != NULL)
1168 ifp->if_flags |= IFF_RUNNING;
1169 else
1170 ifp->if_flags &= ~IFF_RUNNING;
1171 }
1172
1173 static int
1174 l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
1175 uint64_t peer_cookie, u_int peer_cookie_len)
1176 {
1177 struct l2tp_variant *nvar;
1178
1179 if (my_cookie == 0 || peer_cookie == 0)
1180 return EINVAL;
1181
1182 if (my_cookie_len != 4 && my_cookie_len != 8
1183 && peer_cookie_len != 4 && peer_cookie_len != 8)
1184 return EINVAL;
1185
1186 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1187
1188 mutex_enter(&sc->l2tp_lock);
1189
1190 *nvar = *sc->l2tp_var;
1191 psref_target_init(&nvar->lv_psref, lv_psref_class);
1192 nvar->lv_my_cookie = my_cookie;
1193 nvar->lv_my_cookie_len = my_cookie_len;
1194 nvar->lv_peer_cookie = peer_cookie;
1195 nvar->lv_peer_cookie_len = peer_cookie_len;
1196 nvar->lv_use_cookie = L2TP_COOKIE_ON;
1197 membar_producer();
1198 l2tp_variant_update(sc, nvar);
1199
1200 mutex_exit(&sc->l2tp_lock);
1201
1202 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1203 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1204 log(LOG_DEBUG,
1205 "%s: set cookie: "
1206 "local cookie_len=%u local cookie=%" PRIu64 ", "
1207 "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
1208 ifp->if_xname, my_cookie_len, my_cookie,
1209 peer_cookie_len, peer_cookie);
1210 }
1211
1212 return 0;
1213 }
1214
/*
 * Disable cookie checking: publish a new variant with both cookies
 * zeroed and lv_use_cookie turned off.
 */
static void
l2tp_clear_cookie(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_cookie = 0;
	nvar->lv_my_cookie_len = 0;
	nvar->lv_peer_cookie = 0;
	nvar->lv_peer_cookie_len = 0;
	nvar->lv_use_cookie = L2TP_COOKIE_OFF;
	/* Make nvar's fields globally visible before publishing it. */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);
}
1236
1237 static void
1238 l2tp_set_state(struct l2tp_softc *sc, int state)
1239 {
1240 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1241 struct l2tp_variant *nvar;
1242
1243 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1244
1245 mutex_enter(&sc->l2tp_lock);
1246
1247 *nvar = *sc->l2tp_var;
1248 psref_target_init(&nvar->lv_psref, lv_psref_class);
1249 nvar->lv_state = state;
1250 membar_producer();
1251 l2tp_variant_update(sc, nvar);
1252
1253 if (nvar->lv_state == L2TP_STATE_UP) {
1254 ifp->if_link_state = LINK_STATE_UP;
1255 } else {
1256 ifp->if_link_state = LINK_STATE_DOWN;
1257 }
1258
1259 mutex_exit(&sc->l2tp_lock);
1260
1261 #ifdef NOTYET
1262 #if NVLAN > 0
1263 vlan_linkstate_notify(ifp, ifp->if_link_state);
1264 #endif
1265 #endif
1266 }
1267
1268 static int
1269 l2tp_encap_attach(struct l2tp_variant *var)
1270 {
1271 int error;
1272
1273 if (var == NULL || var->lv_psrc == NULL)
1274 return EINVAL;
1275
1276 switch (var->lv_psrc->sa_family) {
1277 #ifdef INET
1278 case AF_INET:
1279 error = in_l2tp_attach(var);
1280 break;
1281 #endif
1282 #ifdef INET6
1283 case AF_INET6:
1284 error = in6_l2tp_attach(var);
1285 break;
1286 #endif
1287 default:
1288 error = EINVAL;
1289 break;
1290 }
1291
1292 return error;
1293 }
1294
1295 static int
1296 l2tp_encap_detach(struct l2tp_variant *var)
1297 {
1298 int error;
1299
1300 if (var == NULL || var->lv_psrc == NULL)
1301 return EINVAL;
1302
1303 switch (var->lv_psrc->sa_family) {
1304 #ifdef INET
1305 case AF_INET:
1306 error = in_l2tp_detach(var);
1307 break;
1308 #endif
1309 #ifdef INET6
1310 case AF_INET6:
1311 error = in6_l2tp_detach(var);
1312 break;
1313 #endif
1314 default:
1315 error = EINVAL;
1316 break;
1317 }
1318
1319 return error;
1320 }
1321
1322 /*
1323 * TODO:
1324 * unify with gif_check_nesting().
1325 */
1326 int
1327 l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
1328 {
1329 struct m_tag *mtag;
1330 int *count;
1331
1332 mtag = m_tag_find(m, PACKET_TAG_TUNNEL_INFO, NULL);
1333 if (mtag != NULL) {
1334 count = (int *)(mtag + 1);
1335 if (++(*count) > max_l2tp_nesting) {
1336 log(LOG_NOTICE,
1337 "%s: recursively called too many times(%d)\n",
1338 if_name(ifp),
1339 *count);
1340 return EIO;
1341 }
1342 } else {
1343 mtag = m_tag_get(PACKET_TAG_TUNNEL_INFO, sizeof(*count),
1344 M_NOWAIT);
1345 if (mtag != NULL) {
1346 m_tag_prepend(m, mtag);
1347 count = (int *)(mtag + 1);
1348 *count = 0;
1349 }
1350 #ifdef L2TP_DEBUG
1351 else {
1352 log(LOG_DEBUG,
1353 "%s: m_tag_get() failed, recursion calls are not prevented.\n",
1354 if_name(ifp));
1355 }
1356 #endif
1357 }
1358
1359 return 0;
1360 }
1361
1362 /*
1363 * Module infrastructure
1364 */
1365 #include "if_module.h"
1366
1367 IF_MODULE(MODULE_CLASS_DRIVER, l2tp, "")
1368
1369
1370 /* TODO: IP_TCPMSS support */
1371 #ifdef IP_TCPMSS
1372 static int l2tp_need_tcpmss_clamp(struct ifnet *);
1373 #ifdef INET
1374 static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
1375 #endif
1376 #ifdef INET6
1377 static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
1378 #endif
1379
/*
 * Clamp the TCP MSS of a TCP segment inside the L2TP-carried ethernet
 * frame "m" so the payload fits the tunnel MTU.  The frame may be a
 * plain ethernet frame or an 802.1Q (VLAN) frame: the link-level
 * header is saved, stripped, the per-family clamping helper is run on
 * the bare IP packet, and the header is prepended again.
 *
 * Returns the (possibly replaced) mbuf chain, or NULL when the chain
 * was freed by a helper or by M_PREPEND; the caller must not touch
 * "m" after a NULL return.
 *
 * NOTE(review): m_copydata() below always copies
 * sizeof(struct ether_vlan_header) bytes, which assumes the frame is
 * at least that long — confirm callers guarantee this for non-VLAN
 * frames.
 */
struct mbuf *
l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
{

	if (l2tp_need_tcpmss_clamp(ifp)) {
		struct ether_header *eh;
		struct ether_vlan_header evh;

		/* save ether header */
		m_copydata(m, 0, sizeof(evh), (void *)&evh);
		eh = (struct ether_header *)&evh;

		switch (ntohs(eh->ether_type)) {
		case ETHERTYPE_VLAN: /* Ether + VLAN */
			/* Nothing beyond the VLAN header: leave as-is. */
			if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
				break;
			m_adj(m, sizeof(struct ether_vlan_header));
			switch (ntohs(evh.evl_proto)) {
#ifdef INET
			case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
				m = l2tp_tcpmss4_clamp(ifp, m);
				if (m == NULL)
					return NULL;
				break;
#endif /* INET */
#ifdef INET6
			case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
				m = l2tp_tcpmss6_clamp(ifp, m);
				if (m == NULL)
					return NULL;
				break;
#endif /* INET6 */
			default:
				break;
			}
			/* restore ether header */
			M_PREPEND(m, sizeof(struct ether_vlan_header),
			    M_DONTWAIT);
			if (m == NULL)
				return NULL;
			*mtod(m, struct ether_vlan_header *) = evh;
			break;
#ifdef INET
		case ETHERTYPE_IP: /* Ether + IPv4 */
			if (m->m_pkthdr.len <= sizeof(struct ether_header))
				break;
			m_adj(m, sizeof(struct ether_header));
			m = l2tp_tcpmss4_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			/* restore ether header */
			M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
			if (m == NULL)
				return NULL;
			*mtod(m, struct ether_header *) = *eh;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6: /* Ether + IPv6 */
			if (m->m_pkthdr.len <= sizeof(struct ether_header))
				break;
			m_adj(m, sizeof(struct ether_header));
			m = l2tp_tcpmss6_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			/* restore ether header */
			M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
			if (m == NULL)
				return NULL;
			*mtod(m, struct ether_header *) = *eh;
			break;
#endif /* INET6 */
		default:
			/* Non-IP payload: nothing to clamp. */
			break;
		}
	}

	return m;
}
1459
/*
 * Report whether MSS clamping is configured on this interface for
 * either address family (non-zero if_tcpmss / if_tcpmss6).
 */
static int
l2tp_need_tcpmss_clamp(struct ifnet *ifp)
{

#ifdef INET
	if (ifp->if_tcpmss != 0)
		return 1;
#endif /* INET */

#ifdef INET6
	if (ifp->if_tcpmss6 != 0)
		return 1;
#endif /* INET6 */

	return 0;
}
1477
#ifdef INET
/*
 * Clamp the MSS of an IPv4 TCP packet.  A negative if_tcpmss means
 * "derive from MTU"; zero disables clamping.  Returns the (possibly
 * replaced) mbuf chain.
 */
static struct mbuf *
l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
{

	if (ifp->if_tcpmss == 0)
		return m;

	return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
	    ifp->if_mtu - IP_TCPMSS_EXTLEN :
	    ifp->if_tcpmss);
}
#endif /* INET */
1491
#ifdef INET6
/*
 * Clamp the MSS of an IPv6 TCP packet.  A negative if_tcpmss6 means
 * "derive from MTU"; zero disables clamping.  Returns the (possibly
 * replaced) mbuf chain.
 */
static struct mbuf *
l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
{
	int ip6hdrlen;

	if (ifp->if_tcpmss6 == 0)
		return m;
	if (!ip6_tcpmss_applicable(m, &ip6hdrlen))
		return m;

	return ip6_tcpmss(m, ip6hdrlen,
	    (ifp->if_tcpmss6 < 0) ?
	    ifp->if_mtu - IP6_TCPMSS_EXTLEN :
	    ifp->if_tcpmss6);
}
#endif /* INET6 */
1508
1509 #endif /* IP_TCPMSS */
1510