if_l2tp.c revision 1.11.2.5 1 /* $NetBSD: if_l2tp.c,v 1.11.2.5 2018/03/08 13:41:40 martin Exp $ */
2
3 /*
4 * Copyright (c) 2017 Internet Initiative Japan Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * L2TPv3 kernel interface
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.11.2.5 2018/03/08 13:41:40 martin Exp $");
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #include "opt_net_mpsafe.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/errno.h>
48 #include <sys/ioctl.h>
49 #include <sys/time.h>
50 #include <sys/syslog.h>
51 #include <sys/proc.h>
52 #include <sys/conf.h>
53 #include <sys/kauth.h>
54 #include <sys/cpu.h>
55 #include <sys/cprng.h>
56 #include <sys/intr.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/atomic.h>
60 #include <sys/pserialize.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_ether.h>
67 #include <net/if_types.h>
68 #include <net/netisr.h>
69 #include <net/route.h>
70 #include <net/bpf.h>
71 #include <net/if_vlanvar.h>
72
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/ip_encap.h>
77 #ifdef INET
78 #include <netinet/in_var.h>
79 #include <netinet/in_l2tp.h>
80 #endif /* INET */
81 #ifdef INET6
82 #include <netinet6/in6_l2tp.h>
83 #endif
84
85 #include <net/if_l2tp.h>
86
87 #include <net/if_vlanvar.h>
88
89 /* TODO: IP_TCPMSS support */
90 #undef IP_TCPMSS
91 #ifdef IP_TCPMSS
92 #include <netinet/ip_tcpmss.h>
93 #endif
94
95 #include <net/bpf.h>
96 #include <net/net_osdep.h>
97
98 /*
99 * l2tp global variable definitions
100 */
101 LIST_HEAD(l2tp_sclist, l2tp_softc);
102 static struct {
103 struct l2tp_sclist list;
104 kmutex_t lock;
105 } l2tp_softcs __cacheline_aligned;
106
107
108 #if !defined(L2TP_ID_HASH_SIZE)
109 #define L2TP_ID_HASH_SIZE 64
110 #endif
111 static struct {
112 kmutex_t lock;
113 struct pslist_head *lists;
114 u_long mask;
115 } l2tp_hash __cacheline_aligned = {
116 .lists = NULL,
117 };
118
119 pserialize_t l2tp_psz __read_mostly;
120 struct psref_class *lv_psref_class __read_mostly;
121
122 static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
123 static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);
124
125 static int l2tp_clone_create(struct if_clone *, int);
126 static int l2tp_clone_destroy(struct ifnet *);
127
128 struct if_clone l2tp_cloner =
129 IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);
130
131 static int l2tp_output(struct ifnet *, struct mbuf *,
132 const struct sockaddr *, const struct rtentry *);
133 static void l2tpintr(struct l2tp_variant *);
134
135 static void l2tp_hash_init(void);
136 static int l2tp_hash_fini(void);
137
138 static void l2tp_start(struct ifnet *);
139 static int l2tp_transmit(struct ifnet *, struct mbuf *);
140
141 static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
142 struct sockaddr *);
143 static void l2tp_delete_tunnel(struct ifnet *);
144
145 static int id_hash_func(uint32_t, u_long);
146
147 static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
148 static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
149 static int l2tp_clear_session(struct l2tp_softc *);
150 static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
151 static void l2tp_clear_cookie(struct l2tp_softc *);
152 static void l2tp_set_state(struct l2tp_softc *, int);
153 static int l2tp_encap_attach(struct l2tp_variant *);
154 static int l2tp_encap_detach(struct l2tp_variant *);
155
156 #ifndef MAX_L2TP_NEST
157 /*
158 * This macro controls the upper limitation on nesting of l2tp tunnels.
159 * Since, setting a large value to this macro with a careless configuration
160 * may introduce system crash, we don't allow any nestings by default.
161 * If you need to configure nested l2tp tunnels, you can define this macro
162 * in your kernel configuration file. However, if you do so, please be
163 * careful to configure the tunnels so that it won't make a loop.
164 */
165 /*
166 * XXX
167 * Currently, if in_l2tp_output recursively calls, it causes locking against
168 * myself of struct l2tp_ro->lr_lock. So, nested l2tp tunnels is prohibited.
169 */
170 #define MAX_L2TP_NEST 0
171 #endif
172
173 static int max_l2tp_nesting = MAX_L2TP_NEST;
174
/*
 * Legacy pseudo-device attach hook; intentionally empty (see comment in
 * the body: real setup happens in l2tpinit()).
 */
175 /* ARGSUSED */
176 void
177 l2tpattach(int count)
178 {
179 /*
180 * Nothing to do here, initialization is handled by the
181 * module initialization code in l2tpinit() below).
182 */
183 }
184
/*
 * Module initialization: set up the global softc list, the session-id
 * hash table, the pserialize/psref machinery, and register the cloner
 * so "ifconfig l2tpN create" works.
 */
185 static void
186 l2tpinit(void)
187 {
188
	/* global list of all l2tp softcs, protected by its own mutex */
189 mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
190 LIST_INIT(&l2tp_softcs.list);
191
192 mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
193 l2tp_psz = pserialize_create();
194 lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
195 if_clone_attach(&l2tp_cloner);
196
	/* allocates l2tp_hash.lists / sets l2tp_hash.mask */
197 l2tp_hash_init();
198 }
199
/*
 * Module detach: refuse (EBUSY) while any l2tp interface still exists,
 * otherwise tear down everything l2tpinit() created, in reverse order.
 * Returns 0 on success or an errno.
 */
200 static int
201 l2tpdetach(void)
202 {
203 int error;
204
205 mutex_enter(&l2tp_softcs.lock);
206 if (!LIST_EMPTY(&l2tp_softcs.list)) {
207 mutex_exit(&l2tp_softcs.lock);
208 return EBUSY;
209 }
210 mutex_exit(&l2tp_softcs.lock);
211
	/* fails with EBUSY if any hash bucket is still populated */
212 error = l2tp_hash_fini();
213 if (error)
214 return error;
215
216 if_clone_detach(&l2tp_cloner);
217 psref_class_destroy(lv_psref_class);
218 pserialize_destroy(l2tp_psz);
219 mutex_destroy(&l2tp_hash.lock);
220
221 mutex_destroy(&l2tp_softcs.lock);
222
223 return error;
224 }
225
/*
 * Cloner create hook: allocate and initialize a new l2tpN interface.
 * Builds the softc, an initial "down" variant (no tunnel, no session,
 * no cookies), the per-CPU route caches, and links the softc onto the
 * global list.  Returns 0 or an errno from l2tpattach0().
 */
226 static int
227 l2tp_clone_create(struct if_clone *ifc, int unit)
228 {
229 struct l2tp_softc *sc;
230 struct l2tp_variant *var;
231 int rv;
232
233 sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
234 if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
235 rv = l2tpattach0(sc);
236 if (rv != 0) {
237 kmem_free(sc, sizeof(struct l2tp_softc));
238 return rv;
239 }
240
	/* initial variant: interface starts administratively down */
241 var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
242 var->lv_softc = sc;
243 var->lv_state = L2TP_STATE_DOWN;
244 var->lv_use_cookie = L2TP_COOKIE_OFF;
245 psref_target_init(&var->lv_psref, lv_psref_class);
246
247 sc->l2tp_var = var;
248 mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
249 PSLIST_ENTRY_INIT(sc, l2tp_hash);
250
	/* per-CPU cached route toward the tunnel peer */
251 sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
252 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);
253
254 mutex_enter(&l2tp_softcs.lock);
255 LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
256 mutex_exit(&l2tp_softcs.lock);
257
258 return (0);
259 }
260
/*
 * Fill in the ifnet portion of a freshly allocated softc and attach it
 * to the network stack (if_attach + sadl + bpf).  Returns 0 or the
 * errno from if_attach().
 */
261 int
262 l2tpattach0(struct l2tp_softc *sc)
263 {
264 int rv;
265
266 sc->l2tp_ec.ec_if.if_addrlen = 0;
267 sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
268 sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
269 sc->l2tp_ec.ec_if.if_extflags = IFEF_NO_LINK_STATE_CHANGE;
270 #ifdef NET_MPSAFE
271 sc->l2tp_ec.ec_if.if_extflags |= IFEF_MPSAFE;
272 #endif
273 sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
274 sc->l2tp_ec.ec_if.if_output = l2tp_output;
275 sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
276 sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
277 sc->l2tp_ec.ec_if.if_start = l2tp_start;
278 sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
	/* decapsulated payload is an Ethernet frame */
279 sc->l2tp_ec.ec_if._if_input = ether_input;
280 IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);
281 /* XXX
282 * It may improve performance to use if_initialize()/if_register()
283 * so that l2tp_input() calls if_input() instead of
284 * if_percpuq_enqueue(). However, that causes recursive softnet_lock
285 * when NET_MPSAFE is not set.
286 */
287 rv = if_attach(&sc->l2tp_ec.ec_if);
288 if (rv != 0)
289 return rv;
290 if_alloc_sadl(&sc->l2tp_ec.ec_if);
	/* tap as Ethernet: inner frames carry an ether header */
291 bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));
292
293 return 0;
294 }
295
/*
 * percpu_foreach callback: initialize one per-CPU route-cache slot.
 */
296 void
297 l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
298 {
299 struct l2tp_ro *lro = p;
300
301 mutex_init(&lro->lr_lock, MUTEX_DEFAULT, IPL_NONE);
302 }
303
/*
 * percpu_foreach callback: release one per-CPU route-cache slot
 * (drop any cached route, then destroy the lock).
 */
304 void
305 l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
306 {
307 struct l2tp_ro *lro = p;
308
309 rtcache_free(&lro->lr_ro);
310
311 mutex_destroy(&lro->lr_lock);
312 }
313
/*
 * Cloner destroy hook: tear down an l2tpN interface.  Order matters:
 * clear session/tunnel first, then publish a NULL variant (so readers
 * such as l2tp_transmit() can no longer obtain a reference), and only
 * after the interface is detached free the variant and softc.
 */
314 static int
315 l2tp_clone_destroy(struct ifnet *ifp)
316 {
317 struct l2tp_variant *var;
318 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
319 l2tp_ec.ec_if);
320
321 l2tp_clear_session(sc);
322 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
323 /*
324 * To avoid for l2tp_transmit() to access sc->l2tp_var after free it.
325 */
326 mutex_enter(&sc->l2tp_lock);
327 var = sc->l2tp_var;
	/* waits for readers via pserialize/psref before returning */
328 l2tp_variant_update(sc, NULL);
329 mutex_exit(&sc->l2tp_lock);
330
331 mutex_enter(&l2tp_softcs.lock);
332 LIST_REMOVE(sc, l2tp_list);
333 mutex_exit(&l2tp_softcs.lock);
334
335 bpf_detach(ifp);
336
337 if_detach(ifp);
338
339 percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
340 percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));
341
342 kmem_free(var, sizeof(struct l2tp_variant));
343 mutex_destroy(&sc->l2tp_lock);
344 kmem_free(sc, sizeof(struct l2tp_softc));
345
346 return 0;
347 }
348
/*
 * if_output handler: queue one packet for the tunnel and kick the
 * transmit path.  The inner address family is prepended as a host-order
 * int (DLT_NULL convention) so l2tpintr() knows what it is carrying.
 * Returns 0 or an errno; the mbuf is always consumed.
 */
349 static int
350 l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
351 const struct rtentry *rt)
352 {
353 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
354 l2tp_ec.ec_if);
355 struct l2tp_variant *var;
356 struct psref psref;
357 int error = 0;
358
	/* hold the current tunnel configuration across the send */
359 var = l2tp_getref_variant(sc, &psref);
360 if (var == NULL) {
361 m_freem(m);
362 return ENETDOWN;
363 }
364
365 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
366
367 m->m_flags &= ~(M_BCAST|M_MCAST);
368
369 if ((ifp->if_flags & IFF_UP) == 0) {
370 m_freem(m);
371 error = ENETDOWN;
372 goto end;
373 }
374
	/* no tunnel endpoints configured yet */
375 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
376 m_freem(m);
377 error = ENETDOWN;
378 goto end;
379 }
380
381 /* XXX should we check if our outer source is legal? */
382
383 /* use DLT_NULL encapsulation here to pass inner af type */
384 M_PREPEND(m, sizeof(int), M_DONTWAIT);
385 if (!m) {
386 error = ENOBUFS;
387 goto end;
388 }
389 *mtod(m, int *) = dst->sa_family;
390
391 IFQ_ENQUEUE(&ifp->if_snd, m, error);
392 if (error)
393 goto end;
394
395 /*
396 * direct call to avoid infinite loop at l2tpintr()
397 */
398 l2tpintr(var);
399
400 error = 0;
401
402 end:
403 l2tp_putref_variant(var, &psref);
404 if (error)
405 ifp->if_oerrors++;
406
407 return error;
408 }
409
/*
 * Drain the interface send queue, encapsulating each packet with the
 * outer protocol selected by the tunnel source address family.
 * Caller must hold a psref on the variant (asserted below).  If the
 * session is not fully configured the queue is purged instead.
 */
410 static void
411 l2tpintr(struct l2tp_variant *var)
412 {
413 struct l2tp_softc *sc;
414 struct ifnet *ifp;
415 struct mbuf *m;
416 int error;
417
418 KASSERT(psref_held(&var->lv_psref, lv_psref_class));
419
420 sc = var->lv_softc;
421 ifp = &sc->l2tp_ec.ec_if;
422
423 /* output processing */
424 if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
425 IFQ_PURGE(&ifp->if_snd);
426 return;
427 }
428
429 for (;;) {
430 IFQ_DEQUEUE(&ifp->if_snd, m);
431 if (m == NULL)
432 break;
433 m->m_flags &= ~(M_BCAST|M_MCAST);
434 bpf_mtap(ifp, m);
	/* dispatch on outer (tunnel) address family */
435 switch (var->lv_psrc->sa_family) {
436 #ifdef INET
437 case AF_INET:
438 error = in_l2tp_output(var, m);
439 break;
440 #endif
441 #ifdef INET6
442 case AF_INET6:
443 error = in6_l2tp_output(var, m);
444 break;
445 #endif
446 default:
447 m_freem(m);
448 error = ENETDOWN;
449 break;
450 }
451
452 if (error)
453 ifp->if_oerrors++;
454 else {
455 ifp->if_opackets++;
456 /*
457 * obytes is incremented at ether_output() or
458 * bridge_enqueue().
459 */
460 }
461 }
462
463 }
464
/*
 * Receive path: hand a decapsulated inner frame to the stack via the
 * interface's per-CPU input queue.  Before that, possibly re-align the
 * head of the payload by copying the first L2TP_COPY_LENGTH bytes into
 * a fresh mbuf header, so upper layers see a properly aligned Ethernet
 * header.
 *
 * NOTE(review): `val` holds the first bytes of the payload; the
 * `(val & 0x03) == 0` test gates the realignment copy — the exact
 * intent of testing payload *content* bits here should be confirmed
 * against in_l2tp_input()/in6_l2tp_input() callers.
 */
465 void
466 l2tp_input(struct mbuf *m, struct ifnet *ifp)
467 {
468 u_long val;
469
470 KASSERT(ifp != NULL);
471
	/* runt: not even one word of payload */
472 if (m->m_pkthdr.len < sizeof(val)) {
473 m_freem(m);
474 return;
475 }
476
477 m_copydata(m, 0, sizeof(val), &val);
478
479 if ((val & 0x03) == 0) {
480 /* copy and align head of payload */
481 struct mbuf *m_head;
482 int copy_length;
483
484 #define L2TP_COPY_LENGTH 60
485
486 if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
487 copy_length = m->m_pkthdr.len;
488 } else {
489 copy_length = L2TP_COPY_LENGTH;
490 }
491
	/* make the first copy_length bytes contiguous */
492 if (m->m_len < copy_length) {
493 m = m_pullup(m, copy_length);
494 if (m == NULL)
495 return;
496 }
497
498 MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
499 if (m_head == NULL) {
500 m_freem(m);
501 return;
502 }
503 M_COPY_PKTHDR(m_head, m);
504
	/* place the copied head so its end is aligned in the new mbuf */
505 MH_ALIGN(m_head, L2TP_COPY_LENGTH);
506 memcpy(mtod(m_head, void *), mtod(m, void *), copy_length);
507 m_head->m_len = copy_length;
508 m->m_data += copy_length;
509 m->m_len -= copy_length;
510
511 /* construct chain */
512 if (m->m_len == 0) {
	/* first mbuf fully consumed: free it, keep its successors */
513 m_head->m_next = m_free(m);
514 } else {
515 /*
516 * Already copied mtag with M_COPY_PKTHDR.
517 * but don't delete mtag in case cut off M_PKTHDR flag
518 */
519 m_tag_delete_chain(m, NULL);
520 m->m_flags &= ~M_PKTHDR;
521 m_head->m_next = m;
522 }
523
524 /* override m */
525 m = m_head;
526 }
527
528 m_set_rcvif(m, ifp);
529
530 /*
531 * bpf_mtap() and ifp->if_ipackets++ is done in if_input()
532 *
533 * obytes is incremented at ether_output() or bridge_enqueue().
534 */
535 if_percpuq_enqueue(ifp->if_percpuq, m);
536 }
537
538 void
539 l2tp_start(struct ifnet *ifp)
540 {
541 struct psref psref;
542 struct l2tp_variant *var;
543 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
544 l2tp_ec.ec_if);
545
546 var = l2tp_getref_variant(sc, &psref);
547 if (var == NULL)
548 return;
549
550 if (var->lv_psrc == NULL || var->lv_pdst == NULL)
551 return;
552
553 l2tpintr(var);
554 l2tp_putref_variant(var, &psref);
555 }
556
/*
 * if_transmit handler: encapsulate and send one packet directly,
 * bypassing the if_snd queue.  Mirrors the per-packet body of
 * l2tpintr(); the mbuf is always consumed.  Returns 0 or an errno.
 */
557 int
558 l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
559 {
560 int error;
561 struct psref psref;
562 struct l2tp_variant *var;
563 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
564 l2tp_ec.ec_if);
565
566 var = l2tp_getref_variant(sc, &psref);
567 if (var == NULL) {
568 m_freem(m);
569 return ENETDOWN;
570 }
571
	/* tunnel endpoints must be configured */
572 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
573 m_freem(m);
574 error = ENETDOWN;
575 goto out;
576 }
577
578 m->m_flags &= ~(M_BCAST|M_MCAST);
579 bpf_mtap(ifp, m);
	/* dispatch on outer (tunnel) address family */
580 switch (var->lv_psrc->sa_family) {
581 #ifdef INET
582 case AF_INET:
583 error = in_l2tp_output(var, m);
584 break;
585 #endif
586 #ifdef INET6
587 case AF_INET6:
588 error = in6_l2tp_output(var, m);
589 break;
590 #endif
591 default:
592 m_freem(m);
593 error = ENETDOWN;
594 break;
595 }
596
597 if (error)
598 ifp->if_oerrors++;
599 else {
600 ifp->if_opackets++;
601 /*
602 * obytes is incremented at ether_output() or bridge_enqueue().
603 */
604 }
605
606 out:
607 l2tp_putref_variant(var, &psref);
608 return error;
609 }
610
611 /* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
/*
 * Interface ioctl handler.  Handles generic interface requests
 * (addresses, MTU, multicast), tunnel endpoint configuration
 * (SIOC[SGD]*PHYADDR*), and the L2TP-specific session/cookie/state
 * requests (SIOC[SGD]L2TP*).
 *
 * NOTE(review): error paths are inconsistent — some use `goto bad` /
 * `break` and others `return` directly while holding no resources;
 * preserved as-is since callers only see the errno.
 */
612 int
613 l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
614 {
615 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
616 l2tp_ec.ec_if);
617 struct l2tp_variant *var, *var_tmp;
618 struct ifreq *ifr = data;
619 int error = 0, size;
620 struct sockaddr *dst, *src;
621 struct l2tp_req l2tpr;
622 u_long mtu;
623 int bound;
624 struct psref psref;
625
626 switch (cmd) {
627 case SIOCSIFADDR:
628 ifp->if_flags |= IFF_UP;
629 break;
630
631 case SIOCSIFDSTADDR:
632 break;
633
634 case SIOCADDMULTI:
635 case SIOCDELMULTI:
636 switch (ifr->ifr_addr.sa_family) {
637 #ifdef INET
638 case AF_INET: /* IP supports Multicast */
639 break;
640 #endif /* INET */
641 #ifdef INET6
642 case AF_INET6: /* IP6 supports Multicast */
643 break;
644 #endif /* INET6 */
645 default: /* Other protocols doesn't support Multicast */
646 error = EAFNOSUPPORT;
647 break;
648 }
649 break;
650
651 case SIOCSIFMTU:
652 mtu = ifr->ifr_mtu;
653 if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
654 return (EINVAL);
655 ifp->if_mtu = mtu;
656 break;
657
	/* --- set tunnel outer endpoints (v4 / v6 / generic) --- */
658 #ifdef INET
659 case SIOCSIFPHYADDR:
660 src = (struct sockaddr *)
661 &(((struct in_aliasreq *)data)->ifra_addr);
662 dst = (struct sockaddr *)
663 &(((struct in_aliasreq *)data)->ifra_dstaddr);
664 if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
665 return EAFNOSUPPORT;
666 else if (src->sa_len != sizeof(struct sockaddr_in)
667 || dst->sa_len != sizeof(struct sockaddr_in))
668 return EINVAL;
669
670 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
671 break;
672
673 #endif /* INET */
674 #ifdef INET6
675 case SIOCSIFPHYADDR_IN6:
676 src = (struct sockaddr *)
677 &(((struct in6_aliasreq *)data)->ifra_addr);
678 dst = (struct sockaddr *)
679 &(((struct in6_aliasreq *)data)->ifra_dstaddr);
680 if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
681 return EAFNOSUPPORT;
682 else if (src->sa_len != sizeof(struct sockaddr_in6)
683 || dst->sa_len != sizeof(struct sockaddr_in6))
684 return EINVAL;
685
686 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
687 break;
688
689 #endif /* INET6 */
690 case SIOCSLIFPHYADDR:
691 src = (struct sockaddr *)
692 &(((struct if_laddrreq *)data)->addr);
693 dst = (struct sockaddr *)
694 &(((struct if_laddrreq *)data)->dstaddr);
	/* both endpoints must share one (supported) family */
695 if (src->sa_family != dst->sa_family)
696 return EINVAL;
697 else if (src->sa_family == AF_INET
698 && src->sa_len != sizeof(struct sockaddr_in))
699 return EINVAL;
700 else if (src->sa_family == AF_INET6
701 && src->sa_len != sizeof(struct sockaddr_in6))
702 return EINVAL;
703 else if (dst->sa_family == AF_INET
704 && dst->sa_len != sizeof(struct sockaddr_in))
705 return EINVAL;
706 else if (dst->sa_family == AF_INET6
707 && dst->sa_len != sizeof(struct sockaddr_in6))
708 return EINVAL;
709
710 error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
711 break;
712
713 case SIOCDIFPHYADDR:
714 l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
715 break;
716
	/* --- query outer source address --- */
717 case SIOCGIFPSRCADDR:
718 #ifdef INET6
719 case SIOCGIFPSRCADDR_IN6:
720 #endif /* INET6 */
721 bound = curlwp_bind();
722 var = l2tp_getref_variant(sc, &psref);
723 if (var == NULL) {
724 curlwp_bindx(bound);
725 error = EADDRNOTAVAIL;
726 goto bad;
727 }
728 if (var->lv_psrc == NULL) {
729 l2tp_putref_variant(var, &psref);
730 curlwp_bindx(bound);
731 error = EADDRNOTAVAIL;
732 goto bad;
733 }
734 src = var->lv_psrc;
735 switch (cmd) {
736 #ifdef INET
737 case SIOCGIFPSRCADDR:
738 dst = &ifr->ifr_addr;
739 size = sizeof(ifr->ifr_addr);
740 break;
741 #endif /* INET */
742 #ifdef INET6
743 case SIOCGIFPSRCADDR_IN6:
744 dst = (struct sockaddr *)
745 &(((struct in6_ifreq *)data)->ifr_addr);
746 size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
747 break;
748 #endif /* INET6 */
749 default:
750 l2tp_putref_variant(var, &psref);
751 curlwp_bindx(bound);
752 error = EADDRNOTAVAIL;
753 goto bad;
754 }
755 if (src->sa_len > size) {
756 l2tp_putref_variant(var, &psref);
757 curlwp_bindx(bound);
758 return EINVAL;
759 }
760 sockaddr_copy(dst, src->sa_len, src);
761 l2tp_putref_variant(var, &psref);
762 curlwp_bindx(bound);
763 break;
764
	/* --- query outer destination address --- */
765 case SIOCGIFPDSTADDR:
766 #ifdef INET6
767 case SIOCGIFPDSTADDR_IN6:
768 #endif /* INET6 */
769 bound = curlwp_bind();
770 var = l2tp_getref_variant(sc, &psref);
771 if (var == NULL) {
772 curlwp_bindx(bound);
773 error = EADDRNOTAVAIL;
774 goto bad;
775 }
776 if (var->lv_pdst == NULL) {
777 l2tp_putref_variant(var, &psref);
778 curlwp_bindx(bound);
779 error = EADDRNOTAVAIL;
780 goto bad;
781 }
782 src = var->lv_pdst;
783 switch (cmd) {
784 #ifdef INET
785 case SIOCGIFPDSTADDR:
786 dst = &ifr->ifr_addr;
787 size = sizeof(ifr->ifr_addr);
788 break;
789 #endif /* INET */
790 #ifdef INET6
791 case SIOCGIFPDSTADDR_IN6:
792 dst = (struct sockaddr *)
793 &(((struct in6_ifreq *)data)->ifr_addr);
794 size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
795 break;
796 #endif /* INET6 */
797 default:
798 l2tp_putref_variant(var, &psref);
799 curlwp_bindx(bound);
800 error = EADDRNOTAVAIL;
801 goto bad;
802 }
803 if (src->sa_len > size) {
804 l2tp_putref_variant(var, &psref);
805 curlwp_bindx(bound);
806 return EINVAL;
807 }
808 sockaddr_copy(dst, src->sa_len, src);
809 l2tp_putref_variant(var, &psref);
810 curlwp_bindx(bound);
811 break;
812
	/* --- query both endpoints at once --- */
813 case SIOCGLIFPHYADDR:
814 bound = curlwp_bind();
815 var = l2tp_getref_variant(sc, &psref);
816 if (var == NULL) {
817 curlwp_bindx(bound);
818 error = EADDRNOTAVAIL;
819 goto bad;
820 }
821 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
822 l2tp_putref_variant(var, &psref);
823 curlwp_bindx(bound);
824 error = EADDRNOTAVAIL;
825 goto bad;
826 }
827
828 /* copy src */
829 src = var->lv_psrc;
830 dst = (struct sockaddr *)
831 &(((struct if_laddrreq *)data)->addr);
832 size = sizeof(((struct if_laddrreq *)data)->addr);
833 if (src->sa_len > size) {
834 l2tp_putref_variant(var, &psref);
835 curlwp_bindx(bound);
836 return EINVAL;
837 }
838 sockaddr_copy(dst, src->sa_len, src);
839
840 /* copy dst */
841 src = var->lv_pdst;
842 dst = (struct sockaddr *)
843 &(((struct if_laddrreq *)data)->dstaddr);
844 size = sizeof(((struct if_laddrreq *)data)->dstaddr);
845 if (src->sa_len > size) {
846 l2tp_putref_variant(var, &psref);
847 curlwp_bindx(bound);
848 return EINVAL;
849 }
850 sockaddr_copy(dst, src->sa_len, src);
851 l2tp_putref_variant(var, &psref);
852 curlwp_bindx(bound);
853 break;
854
	/* --- L2TPv3 session management --- */
855 case SIOCSL2TPSESSION:
856 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
857 break;
858
859 /* session id must not zero */
860 if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
861 return EINVAL;
862
	/* reject a local session id already registered in the hash */
863 bound = curlwp_bind();
864 var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
865 if (var_tmp != NULL) {
866 /* duplicate session id */
867 log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
868 sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
869 var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
870 psref_release(&psref, &var_tmp->lv_psref,
871 lv_psref_class);
872 curlwp_bindx(bound);
873 return EINVAL;
874 }
875 curlwp_bindx(bound);
876
877 error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
878 break;
879 case SIOCDL2TPSESSION:
880 l2tp_clear_session(sc);
881 break;
882 case SIOCSL2TPCOOKIE:
883 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
884 break;
885
886 error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
887 l2tpr.peer_cookie, l2tpr.peer_cookie_len);
888 break;
889 case SIOCDL2TPCOOKIE:
890 l2tp_clear_cookie(sc);
891 break;
892 case SIOCSL2TPSTATE:
893 if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
894 break;
895
896 l2tp_set_state(sc, l2tpr.state);
897 break;
898 case SIOCGL2TP:
899 /* get L2TPV3 session info */
900 memset(&l2tpr, 0, sizeof(l2tpr));
901
902 bound = curlwp_bind();
903 var = l2tp_getref_variant(sc, &psref);
904 if (var == NULL) {
905 curlwp_bindx(bound);
906 error = EADDRNOTAVAIL;
907 goto bad;
908 }
909
910 l2tpr.state = var->lv_state;
911 l2tpr.my_sess_id = var->lv_my_sess_id;
912 l2tpr.peer_sess_id = var->lv_peer_sess_id;
913 l2tpr.my_cookie = var->lv_my_cookie;
914 l2tpr.my_cookie_len = var->lv_my_cookie_len;
915 l2tpr.peer_cookie = var->lv_peer_cookie;
916 l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
917 l2tp_putref_variant(var, &psref);
918 curlwp_bindx(bound);
919
920 error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
921 break;
922
923 default:
924 error = ifioctl_common(ifp, cmd, data);
925 break;
926 }
927 bad:
928 return error;
929 }
930
/*
 * Install new outer tunnel endpoints.  Builds a new variant (copy of
 * the current one with duplicated src/dst sockaddrs and a fresh encap
 * attachment), publishes it under l2tp_lock, then detaches the old
 * variant's encap and frees its resources.  Returns 0 or an errno;
 * on error the old configuration is left untouched.
 */
931 static int
932 l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
933 {
934 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
935 l2tp_ec.ec_if);
936 struct sockaddr *osrc, *odst;
937 struct sockaddr *nsrc, *ndst;
938 struct l2tp_variant *ovar, *nvar;
939 int error;
940
	/* allocate everything that can sleep before taking locks */
941 nsrc = sockaddr_dup(src, M_WAITOK);
942 ndst = sockaddr_dup(dst, M_WAITOK);
943
944 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
945
946 error = encap_lock_enter();
947 if (error)
948 goto error;
949
950 mutex_enter(&sc->l2tp_lock);
951
952 ovar = sc->l2tp_var;
953 osrc = ovar->lv_psrc;
954 odst = ovar->lv_pdst;
955 *nvar = *ovar;
956 psref_target_init(&nvar->lv_psref, lv_psref_class);
957 nvar->lv_psrc = nsrc;
958 nvar->lv_pdst = ndst;
959 error = l2tp_encap_attach(nvar);
960 if (error) {
961 mutex_exit(&sc->l2tp_lock);
962 encap_lock_exit();
963 goto error;
964 }
	/* make nvar's fields visible before publishing the pointer */
965 membar_producer();
966 l2tp_variant_update(sc, nvar);
967
968 mutex_exit(&sc->l2tp_lock);
969
970 (void)l2tp_encap_detach(ovar);
971 encap_lock_exit();
972
973 if (osrc)
974 sockaddr_free(osrc);
975 if (odst)
976 sockaddr_free(odst);
977 kmem_free(ovar, sizeof(*ovar));
978
979 return 0;
980
981 error:
982 sockaddr_free(nsrc);
983 sockaddr_free(ndst);
984 kmem_free(nvar, sizeof(*nvar));
985
986 return error;
987 }
988
/*
 * Remove the outer tunnel endpoints: publish a new variant with NULL
 * src/dst (copy of the current one otherwise), then detach the old
 * variant's encap and free its sockaddrs.  Silently returns if the
 * encap lock cannot be taken.
 */
989 static void
990 l2tp_delete_tunnel(struct ifnet *ifp)
991 {
992 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
993 l2tp_ec.ec_if);
994 struct sockaddr *osrc, *odst;
995 struct l2tp_variant *ovar, *nvar;
996 int error;
997
998 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
999
1000 error = encap_lock_enter();
1001 if (error) {
1002 kmem_free(nvar, sizeof(*nvar));
1003 return;
1004 }
1005 mutex_enter(&sc->l2tp_lock);
1006
1007 ovar = sc->l2tp_var;
1008 osrc = ovar->lv_psrc;
1009 odst = ovar->lv_pdst;
1010 *nvar = *ovar;
1011 psref_target_init(&nvar->lv_psref, lv_psref_class);
1012 nvar->lv_psrc = NULL;
1013 nvar->lv_pdst = NULL;
	/* make nvar's fields visible before publishing the pointer */
1014 membar_producer();
1015 l2tp_variant_update(sc, nvar);
1016
1017 mutex_exit(&sc->l2tp_lock);
1018
1019 (void)l2tp_encap_detach(ovar);
1020 encap_lock_exit();
1021
1022 if (osrc)
1023 sockaddr_free(osrc);
1024 if (odst)
1025 sockaddr_free(odst);
1026 kmem_free(ovar, sizeof(*ovar));
1027 }
1028
1029 static int
1030 id_hash_func(uint32_t id, u_long mask)
1031 {
1032 uint32_t hash;
1033
1034 hash = (id >> 16) ^ id;
1035 hash = (hash >> 4) ^ hash;
1036
1037 return hash & mask;
1038 }
1039
/*
 * Allocate the session-id hash table (L2TP_ID_HASH_SIZE pslist
 * buckets); hashinit() stores the size-1 mask in l2tp_hash.mask.
 */
1040 static void
1041 l2tp_hash_init(void)
1042 {
1043
1044 l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
1045 &l2tp_hash.mask);
1046 }
1047
/*
 * Free the session-id hash table.  Returns EBUSY if any bucket still
 * holds a softc (an interface still has an active session), otherwise
 * destroys all buckets and releases the table.
 */
1048 static int
1049 l2tp_hash_fini(void)
1050 {
1051 int i;
1052
1053 mutex_enter(&l2tp_hash.lock);
1054
	/* first pass: verify every bucket is empty */
1055 for (i = 0; i < l2tp_hash.mask + 1; i++) {
1056 if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
1057 l2tp_hash) != NULL) {
1058 mutex_exit(&l2tp_hash.lock);
1059 return EBUSY;
1060 }
1061 }
1062 for (i = 0; i < l2tp_hash.mask + 1; i++)
1063 PSLIST_DESTROY(&l2tp_hash.lists[i]);
1064
1065 mutex_exit(&l2tp_hash.lock);
1066
1067 hashdone(l2tp_hash.lists, HASH_PSLIST, l2tp_hash.mask);
1068
1069 return 0;
1070 }
1071
/*
 * Set local/peer session ids.  Publishes a new variant carrying the
 * ids, removes the softc from its old hash bucket (if it had a fully
 * configured session), and re-inserts it into the bucket for the new
 * local id.  Caller (l2tp_ioctl) has already rejected duplicates and
 * zero ids.  Always returns 0.
 */
1072 static int
1073 l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
1074 uint32_t peer_sess_id)
1075 {
1076 uint32_t idx;
1077 struct l2tp_variant *nvar;
1078 struct l2tp_variant *ovar;
1079 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1080
1081 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1082
1083 mutex_enter(&sc->l2tp_lock);
1084 ovar = sc->l2tp_var;
1085 *nvar = *ovar;
1086 psref_target_init(&nvar->lv_psref, lv_psref_class);
1087 nvar->lv_my_sess_id = my_sess_id;
1088 nvar->lv_peer_sess_id = peer_sess_id;
1089 membar_producer();
1090
	/* unhash the old session before publishing the new ids */
1091 mutex_enter(&l2tp_hash.lock);
1092 if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
1093 PSLIST_WRITER_REMOVE(sc, l2tp_hash);
	/* wait for lookup readers before the entry is reused */
1094 pserialize_perform(l2tp_psz);
1095 }
1096 mutex_exit(&l2tp_hash.lock);
1097 PSLIST_ENTRY_DESTROY(sc, l2tp_hash);
1098
1099 l2tp_variant_update(sc, nvar);
1100 mutex_exit(&sc->l2tp_lock);
1101
1102 idx = id_hash_func(nvar->lv_my_sess_id, l2tp_hash.mask);
1103 if ((ifp->if_flags & IFF_DEBUG) != 0)
1104 log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
1105 sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);
1106
1107 PSLIST_ENTRY_INIT(sc, l2tp_hash);
1108 mutex_enter(&l2tp_hash.lock);
1109 PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
1110 mutex_exit(&l2tp_hash.lock);
1111
1112 kmem_free(ovar, sizeof(*ovar));
1113 return 0;
1114 }
1115
/*
 * Clear the session ids: publish a new variant with both ids zeroed
 * and remove the softc from the session hash if it was inserted.
 * Always returns 0.
 */
1116 static int
1117 l2tp_clear_session(struct l2tp_softc *sc)
1118 {
1119 struct l2tp_variant *nvar;
1120 struct l2tp_variant *ovar;
1121
1122 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1123
1124 mutex_enter(&sc->l2tp_lock);
1125 ovar = sc->l2tp_var;
1126 *nvar = *ovar;
1127 psref_target_init(&nvar->lv_psref, lv_psref_class);
1128 nvar->lv_my_sess_id = 0;
1129 nvar->lv_peer_sess_id = 0;
1130 membar_producer();
1131
	/* only a fully configured session was ever hashed */
1132 mutex_enter(&l2tp_hash.lock);
1133 if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
1134 PSLIST_WRITER_REMOVE(sc, l2tp_hash);
1135 pserialize_perform(l2tp_psz);
1136 }
1137 mutex_exit(&l2tp_hash.lock);
1138
1139 l2tp_variant_update(sc, nvar);
1140 mutex_exit(&sc->l2tp_lock);
1141 kmem_free(ovar, sizeof(*ovar));
1142 return 0;
1143 }
1144
/*
 * Look up the variant whose local session id matches `id`.
 * Runs in a pserialize read section; on a hit, acquires a psref for
 * the caller (who must release it) and returns the variant, else NULL.
 * Caller must be bound to a CPU (curlwp_bind()).
 */
1145 struct l2tp_variant *
1146 l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
1147 {
1148 int idx;
1149 int s;
1150 struct l2tp_softc *sc;
1151
1152 idx = id_hash_func(id, l2tp_hash.mask);
1153
1154 s = pserialize_read_enter();
1155 PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
1156 l2tp_hash) {
1157 struct l2tp_variant *var = sc->l2tp_var;
1158 if (var == NULL)
1159 continue;
1160 if (var->lv_my_sess_id != id)
1161 continue;
	/* pin the variant before leaving the read section */
1162 psref_acquire(psref, &var->lv_psref, lv_psref_class);
1163 pserialize_read_exit(s);
1164 return var;
1165 }
1166 pserialize_read_exit(s);
1167 return NULL;
1168 }
1169
1170 /*
1171 * l2tp_variant update API.
1172 *
1173 * Assumption:
1174 * reader side dereferences sc->l2tp_var in reader critical section only,
1175 * that is, all of reader sides do not reader the sc->l2tp_var after
1176 * pserialize_perform().
1177 */
/*
 * Publish `nvar` as the current variant and retire the old one:
 * waits for pserialize readers and psref holders of the old variant,
 * then destroys its psref target.  Caller holds sc->l2tp_lock and owns
 * freeing the old variant afterwards.  Also refreshes IFF_RUNNING to
 * reflect whether tunnel endpoints are configured.
 */
1178 static void
1179 l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
1180 {
1181 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1182 struct l2tp_variant *ovar = sc->l2tp_var;
1183
1184 KASSERT(mutex_owned(&sc->l2tp_lock));
1185
1186 sc->l2tp_var = nvar;
	/* wait out readers that may still hold the old pointer */
1187 pserialize_perform(l2tp_psz);
1188 psref_target_destroy(&ovar->lv_psref, lv_psref_class);
1189
1190 /*
1191 * In the manual of atomic_swap_ptr(3), there is no mention if 2nd
1192 * argument is rewrite or not. So, use sc->l2tp_var instead of nvar.
1193 */
1194 if (sc->l2tp_var != NULL) {
1195 if (sc->l2tp_var->lv_psrc != NULL
1196 && sc->l2tp_var->lv_pdst != NULL)
1197 ifp->if_flags |= IFF_RUNNING;
1198 else
1199 ifp->if_flags &= ~IFF_RUNNING;
1200 }
1201 }
1202
1203 static int
1204 l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
1205 uint64_t peer_cookie, u_int peer_cookie_len)
1206 {
1207 struct l2tp_variant *nvar;
1208
1209 if (my_cookie == 0 || peer_cookie == 0)
1210 return EINVAL;
1211
1212 if (my_cookie_len != 4 && my_cookie_len != 8
1213 && peer_cookie_len != 4 && peer_cookie_len != 8)
1214 return EINVAL;
1215
1216 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1217
1218 mutex_enter(&sc->l2tp_lock);
1219
1220 *nvar = *sc->l2tp_var;
1221 psref_target_init(&nvar->lv_psref, lv_psref_class);
1222 nvar->lv_my_cookie = my_cookie;
1223 nvar->lv_my_cookie_len = my_cookie_len;
1224 nvar->lv_peer_cookie = peer_cookie;
1225 nvar->lv_peer_cookie_len = peer_cookie_len;
1226 nvar->lv_use_cookie = L2TP_COOKIE_ON;
1227 membar_producer();
1228 l2tp_variant_update(sc, nvar);
1229
1230 mutex_exit(&sc->l2tp_lock);
1231
1232 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1233 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1234 log(LOG_DEBUG,
1235 "%s: set cookie: "
1236 "local cookie_len=%u local cookie=%" PRIu64 ", "
1237 "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
1238 ifp->if_xname, my_cookie_len, my_cookie,
1239 peer_cookie_len, peer_cookie);
1240 }
1241
1242 return 0;
1243 }
1244
1245 static void
1246 l2tp_clear_cookie(struct l2tp_softc *sc)
1247 {
1248 struct l2tp_variant *nvar;
1249
1250 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1251
1252 mutex_enter(&sc->l2tp_lock);
1253
1254 *nvar = *sc->l2tp_var;
1255 psref_target_init(&nvar->lv_psref, lv_psref_class);
1256 nvar->lv_my_cookie = 0;
1257 nvar->lv_my_cookie_len = 0;
1258 nvar->lv_peer_cookie = 0;
1259 nvar->lv_peer_cookie_len = 0;
1260 nvar->lv_use_cookie = L2TP_COOKIE_OFF;
1261 membar_producer();
1262 l2tp_variant_update(sc, nvar);
1263
1264 mutex_exit(&sc->l2tp_lock);
1265 }
1266
1267 static void
1268 l2tp_set_state(struct l2tp_softc *sc, int state)
1269 {
1270 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1271 struct l2tp_variant *nvar;
1272
1273 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1274
1275 mutex_enter(&sc->l2tp_lock);
1276
1277 *nvar = *sc->l2tp_var;
1278 psref_target_init(&nvar->lv_psref, lv_psref_class);
1279 nvar->lv_state = state;
1280 membar_producer();
1281 l2tp_variant_update(sc, nvar);
1282
1283 if (nvar->lv_state == L2TP_STATE_UP) {
1284 ifp->if_link_state = LINK_STATE_UP;
1285 } else {
1286 ifp->if_link_state = LINK_STATE_DOWN;
1287 }
1288
1289 mutex_exit(&sc->l2tp_lock);
1290
1291 #ifdef NOTYET
1292 vlan_linkstate_notify(ifp, ifp->if_link_state);
1293 #endif
1294 }
1295
1296 static int
1297 l2tp_encap_attach(struct l2tp_variant *var)
1298 {
1299 int error;
1300
1301 if (var == NULL || var->lv_psrc == NULL)
1302 return EINVAL;
1303
1304 switch (var->lv_psrc->sa_family) {
1305 #ifdef INET
1306 case AF_INET:
1307 error = in_l2tp_attach(var);
1308 break;
1309 #endif
1310 #ifdef INET6
1311 case AF_INET6:
1312 error = in6_l2tp_attach(var);
1313 break;
1314 #endif
1315 default:
1316 error = EINVAL;
1317 break;
1318 }
1319
1320 return error;
1321 }
1322
1323 static int
1324 l2tp_encap_detach(struct l2tp_variant *var)
1325 {
1326 int error;
1327
1328 if (var == NULL || var->lv_psrc == NULL)
1329 return EINVAL;
1330
1331 switch (var->lv_psrc->sa_family) {
1332 #ifdef INET
1333 case AF_INET:
1334 error = in_l2tp_detach(var);
1335 break;
1336 #endif
1337 #ifdef INET6
1338 case AF_INET6:
1339 error = in6_l2tp_detach(var);
1340 break;
1341 #endif
1342 default:
1343 error = EINVAL;
1344 break;
1345 }
1346
1347 return error;
1348 }
1349
/*
 * Guard against runaway recursive tunnel encapsulation: delegate to
 * if_tunnel_check_nesting() with the global max_l2tp_nesting limit.
 */
int
l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
{

	return if_tunnel_check_nesting(ifp, m, max_l2tp_nesting);
}
1356
1357 /*
1358 * Module infrastructure
1359 */
1360 #include "if_module.h"
1361
1362 IF_MODULE(MODULE_CLASS_DRIVER, l2tp, "")
1363
1364
/* TODO: IP_TCPMSS support */
#ifdef IP_TCPMSS
/* Forward declarations for the MSS-clamp helpers defined below. */
static int l2tp_need_tcpmss_clamp(struct ifnet *);
#ifdef INET
static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
#endif
#ifdef INET6
static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
#endif
1374
/*
 * Clamp the TCP MSS option of frames carried over the tunnel, when
 * the interface is configured for it (if_tcpmss/if_tcpmss6 nonzero).
 *
 * m is an Ethernet frame, optionally VLAN tagged.  The Ethernet (and
 * VLAN) header is saved and stripped, the inner IPv4/IPv6 packet is
 * handed to the per-AF clamp routine, and the saved header is then
 * prepended again.  Returns the (possibly replaced) mbuf chain, or
 * NULL when the packet was freed on error; callers must not touch m
 * after a NULL return.
 */
struct mbuf *
l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	struct ether_vlan_header evh;

	if (!l2tp_need_tcpmss_clamp(ifp)) {
		return m;
	}

	/* Too short to hold even a VLAN-tagged header: drop. */
	if (m->m_pkthdr.len < sizeof(evh)) {
		m_freem(m);
		return NULL;
	}

	/* save ether header */
	m_copydata(m, 0, sizeof(evh), (void *)&evh);
	/* eh aliases the saved copy; valid for the plain-Ether cases too. */
	eh = (struct ether_header *)&evh;

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_VLAN: /* Ether + VLAN */
		/* No payload beyond the header: nothing to clamp. */
		if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
			break;
		m_adj(m, sizeof(struct ether_vlan_header));
		switch (ntohs(evh.evl_proto)) {
#ifdef INET
		case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
			m = l2tp_tcpmss4_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
			m = l2tp_tcpmss6_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET6 */
		default:
			break;
		}

		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_vlan_header),
		    M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_vlan_header *) = evh;
		break;

#ifdef INET
	case ETHERTYPE_IP: /* Ether + IPv4 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss4_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET */

#ifdef INET6
	case ETHERTYPE_IPV6: /* Ether + IPv6 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss6_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET6 */

	default:
		break;
	}

	return m;
}
1464
/*
 * Return nonzero when MSS clamping is enabled for ifp in either
 * address family.
 */
static int
l2tp_need_tcpmss_clamp(struct ifnet *ifp)
{

#ifdef INET
	if (ifp->if_tcpmss != 0)
		return 1;
#endif

#ifdef INET6
	if (ifp->if_tcpmss6 != 0)
		return 1;
#endif

	return 0;
}
1482
#ifdef INET
/*
 * Clamp the MSS of an IPv4 TCP packet.  A negative if_tcpmss means
 * "derive from the interface MTU".
 */
static struct mbuf *
l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
{

	if (ifp->if_tcpmss == 0)
		return m;

	return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
	    ifp->if_mtu - IP_TCPMSS_EXTLEN :
	    ifp->if_tcpmss);
}
#endif /* INET */
1496
#ifdef INET6
/*
 * Clamp the MSS of an IPv6 TCP packet.  A negative if_tcpmss6 means
 * "derive from the interface MTU".
 */
static struct mbuf *
l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
{
	int ip6hdrlen;

	if (ifp->if_tcpmss6 == 0)
		return m;
	if (!ip6_tcpmss_applicable(m, &ip6hdrlen))
		return m;

	return ip6_tcpmss(m, ip6hdrlen,
	    (ifp->if_tcpmss6 < 0) ?
	    ifp->if_mtu - IP6_TCPMSS_EXTLEN :
	    ifp->if_tcpmss6);
}
#endif /* INET6 */
1513
1514 #endif /* IP_TCPMSS */
1515