/* $NetBSD: if_l2tp.c,v 1.9 2017/04/13 00:12:10 knakahara Exp $ */

/*
 * Copyright (c) 2017 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * L2TPv3 kernel interface
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.9 2017/04/13 00:12:10 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/cpu.h>
#include <sys/cprng.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/pserialize.h>
#include <sys/device.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/bpf.h>
#include <net/if_vlanvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_encap.h>
#ifdef INET
#include <netinet/in_var.h>
#include <netinet/in_l2tp.h>
#endif /* INET */
#ifdef INET6
#include <netinet6/in6_l2tp.h>
#endif

#include <net/if_l2tp.h>

#include <net/if_vlanvar.h>

/* TODO: IP_TCPMSS support */
#undef IP_TCPMSS
#ifdef IP_TCPMSS
#include <netinet/ip_tcpmss.h>
#endif

#include <net/bpf.h>
#include <net/net_osdep.h>

/*
 * l2tp global variable definitions
 */
LIST_HEAD(l2tp_sclist, l2tp_softc);
static struct {
	struct l2tp_sclist list;
	kmutex_t lock;
} l2tp_softcs __cacheline_aligned;


#if !defined(L2TP_ID_HASH_SIZE)
#define L2TP_ID_HASH_SIZE 64
#endif
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
	u_long mask;
} l2tp_hash __cacheline_aligned = {
	.lists = NULL,
};

pserialize_t l2tp_psz __read_mostly;
struct psref_class *lv_psref_class __read_mostly;

static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);

static int l2tp_clone_create(struct if_clone *, int);
static int l2tp_clone_destroy(struct ifnet *);

struct if_clone l2tp_cloner =
    IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);

static int l2tp_output(struct ifnet *, struct mbuf *,
    const struct sockaddr *, const struct rtentry *);
static void l2tpintr(struct l2tp_variant *);

static void l2tp_hash_init(void);
static int l2tp_hash_fini(void);

static void l2tp_start(struct ifnet *);
static int l2tp_transmit(struct ifnet *, struct mbuf *);

static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
    struct sockaddr *);
static void l2tp_delete_tunnel(struct ifnet *);

static int id_hash_func(uint32_t, u_long);

static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
static int l2tp_clear_session(struct l2tp_softc *);
static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
static void l2tp_clear_cookie(struct l2tp_softc *);
static void l2tp_set_state(struct l2tp_softc *, int);
static int l2tp_encap_attach(struct l2tp_variant *);
static int l2tp_encap_detach(struct l2tp_variant *);

#ifndef MAX_L2TP_NEST
/*
 * This macro controls the upper limit on nesting of l2tp tunnels.
 * Since a careless configuration combined with a large value here can
 * crash the system, no nesting is allowed by default.  If you need
 * nested l2tp tunnels, define this macro in your kernel configuration
 * file.  However, if you do so, please be careful to configure the
 * tunnels so that they do not form a loop.
 */
/*
 * XXX
 * Currently, if in_l2tp_output() is called recursively, it locks against
 * itself on struct l2tp_ro->lr_lock.  So nested l2tp tunnels are
 * prohibited for now.
 */
#define MAX_L2TP_NEST 0
#endif

static int max_l2tp_nesting = MAX_L2TP_NEST;

/* ARGSUSED */
void
l2tpattach(int count)
{
	/*
	 * Nothing to do here; initialization is handled by the
	 * module initialization code in l2tpinit() below.
	 */
}

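/*
 * Module bring-up: initialize the softc list and session-ID hash locks,
 * create the pserialize/psref machinery, and attach the interface cloner.
 */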
static void
l2tpinit(void)
{

	mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&l2tp_softcs.list);

	mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	l2tp_psz = pserialize_create();
	lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
	if_clone_attach(&l2tp_cloner);

	l2tp_hash_init();
}

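/*
 * Module tear-down: refuse with EBUSY while any l2tp interface or hash
 * entry still exists, otherwise detach the cloner and destroy the locks.
 */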
static int
l2tpdetach(void)
{
	int error;

	mutex_enter(&l2tp_softcs.lock);
	if (!LIST_EMPTY(&l2tp_softcs.list)) {
		mutex_exit(&l2tp_softcs.lock);
		return EBUSY;
	}
	mutex_exit(&l2tp_softcs.lock);

	error = l2tp_hash_fini();
	if (error)
		return error;

	if_clone_detach(&l2tp_cloner);
	psref_class_destroy(lv_psref_class);
	pserialize_destroy(l2tp_psz);
	mutex_destroy(&l2tp_hash.lock);

	mutex_destroy(&l2tp_softcs.lock);

	return error;
}

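/*
 * Cloner create: allocate the softc and its initial variant, attach the
 * interface, and register the softc on the global list.
 */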
static int
l2tp_clone_create(struct if_clone *ifc, int unit)
{
	struct l2tp_softc *sc;
	struct l2tp_variant *var;

	sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
	var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);

	var->lv_softc = sc;
	var->lv_state = L2TP_STATE_DOWN;
	var->lv_use_cookie = L2TP_COOKIE_OFF;
	psref_target_init(&var->lv_psref, lv_psref_class);

	sc->l2tp_var = var;
	mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
	PSLIST_ENTRY_INIT(sc, l2tp_hash);

	if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);

	l2tpattach0(sc);

	sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
	KASSERTMSG(sc->l2tp_ro_percpu != NULL,
	    "failed to allocate sc->l2tp_ro_percpu");
	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);

	mutex_enter(&l2tp_softcs.lock);
	LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	return (0);
}

void
l2tpattach0(struct l2tp_softc *sc)
{

	sc->l2tp_ec.ec_if.if_addrlen = 0;
	sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
	sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
	sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
	sc->l2tp_ec.ec_if.if_output = l2tp_output;
	sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
	sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
	sc->l2tp_ec.ec_if.if_start = l2tp_start;
	sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
	sc->l2tp_ec.ec_if._if_input = ether_input;
	IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);
	if_attach(&sc->l2tp_ec.ec_if);
	if_alloc_sadl(&sc->l2tp_ec.ec_if);
	bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));
}

void
l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct l2tp_ro *lro = p;

	mutex_init(&lro->lr_lock, MUTEX_DEFAULT, IPL_NONE);
}

void
l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct l2tp_ro *lro = p;

	rtcache_free(&lro->lr_ro);

	mutex_destroy(&lro->lr_lock);
}

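/*
 * Cloner destroy: clear session and tunnel state, unpublish the variant so
 * concurrent senders can no longer reach it, then detach and free.
 */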
static int
l2tp_clone_destroy(struct ifnet *ifp)
{
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	l2tp_clear_session(sc);
	l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
	/*
	 * Prevent l2tp_transmit() from accessing sc->l2tp_var after it
	 * has been freed.
	 */
	mutex_enter(&sc->l2tp_lock);
	var = sc->l2tp_var;
	l2tp_variant_update(sc, NULL);
	mutex_exit(&sc->l2tp_lock);

	mutex_enter(&l2tp_softcs.lock);
	LIST_REMOVE(sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	bpf_detach(ifp);

	if_detach(ifp);

	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
	percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));

	kmem_free(var, sizeof(struct l2tp_variant));
	mutex_destroy(&sc->l2tp_lock);
	kmem_free(sc, sizeof(struct l2tp_softc));

	return 0;
}

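/*
 * if_output handler: prepend the inner address family in DLT_NULL style,
 * enqueue the frame and call l2tpintr() directly to drain the queue.
 */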
static int
l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var;
	struct psref psref;
	int error = 0;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* XXX should we check if our outer source is legal? */

	/* use DLT_NULL encapsulation here to pass inner af type */
	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (!m) {
		error = ENOBUFS;
		goto end;
	}
	*mtod(m, int *) = dst->sa_family;

	IFQ_ENQUEUE(&ifp->if_snd, m, error);
	if (error)
		goto end;

	/*
	 * direct call to avoid infinite loop at l2tpintr()
	 */
	l2tpintr(var);

	error = 0;

end:
	l2tp_putref_variant(var, &psref);
	if (error)
		ifp->if_oerrors++;

	return error;
}

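/*
 * Drain ifp->if_snd and encapsulate each frame via the address-family
 * specific output routine.  The caller must hold a psref to var.
 */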
static void
l2tpintr(struct l2tp_variant *var)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* output processing */
	if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		m->m_flags &= ~(M_BCAST|M_MCAST);
		bpf_mtap(ifp, m);
		switch (var->lv_psrc->sa_family) {
#ifdef INET
		case AF_INET:
			error = in_l2tp_output(var, m);
			break;
#endif
#ifdef INET6
		case AF_INET6:
			error = in6_l2tp_output(var, m);
			break;
#endif
		default:
			m_freem(m);
			error = ENETDOWN;
			break;
		}

		if (error)
			ifp->if_oerrors++;
		else {
			ifp->if_opackets++;
			/*
			 * obytes is incremented at ether_output() or
			 * bridge_enqueue().
			 */
		}
	}

}

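/*
 * Input path: if the inner Ethernet header starts on a 4-byte boundary,
 * copy the head of the payload into a new mbuf shifted by two bytes so
 * that the inner IP header ends up 4-byte aligned, then hand the frame
 * to the interface's per-CPU input queue.
 */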
void
l2tp_input(struct mbuf *m, struct ifnet *ifp)
{

	KASSERT(ifp != NULL);

	if (0 == (mtod(m, u_long) & 0x03)) {
		/* copy and align head of payload */
		struct mbuf *m_head;
		int copy_length;

#define L2TP_COPY_LENGTH 60
#define L2TP_LINK_HDR_ROOM (MHLEN - L2TP_COPY_LENGTH - 4/*round4(2)*/)

		if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
			copy_length = m->m_pkthdr.len;
		} else {
			copy_length = L2TP_COPY_LENGTH;
		}

		if (m->m_len < copy_length) {
			m = m_pullup(m, copy_length);
			if (m == NULL)
				return;
		}

		MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
		if (m_head == NULL) {
			m_freem(m);
			return;
		}
		M_COPY_PKTHDR(m_head, m);

		m_head->m_data += 2 /* align */ + L2TP_LINK_HDR_ROOM;
		memcpy(m_head->m_data, m->m_data, copy_length);
		m_head->m_len = copy_length;
		m->m_data += copy_length;
		m->m_len -= copy_length;

		/* construct chain */
		if (m->m_len == 0) {
			m_head->m_next = m_free(m); /* not m_freem */
		} else {
			/*
			 * The mbuf tags were already copied to m_head by
			 * M_COPY_PKTHDR above; clearing M_PKTHDR does not
			 * delete them from m, so do it explicitly here.
			 */
			m_tag_delete_chain(m, NULL);
			m->m_flags &= ~M_PKTHDR;
			m_head->m_next = m;
		}

		/* override m */
		m = m_head;
	}

	m_set_rcvif(m, ifp);

	/*
	 * bpf_mtap() and ifp->if_ipackets++ are done in if_input().
	 *
	 * obytes is incremented at ether_output() or bridge_enqueue().
	 */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}

void
l2tp_start(struct ifnet *ifp)
{
	struct psref psref;
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL)
		return;

	if (var->lv_psrc == NULL || var->lv_pdst == NULL)
		return;

	l2tpintr(var);
	l2tp_putref_variant(var, &psref);
}

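/*
 * if_transmit handler: encapsulate the frame directly, bypassing
 * ifp->if_snd.
 */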
int
l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
{
	int error;
	struct psref psref;
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

	m->m_flags &= ~(M_BCAST|M_MCAST);
	bpf_mtap(ifp, m);
	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_output(var, m);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_output(var, m);
		break;
#endif
	default:
		m_freem(m);
		error = ENETDOWN;
		break;
	}

	if (error)
		ifp->if_oerrors++;
	else {
		ifp->if_opackets++;
		/*
		 * obytes is incremented at ether_output() or bridge_enqueue().
		 */
	}

out:
	l2tp_putref_variant(var, &psref);
	return error;
}

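/*
 * Interface ioctl handler: tunnel endpoint addresses, session IDs,
 * cookies and session state are all configured through here.
 */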
/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
int
l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var, *var_tmp;
	struct ifreq *ifr = data;
	int error = 0, size;
	struct sockaddr *dst, *src;
	struct l2tp_req l2tpr;
	u_long mtu;
	int bound;
	struct psref psref;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		break;

	case SIOCSIFDSTADDR:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:	/* IP supports Multicast */
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif /* INET6 */
		default:	/* Other protocols don't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
			return (EINVAL);
		ifp->if_mtu = mtu;
		break;

#ifdef INET
	case SIOCSIFPHYADDR:
		src = (struct sockaddr *)
		    &(((struct in_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
		    &(((struct in_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in)
		    || dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET */
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
		src = (struct sockaddr *)
		    &(((struct in6_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
		    &(((struct in6_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in6)
		    || dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET6 */
	case SIOCSLIFPHYADDR:
		src = (struct sockaddr *)
		    &(((struct if_laddrreq *)data)->addr);
		dst = (struct sockaddr *)
		    &(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_family != dst->sa_family)
			return EINVAL;
		else if (src->sa_family == AF_INET
		    && src->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (src->sa_family == AF_INET6
		    && src->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		else if (dst->sa_family == AF_INET
		    && dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (dst->sa_family == AF_INET6
		    && dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

	case SIOCDIFPHYADDR:
		l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
		break;

	case SIOCGIFPSRCADDR:
#ifdef INET6
	case SIOCGIFPSRCADDR_IN6:
#endif /* INET6 */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_psrc;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPSRCADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPSRCADDR_IN6:
			dst = (struct sockaddr *)
			    &(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGIFPDSTADDR:
#ifdef INET6
	case SIOCGIFPDSTADDR_IN6:
#endif /* INET6 */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_pdst;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPDSTADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPDSTADDR_IN6:
			dst = (struct sockaddr *)
			    &(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGLIFPHYADDR:
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		/* copy src */
		src = var->lv_psrc;
		dst = (struct sockaddr *)
		    &(((struct if_laddrreq *)data)->addr);
		size = sizeof(((struct if_laddrreq *)data)->addr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);

		/* copy dst */
		src = var->lv_pdst;
		dst = (struct sockaddr *)
		    &(((struct if_laddrreq *)data)->dstaddr);
		size = sizeof(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCSL2TPSESSION:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		/* session IDs must not be zero */
		if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
			return EINVAL;

		bound = curlwp_bind();
		var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
		if (var_tmp != NULL) {
			/* duplicate session id */
			log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
			    sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
			    var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
			psref_release(&psref, &var_tmp->lv_psref,
			    lv_psref_class);
			curlwp_bindx(bound);
			return EINVAL;
		}
		curlwp_bindx(bound);

		error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
		break;
	case SIOCDL2TPSESSION:
		l2tp_clear_session(sc);
		break;
	case SIOCSL2TPCOOKIE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
		    l2tpr.peer_cookie, l2tpr.peer_cookie_len);
		break;
	case SIOCDL2TPCOOKIE:
		l2tp_clear_cookie(sc);
		break;
	case SIOCSL2TPSTATE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		l2tp_set_state(sc, l2tpr.state);
		break;
	case SIOCGL2TP:
		/* get L2TPV3 session info */
		memset(&l2tpr, 0, sizeof(l2tpr));

		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		l2tpr.state = var->lv_state;
		l2tpr.my_sess_id = var->lv_my_sess_id;
		l2tpr.peer_sess_id = var->lv_peer_sess_id;
		l2tpr.my_cookie = var->lv_my_cookie;
		l2tpr.my_cookie_len = var->lv_my_cookie_len;
		l2tpr.peer_cookie = var->lv_peer_cookie;
		l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);

		error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
		break;
	}
bad:
	return error;
}

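/*
 * Set the outer (tunnel) source/destination addresses by building a new
 * variant, attaching the encap hooks and publishing it with
 * l2tp_variant_update().
 */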
static int
l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct sockaddr *nsrc, *ndst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nsrc = sockaddr_dup(src, M_WAITOK);
	ndst = sockaddr_dup(dst, M_WAITOK);

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error)
		goto error;

	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = nsrc;
	nvar->lv_pdst = ndst;
	error = l2tp_encap_attach(nvar);
	if (error) {
		mutex_exit(&sc->l2tp_lock);
		encap_lock_exit();
		goto error;
	}
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));

	return 0;

error:
	sockaddr_free(nsrc);
	sockaddr_free(ndst);
	kmem_free(nvar, sizeof(*nvar));

	return error;
}

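/*
 * Clear the outer (tunnel) addresses by publishing a variant with NULL
 * endpoints and detaching the old encap hooks.
 */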
static void
l2tp_delete_tunnel(struct ifnet *ifp)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error) {
		kmem_free(nvar, sizeof(*nvar));
		return;
	}
	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = NULL;
	nvar->lv_pdst = NULL;
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));
}

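/* Fold a 32-bit session ID into an index for the session hash table. */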
static int
id_hash_func(uint32_t id, u_long mask)
{
	uint32_t hash;

	hash = (id >> 16) ^ id;
	hash = (hash >> 4) ^ hash;

	return hash & mask;
}

static void
l2tp_hash_init(void)
{

	l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
	    &l2tp_hash.mask);
}

static int
l2tp_hash_fini(void)
{
	int i;

	mutex_enter(&l2tp_hash.lock);

	for (i = 0; i < l2tp_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
		    l2tp_hash) != NULL) {
			mutex_exit(&l2tp_hash.lock);
			return EBUSY;
		}
	}
	for (i = 0; i < l2tp_hash.mask + 1; i++)
		PSLIST_DESTROY(&l2tp_hash.lists[i]);

	mutex_exit(&l2tp_hash.lock);

	hashdone(l2tp_hash.lists, HASH_PSLIST, l2tp_hash.mask);

	return 0;
}

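/*
 * Set the local/peer session IDs: publish a new variant and (re)insert
 * the softc into the session-ID hash so l2tp_lookup_session_ref() can
 * find it on input.
 */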
static int
l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
    uint32_t peer_sess_id)
{
	uint32_t idx;
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = my_sess_id;
	nvar->lv_peer_sess_id = peer_sess_id;
	membar_producer();

	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);

	idx = id_hash_func(nvar->lv_my_sess_id, l2tp_hash.mask);
	if ((ifp->if_flags & IFF_DEBUG) != 0)
		log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
		    sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);

	mutex_enter(&l2tp_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
	mutex_exit(&l2tp_hash.lock);

	kmem_free(ovar, sizeof(*ovar));
	return 0;
}

static int
l2tp_clear_session(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = 0;
	nvar->lv_peer_sess_id = 0;
	membar_producer();

	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);
	kmem_free(ovar, sizeof(*ovar));
	return 0;
}

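/*
 * Look up the variant whose local session ID matches "id".  On success a
 * psref is acquired on the variant; the caller must release it.
 */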
struct l2tp_variant *
l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
{
	int idx;
	int s;
	struct l2tp_softc *sc;

	idx = id_hash_func(id, l2tp_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
	    l2tp_hash) {
		struct l2tp_variant *var = sc->l2tp_var;
		if (var == NULL)
			continue;
		if (var->lv_my_sess_id != id)
			continue;
		psref_acquire(psref, &var->lv_psref, lv_psref_class);
		pserialize_read_exit(s);
		return var;
	}
	pserialize_read_exit(s);
	return NULL;
}

/*
 * l2tp_variant update API.
 *
 * Assumption:
 * Readers dereference sc->l2tp_var only inside a reader critical section,
 * i.e. no reader accesses sc->l2tp_var after pserialize_perform() returns.
 */
static void
l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
{
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	struct l2tp_variant *ovar = sc->l2tp_var;

	KASSERT(mutex_owned(&sc->l2tp_lock));

	sc->l2tp_var = nvar;
	pserialize_perform(l2tp_psz);
	psref_target_destroy(&ovar->lv_psref, lv_psref_class);

	/*
	 * The atomic_swap_ptr(3) manual does not say whether the second
	 * argument may be modified, so use sc->l2tp_var instead of nvar.
	 */
	if (sc->l2tp_var != NULL) {
		if (sc->l2tp_var->lv_psrc != NULL
		    && sc->l2tp_var->lv_pdst != NULL)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
	}
}

static int
l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
    uint64_t peer_cookie, u_int peer_cookie_len)
{
	struct l2tp_variant *nvar;

	if (my_cookie == 0 || peer_cookie == 0)
		return EINVAL;

	if (my_cookie_len != 4 && my_cookie_len != 8
	    && peer_cookie_len != 4 && peer_cookie_len != 8)
		return EINVAL;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_cookie = my_cookie;
	nvar->lv_my_cookie_len = my_cookie_len;
	nvar->lv_peer_cookie = peer_cookie;
	nvar->lv_peer_cookie_len = peer_cookie_len;
	nvar->lv_use_cookie = L2TP_COOKIE_ON;
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	if ((ifp->if_flags & IFF_DEBUG) != 0) {
		log(LOG_DEBUG,
		    "%s: set cookie: "
		    "local cookie_len=%u local cookie=%" PRIu64 ", "
		    "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
		    ifp->if_xname, my_cookie_len, my_cookie,
		    peer_cookie_len, peer_cookie);
	}

	return 0;
}

static void
l2tp_clear_cookie(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_cookie = 0;
	nvar->lv_my_cookie_len = 0;
	nvar->lv_peer_cookie = 0;
	nvar->lv_peer_cookie_len = 0;
	nvar->lv_use_cookie = L2TP_COOKIE_OFF;
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);
}

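/*
 * Set the administrative session state and reflect it in the interface
 * link state.
 */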
static void
l2tp_set_state(struct l2tp_softc *sc, int state)
{
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	struct l2tp_variant *nvar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_state = state;
	membar_producer();
	l2tp_variant_update(sc, nvar);

	if (nvar->lv_state == L2TP_STATE_UP) {
		ifp->if_link_state = LINK_STATE_UP;
	} else {
		ifp->if_link_state = LINK_STATE_DOWN;
	}

	mutex_exit(&sc->l2tp_lock);

#ifdef NOTYET
	vlan_linkstate_notify(ifp, ifp->if_link_state);
#endif
}

static int
l2tp_encap_attach(struct l2tp_variant *var)
{
	int error;

	if (var == NULL || var->lv_psrc == NULL)
		return EINVAL;

	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_attach(var);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_attach(var);
		break;
#endif
	default:
		error = EINVAL;
		break;
	}

	return error;
}

static int
l2tp_encap_detach(struct l2tp_variant *var)
{
	int error;

	if (var == NULL || var->lv_psrc == NULL)
		return EINVAL;

	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_detach(var);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_detach(var);
		break;
#endif
	default:
		error = EINVAL;
		break;
	}

	return error;
}

/*
 * TODO:
 * unify with gif_check_nesting().
 */
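/*
 * Limit tunnel nesting by counting traversals in a PACKET_TAG_TUNNEL_INFO
 * mbuf tag; returns EIO once max_l2tp_nesting is exceeded.
 */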
int
l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
{
	struct m_tag *mtag;
	int *count;

	mtag = m_tag_find(m, PACKET_TAG_TUNNEL_INFO, NULL);
	if (mtag != NULL) {
		count = (int *)(mtag + 1);
		if (++(*count) > max_l2tp_nesting) {
			log(LOG_NOTICE,
			    "%s: recursively called too many times(%d)\n",
			    if_name(ifp),
			    *count);
			return EIO;
		}
	} else {
		mtag = m_tag_get(PACKET_TAG_TUNNEL_INFO, sizeof(*count),
		    M_NOWAIT);
		if (mtag != NULL) {
			m_tag_prepend(m, mtag);
			count = (int *)(mtag + 1);
			*count = 0;
		}
#ifdef L2TP_DEBUG
		else {
			log(LOG_DEBUG,
			    "%s: m_tag_get() failed, recursion calls are not prevented.\n",
			    if_name(ifp));
		}
#endif
	}

	return 0;
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, l2tp, "")


/* TODO: IP_TCPMSS support */
#ifdef IP_TCPMSS
static int l2tp_need_tcpmss_clamp(struct ifnet *);
#ifdef INET
static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
#endif
#ifdef INET6
static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
#endif

struct mbuf *
l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
{

	if (l2tp_need_tcpmss_clamp(ifp)) {
		struct ether_header *eh;
		struct ether_vlan_header evh;

		/* save ether header */
		m_copydata(m, 0, sizeof(evh), (void *)&evh);
		eh = (struct ether_header *)&evh;

		switch (ntohs(eh->ether_type)) {
		case ETHERTYPE_VLAN: /* Ether + VLAN */
			if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
				break;
			m_adj(m, sizeof(struct ether_vlan_header));
			switch (ntohs(evh.evl_proto)) {
#ifdef INET
			case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
				m = l2tp_tcpmss4_clamp(ifp, m);
				if (m == NULL)
					return NULL;
				break;
#endif /* INET */
#ifdef INET6
			case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
				m = l2tp_tcpmss6_clamp(ifp, m);
				if (m == NULL)
					return NULL;
				break;
#endif /* INET6 */
			default:
				break;
			}
			/* restore ether header */
			M_PREPEND(m, sizeof(struct ether_vlan_header),
			    M_DONTWAIT);
			if (m == NULL)
				return NULL;
			*mtod(m, struct ether_vlan_header *) = evh;
			break;
#ifdef INET
		case ETHERTYPE_IP: /* Ether + IPv4 */
			if (m->m_pkthdr.len <= sizeof(struct ether_header))
				break;
			m_adj(m, sizeof(struct ether_header));
			m = l2tp_tcpmss4_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			/* restore ether header */
			M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
			if (m == NULL)
				return NULL;
			*mtod(m, struct ether_header *) = *eh;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6: /* Ether + IPv6 */
			if (m->m_pkthdr.len <= sizeof(struct ether_header))
				break;
			m_adj(m, sizeof(struct ether_header));
			m = l2tp_tcpmss6_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			/* restore ether header */
			M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
			if (m == NULL)
				return NULL;
			*mtod(m, struct ether_header *) = *eh;
			break;
#endif /* INET6 */
		default:
			break;
		}
	}

	return m;
}

static int
l2tp_need_tcpmss_clamp(struct ifnet *ifp)
{
	int ret = 0;

#ifdef INET
	if (ifp->if_tcpmss != 0)
		ret = 1;
#endif /* INET */

#ifdef INET6
	if (ifp->if_tcpmss6 != 0)
		ret = 1;
#endif /* INET6 */

	return ret;
}

#ifdef INET
static struct mbuf *
l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
{

	if (ifp->if_tcpmss != 0) {
		return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
		    ifp->if_mtu - IP_TCPMSS_EXTLEN :
		    ifp->if_tcpmss);
	}
	return m;
}
#endif /* INET */

#ifdef INET6
static struct mbuf *
l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
{
	int ip6hdrlen;

	if (ifp->if_tcpmss6 != 0 &&
	    ip6_tcpmss_applicable(m, &ip6hdrlen)) {
		return ip6_tcpmss(m, ip6hdrlen,
		    (ifp->if_tcpmss6 < 0) ?
		    ifp->if_mtu - IP6_TCPMSS_EXTLEN :
		    ifp->if_tcpmss6);
	}
	return m;
}
#endif /* INET6 */

#endif /* IP_TCPMSS */