if_l2tp.c revision 1.33 1 /* $NetBSD: if_l2tp.c,v 1.33 2018/12/27 07:56:11 knakahara Exp $ */
2
3 /*
4 * Copyright (c) 2017 Internet Initiative Japan Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * L2TPv3 kernel interface
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.33 2018/12/27 07:56:11 knakahara Exp $");
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #include "opt_net_mpsafe.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/errno.h>
48 #include <sys/ioctl.h>
49 #include <sys/time.h>
50 #include <sys/syslog.h>
51 #include <sys/proc.h>
52 #include <sys/conf.h>
53 #include <sys/kauth.h>
54 #include <sys/cpu.h>
55 #include <sys/cprng.h>
56 #include <sys/intr.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/atomic.h>
60 #include <sys/pserialize.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_ether.h>
67 #include <net/if_types.h>
68 #include <net/netisr.h>
69 #include <net/route.h>
70 #include <net/bpf.h>
71 #include <net/if_vlanvar.h>
72
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/ip_encap.h>
77 #ifdef INET
78 #include <netinet/in_var.h>
79 #include <netinet/in_l2tp.h>
80 #endif /* INET */
81 #ifdef INET6
82 #include <netinet6/in6_l2tp.h>
83 #endif
84
85 #include <net/if_l2tp.h>
86
87 #include <net/if_vlanvar.h>
88
89 /* TODO: IP_TCPMSS support */
90 #undef IP_TCPMSS
91 #ifdef IP_TCPMSS
92 #include <netinet/ip_tcpmss.h>
93 #endif
94
/*
 * l2tp global variable definitions
 */
LIST_HEAD(l2tp_sclist, l2tp_softc);

/*
 * List of every existing l2tp(4) softc, guarded by .lock.
 * Written by l2tp_clone_create()/l2tp_clone_destroy(); checked for
 * emptiness on module unload (l2tpdetach()).
 */
static struct {
	struct l2tp_sclist list;
	kmutex_t lock;
} l2tp_softcs __cacheline_aligned;


#if !defined(L2TP_ID_HASH_SIZE)
#define L2TP_ID_HASH_SIZE 64
#endif
/*
 * my-session-id -> softc hash table.  Readers traverse the pslists
 * inside a pserialize read section (see l2tp_lookup_session_ref());
 * writers take .lock (see l2tp_set_session()/l2tp_clear_session()).
 * .lists and .mask are populated by l2tp_hash_init().
 */
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
	u_long mask;
} l2tp_hash __cacheline_aligned = {
	.lists = NULL,
};

/* Passive serialization for the session hash readers. */
pserialize_t l2tp_psz __read_mostly;
/* psref class protecting struct l2tp_variant lifetimes (sc->l2tp_var). */
struct psref_class *lv_psref_class __read_mostly;
118
119 static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
120 static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);
121
122 static int l2tp_clone_create(struct if_clone *, int);
123 static int l2tp_clone_destroy(struct ifnet *);
124
125 struct if_clone l2tp_cloner =
126 IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);
127
128 static int l2tp_output(struct ifnet *, struct mbuf *,
129 const struct sockaddr *, const struct rtentry *);
130 static void l2tpintr(struct l2tp_variant *);
131
132 static void l2tp_hash_init(void);
133 static int l2tp_hash_fini(void);
134
135 static void l2tp_start(struct ifnet *);
136 static int l2tp_transmit(struct ifnet *, struct mbuf *);
137
138 static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
139 struct sockaddr *);
140 static void l2tp_delete_tunnel(struct ifnet *);
141
142 static int id_hash_func(uint32_t, u_long);
143
144 static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
145 static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
146 static int l2tp_clear_session(struct l2tp_softc *);
147 static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
148 static void l2tp_clear_cookie(struct l2tp_softc *);
149 static void l2tp_set_state(struct l2tp_softc *, int);
150 static int l2tp_encap_attach(struct l2tp_variant *);
151 static int l2tp_encap_detach(struct l2tp_variant *);
152
153 #ifndef MAX_L2TP_NEST
154 /*
155 * This macro controls the upper limitation on nesting of l2tp tunnels.
156 * Since, setting a large value to this macro with a careless configuration
157 * may introduce system crash, we don't allow any nestings by default.
158 * If you need to configure nested l2tp tunnels, you can define this macro
159 * in your kernel configuration file. However, if you do so, please be
160 * careful to configure the tunnels so that it won't make a loop.
161 */
162 /*
163 * XXX
164 * Currently, if in_l2tp_output recursively calls, it causes locking against
165 * myself of struct l2tp_ro->lr_lock. So, nested l2tp tunnels is prohibited.
166 */
167 #define MAX_L2TP_NEST 0
168 #endif
169
170 static int max_l2tp_nesting = MAX_L2TP_NEST;
171
/* ARGSUSED */
void
l2tpattach(int count)
{

	/*
	 * Deliberately empty: all real initialization is performed by
	 * l2tpinit() through the module initialization path.
	 */
}
181
/*
 * Module initialization: set up global locks, the psref/pserialize
 * machinery, the interface cloner, and the session-id hash.
 */
static void
l2tpinit(void)
{

	/* Global softc list and its lock. */
	mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&l2tp_softcs.list);

	/* Session hash lock, reader serialization, and variant psref class. */
	mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	l2tp_psz = pserialize_create();
	lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
	if_clone_attach(&l2tp_cloner);

	l2tp_hash_init();
}
196
/*
 * Module teardown.  Returns EBUSY while any l2tp interface or hashed
 * session still exists; otherwise destroys everything l2tpinit() set up,
 * in reverse order.
 */
static int
l2tpdetach(void)
{
	int error;

	/* Refuse to unload while any cloned interface remains. */
	mutex_enter(&l2tp_softcs.lock);
	if (!LIST_EMPTY(&l2tp_softcs.list)) {
		mutex_exit(&l2tp_softcs.lock);
		return EBUSY;
	}
	mutex_exit(&l2tp_softcs.lock);

	/* Fails with EBUSY if any session is still hashed. */
	error = l2tp_hash_fini();
	if (error)
		return error;

	if_clone_detach(&l2tp_cloner);
	psref_class_destroy(lv_psref_class);
	pserialize_destroy(l2tp_psz);
	mutex_destroy(&l2tp_hash.lock);

	mutex_destroy(&l2tp_softcs.lock);

	return error;
}
222
/*
 * Cloner create hook: allocate and initialize a new l2tpN softc and its
 * initial (tunnel-less, session-less) variant, then publish the softc
 * on the global list.  Returns 0 or an error from l2tpattach0().
 */
static int
l2tp_clone_create(struct if_clone *ifc, int unit)
{
	struct l2tp_softc *sc;
	struct l2tp_variant *var;
	int rv;

	sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
	if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
	rv = l2tpattach0(sc);
	if (rv != 0) {
		kmem_free(sc, sizeof(struct l2tp_softc));
		return rv;
	}

	/* Initial variant: DOWN, no addresses, cookies disabled. */
	var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
	var->lv_softc = sc;
	var->lv_state = L2TP_STATE_DOWN;
	var->lv_use_cookie = L2TP_COOKIE_OFF;
	psref_target_init(&var->lv_psref, lv_psref_class);

	sc->l2tp_var = var;
	mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
	sc->l2tp_psz = pserialize_create();
	PSLIST_ENTRY_INIT(sc, l2tp_hash);

	/* Per-CPU cached route + lock for the output path. */
	sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);

	mutex_enter(&l2tp_softcs.lock);
	LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	return (0);
}
258
/*
 * Fill in the ifnet for a new l2tp interface and attach it.
 * The interface is point-to-point with no link-state tracking; the
 * inner protocol is Ethernet (_if_input = ether_input), hence the
 * DLT_EN10MB bpf attachment.  Returns 0 or the if_attach() error.
 */
int
l2tpattach0(struct l2tp_softc *sc)
{
	int rv;

	sc->l2tp_ec.ec_if.if_addrlen = 0;
	sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
	sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
	sc->l2tp_ec.ec_if.if_extflags = IFEF_NO_LINK_STATE_CHANGE;
#ifdef NET_MPSAFE
	sc->l2tp_ec.ec_if.if_extflags |= IFEF_MPSAFE;
#endif
	sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
	sc->l2tp_ec.ec_if.if_output = l2tp_output;
	sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
	sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
	sc->l2tp_ec.ec_if.if_start = l2tp_start;
	sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
	sc->l2tp_ec.ec_if._if_input = ether_input;
	IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);
	/* XXX
	 * It may improve performance to use if_initialize()/if_register()
	 * so that l2tp_input() calls if_input() instead of
	 * if_percpuq_enqueue(). However, that causes recursive softnet_lock
	 * when NET_MPSAFE is not set.
	 */
	rv = if_attach(&sc->l2tp_ec.ec_if);
	if (rv != 0)
		return rv;
	if_alloc_sadl(&sc->l2tp_ec.ec_if);
	bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));

	return 0;
}
293
/*
 * percpu_foreach() constructor: allocate the per-CPU route-cache lock.
 * The route cache itself (lr_ro) starts zeroed from percpu_alloc().
 */
void
l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct l2tp_ro *lro = p;

	lro->lr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
}
301
/*
 * percpu_foreach() destructor: release the cached route, then free the
 * per-CPU lock allocated by l2tp_ro_init_pc().
 */
void
l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct l2tp_ro *lro = p;

	rtcache_free(&lro->lr_ro);

	mutex_obj_free(lro->lr_lock);
}
311
/*
 * Cloner destroy hook.  Order matters: the session is unhashed and the
 * tunnel torn down first, then sc->l2tp_var is swapped to NULL under
 * l2tp_lock so in-flight l2tp_transmit() callers can no longer pick up
 * the variant before it is freed.
 */
static int
l2tp_clone_destroy(struct ifnet *ifp)
{
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	l2tp_clear_session(sc);
	l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
	/*
	 * To avoid for l2tp_transmit() to access sc->l2tp_var after free it.
	 */
	mutex_enter(&sc->l2tp_lock);
	var = sc->l2tp_var;
	/* Waits for readers (pserialize + psref) before we may free var. */
	l2tp_variant_update(sc, NULL);
	mutex_exit(&sc->l2tp_lock);

	mutex_enter(&l2tp_softcs.lock);
	LIST_REMOVE(sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	bpf_detach(ifp);

	if_detach(ifp);

	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
	percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));

	kmem_free(var, sizeof(struct l2tp_variant));
	pserialize_destroy(sc->l2tp_psz);
	mutex_destroy(&sc->l2tp_lock);
	kmem_free(sc, sizeof(struct l2tp_softc));

	return 0;
}
347
/*
 * if_output handler: prepend a DLT_NULL-style address-family word,
 * enqueue the packet on if_snd, and kick the transmit path directly.
 * The variant psref is held across the whole function; every exit path
 * funnels through "end" to release it.  Returns 0 or an errno; the
 * mbuf is always consumed (freed on error, queued on success).
 */
static int
l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var;
	struct psref psref;
	int error = 0;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* Tunnel endpoints must both be configured. */
	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* XXX should we check if our outer source is legal? */

	/* use DLT_NULL encapsulation here to pass inner af type */
	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (!m) {
		/* M_PREPEND already freed the chain on failure. */
		error = ENOBUFS;
		goto end;
	}
	*mtod(m, int *) = dst->sa_family;

	IFQ_ENQUEUE(&ifp->if_snd, m, error);
	if (error)
		goto end;

	/*
	 * direct call to avoid infinite loop at l2tpintr()
	 */
	l2tpintr(var);

	error = 0;

end:
	l2tp_putref_variant(var, &psref);
	if (error)
		ifp->if_oerrors++;

	return error;
}
408
/*
 * Drain the interface send queue, encapsulating each packet according
 * to the outer address family of the tunnel source.  The caller must
 * hold a psref on var (asserted below).  If no session is established
 * yet, queued packets are purged instead of sent.
 */
static void
l2tpintr(struct l2tp_variant *var)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* output processing */
	if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	for (;;) {
		int len;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/* Record length now: the output routines consume m. */
		len = m->m_pkthdr.len;
		m->m_flags &= ~(M_BCAST|M_MCAST);
		bpf_mtap(ifp, m, BPF_D_OUT);
		switch (var->lv_psrc->sa_family) {
#ifdef INET
		case AF_INET:
			error = in_l2tp_output(var, m);
			break;
#endif
#ifdef INET6
		case AF_INET6:
			error = in6_l2tp_output(var, m);
			break;
#endif
		default:
			m_freem(m);
			error = ENETDOWN;
			break;
		}

		if (error)
			ifp->if_oerrors++;
		else {
			ifp->if_opackets++;
			ifp->if_obytes += len;
		}
	}
}
462
/*
 * Deliver a decapsulated packet (inner Ethernet frame) to the stack via
 * ifp's percpu input queue.  If the payload is not aligned so that the
 * post-Ethernet-header data lands on a 4-byte boundary, the head of the
 * chain is copied into a freshly aligned mbuf first.  Consumes m.
 */
void
l2tp_input(struct mbuf *m, struct ifnet *ifp)
{
	vaddr_t addr;

	KASSERT(ifp != NULL);

	/*
	 * Currently, l2tp(4) supports only ethernet as inner protocol.
	 */
	if (m->m_pkthdr.len < sizeof(struct ether_header)) {
		m_freem(m);
		return;
	}

	/*
	 * If the head of the payload is not aligned, align it.
	 */
	addr = mtod(m, vaddr_t);
	if ((addr & 0x03) != 0x2) {
		/* copy and align head of payload */
		struct mbuf *m_head;
		int copy_length;
		u_int pad = roundup(sizeof(struct ether_header), 4)
		    - sizeof(struct ether_header);

#define L2TP_COPY_LENGTH 60

		/* Copy at most L2TP_COPY_LENGTH bytes into the new head. */
		if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
			copy_length = m->m_pkthdr.len;
		} else {
			copy_length = L2TP_COPY_LENGTH;
		}

		/* Ensure the bytes to copy are contiguous in the first mbuf. */
		if (m->m_len < copy_length) {
			m = m_pullup(m, copy_length);
			if (m == NULL)
				return;
		}

		MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
		if (m_head == NULL) {
			m_freem(m);
			return;
		}
		m_move_pkthdr(m_head, m);

		/*
		 * m_head should be:
		 *                             L2TP_COPY_LENGTH
		 *                          <-  + roundup(pad, 4) - pad ->
		 *   +-------+--------+-----+--------------+-------------+
		 *   | m_hdr | pkthdr | ... | ether header |   payload   |
		 *   +-------+--------+-----+--------------+-------------+
		 *                          ^              ^
		 *                          m_data         4 byte aligned
		 */
		m_align(m_head, L2TP_COPY_LENGTH + roundup(pad, 4));
		m_head->m_data += pad;

		memcpy(mtod(m_head, void *), mtod(m, void *), copy_length);
		m_head->m_len = copy_length;
		m->m_data += copy_length;
		m->m_len -= copy_length;

		/* construct chain */
		if (m->m_len == 0) {
			/* m is exhausted: free it and link its successor. */
			m_head->m_next = m_free(m);
		} else {
			m_head->m_next = m;
		}

		/* override m */
		m = m_head;
	}

	m_set_rcvif(m, ifp);

	/*
	 * bpf_mtap() and ifp->if_ipackets++ is done in if_input()
	 *
	 * obytes is incremented at ether_output() or bridge_enqueue().
	 */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}
548
549 void
550 l2tp_start(struct ifnet *ifp)
551 {
552 struct psref psref;
553 struct l2tp_variant *var;
554 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
555 l2tp_ec.ec_if);
556
557 var = l2tp_getref_variant(sc, &psref);
558 if (var == NULL)
559 return;
560
561 if (var->lv_psrc == NULL || var->lv_pdst == NULL)
562 return;
563
564 l2tpintr(var);
565 l2tp_putref_variant(var, &psref);
566 }
567
/*
 * if_transmit handler: encapsulate and send one mbuf directly,
 * bypassing if_snd.  Consumes m on all paths.  Returns 0 on success or
 * ENETDOWN / an encapsulation error; the psref on the variant is held
 * for the duration and released at "out".
 */
int
l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
{
	int error;
	int len;
	struct psref psref;
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	/* Tunnel endpoints must both be configured. */
	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

	/* Record length now: the output routines consume m. */
	len = m->m_pkthdr.len;
	m->m_flags &= ~(M_BCAST|M_MCAST);
	bpf_mtap(ifp, m, BPF_D_OUT);
	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_output(var, m);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_output(var, m);
		break;
#endif
	default:
		m_freem(m);
		error = ENETDOWN;
		break;
	}

	if (error)
		ifp->if_oerrors++;
	else {
		ifp->if_opackets++;
		ifp->if_obytes += len;
	}

out:
	l2tp_putref_variant(var, &psref);
	return error;
}
621
/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
/*
 * Interface ioctl handler.  Tunnel-address and session/cookie/state
 * requests are dispatched to the l2tp_set_*/l2tp_clear_* helpers; the
 * read-only queries take a psref on the current variant (bound to the
 * CPU via curlwp_bind()) while copying data out.  Unknown commands fall
 * through to ifioctl_common().
 */
int
l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var, *var_tmp;
	struct ifreq *ifr = data;
	int error = 0, size;
	struct sockaddr *dst, *src;
	struct l2tp_req l2tpr;
	u_long mtu;
	int bound;
	struct psref psref;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		break;

	case SIOCSIFDSTADDR:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:	/* IP supports Multicast */
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif /* INET6 */
		default:  /* Other protocols doesn't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
			return (EINVAL);
		ifp->if_mtu = mtu;
		break;

#ifdef INET
	case SIOCSIFPHYADDR:
		/* Set IPv4 tunnel endpoints; both must be AF_INET. */
		src = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in)
		    || dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET */
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
		/* Set IPv6 tunnel endpoints; both must be AF_INET6. */
		src = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in6)
		    || dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET6 */
	case SIOCSLIFPHYADDR:
		/* AF-generic endpoint set; families must match and be sane. */
		src = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_family != dst->sa_family)
			return EINVAL;
		else if (src->sa_family == AF_INET
		    && src->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (src->sa_family == AF_INET6
		    && src->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		else if (dst->sa_family == AF_INET
		    && dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (dst->sa_family == AF_INET6
		    && dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

	case SIOCDIFPHYADDR:
		l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
		break;

	case SIOCGIFPSRCADDR:
#ifdef INET6
	case SIOCGIFPSRCADDR_IN6:
#endif /* INET6 */
		/* Copy out the tunnel source address, if set. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_psrc;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPSRCADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPSRCADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGIFPDSTADDR:
#ifdef INET6
	case SIOCGIFPDSTADDR_IN6:
#endif /* INET6 */
		/* Copy out the tunnel destination address, if set. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_pdst;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPDSTADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPDSTADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGLIFPHYADDR:
		/* Copy out both endpoints; fails unless both are set. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		/* copy src */
		src = var->lv_psrc;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		size = sizeof(((struct if_laddrreq *)data)->addr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);

		/* copy dst */
		src = var->lv_pdst;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		size = sizeof(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCSL2TPSESSION:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		/* session id must not zero */
		if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
			return EINVAL;

		/* Reject a my_sess_id already claimed by another interface. */
		bound = curlwp_bind();
		var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
		if (var_tmp != NULL) {
			/* duplicate session id */
			log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
				sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
				var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
			psref_release(&psref, &var_tmp->lv_psref,
			    lv_psref_class);
			curlwp_bindx(bound);
			return EINVAL;
		}
		curlwp_bindx(bound);

		error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
		break;
	case SIOCDL2TPSESSION:
		l2tp_clear_session(sc);
		break;
	case SIOCSL2TPCOOKIE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
		    l2tpr.peer_cookie, l2tpr.peer_cookie_len);
		break;
	case SIOCDL2TPCOOKIE:
		l2tp_clear_cookie(sc);
		break;
	case SIOCSL2TPSTATE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		l2tp_set_state(sc, l2tpr.state);
		break;
	case SIOCGL2TP:
		/* get L2TPV3 session info */
		memset(&l2tpr, 0, sizeof(l2tpr));

		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		l2tpr.state = var->lv_state;
		l2tpr.my_sess_id = var->lv_my_sess_id;
		l2tpr.peer_sess_id = var->lv_peer_sess_id;
		l2tpr.my_cookie = var->lv_my_cookie;
		l2tpr.my_cookie_len = var->lv_my_cookie_len;
		l2tpr.peer_cookie = var->lv_peer_cookie;
		l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);

		error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
		break;
	}
 bad:
	return error;
}
941
/*
 * Install new tunnel endpoints.  Uses the copy-and-swap variant
 * pattern: build a new l2tp_variant carrying duplicated addresses,
 * attach the encap entries for it, publish it with
 * l2tp_variant_update() (which waits out readers of the old one), then
 * free the old variant and its addresses.  The encap lock is held
 * across attach of the new and detach of the old entries.
 */
static int
l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct sockaddr *nsrc, *ndst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nsrc = sockaddr_dup(src, M_WAITOK);
	ndst = sockaddr_dup(dst, M_WAITOK);

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error)
		goto error;

	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = nsrc;
	nvar->lv_pdst = ndst;
	error = l2tp_encap_attach(nvar);
	if (error) {
		mutex_exit(&sc->l2tp_lock);
		encap_lock_exit();
		goto error;
	}
	/* Make nvar's stores visible before readers can see the pointer. */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	/* Old addresses (if any) and the old variant are now unreachable. */
	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));

	return 0;

error:
	sockaddr_free(nsrc);
	sockaddr_free(ndst);
	kmem_free(nvar, sizeof(*nvar));

	return error;
}
999
/*
 * Remove the tunnel endpoints: publish a new variant with NULL
 * src/dst (copy-and-swap as in l2tp_set_tunnel()), detach the old
 * variant's encap entries, and free the old addresses and variant.
 * Silently does nothing if the encap lock cannot be taken.
 */
static void
l2tp_delete_tunnel(struct ifnet *ifp)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error) {
		kmem_free(nvar, sizeof(*nvar));
		return;
	}
	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = NULL;
	nvar->lv_pdst = NULL;
	/* Make nvar's stores visible before readers can see the pointer. */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));
}
1039
1040 static int
1041 id_hash_func(uint32_t id, u_long mask)
1042 {
1043 uint32_t hash;
1044
1045 hash = (id >> 16) ^ id;
1046 hash = (hash >> 4) ^ hash;
1047
1048 return hash & mask;
1049 }
1050
/*
 * Allocate the session-id hash table (L2TP_ID_HASH_SIZE pslist heads);
 * hashinit() stores the resulting index mask in l2tp_hash.mask.
 */
static void
l2tp_hash_init(void)
{

	l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
	    &l2tp_hash.mask);
}
1058
/*
 * Free the session-id hash table.  Returns EBUSY (leaving the table
 * intact) if any bucket still has an entry; otherwise destroys every
 * pslist head and releases the table memory.
 */
static int
l2tp_hash_fini(void)
{
	int i;

	mutex_enter(&l2tp_hash.lock);

	/* First pass: refuse if any session is still hashed. */
	for (i = 0; i < l2tp_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
			l2tp_hash) != NULL) {
			mutex_exit(&l2tp_hash.lock);
			return EBUSY;
		}
	}
	/* Second pass: all empty, destroy the list heads. */
	for (i = 0; i < l2tp_hash.mask + 1; i++)
		PSLIST_DESTROY(&l2tp_hash.lists[i]);

	mutex_exit(&l2tp_hash.lock);

	hashdone(l2tp_hash.lists, HASH_PSLIST, l2tp_hash.mask);

	return 0;
}
1082
/*
 * Set the local/peer session ids.  Publishes a new variant carrying
 * the ids, removes the softc from its old hash bucket (if it was
 * hashed, i.e. both old ids were non-zero) waiting out readers, and
 * re-inserts it into the bucket for the new id.  Caller (ioctl path)
 * has already rejected zero ids and duplicates.  Always returns 0.
 */
static int
l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
    uint32_t peer_sess_id)
{
	uint32_t idx;
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = my_sess_id;
	nvar->lv_peer_sess_id = peer_sess_id;
	/* Make nvar's stores visible before readers can see the pointer. */
	membar_producer();

	/* Unhash under the table lock; wait for list readers to drain. */
	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);
	PSLIST_ENTRY_DESTROY(sc, l2tp_hash);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);

	idx = id_hash_func(nvar->lv_my_sess_id, l2tp_hash.mask);
	if ((ifp->if_flags & IFF_DEBUG) != 0)
		log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
			sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);

	/* Re-initialize the entry and insert into the new bucket. */
	PSLIST_ENTRY_INIT(sc, l2tp_hash);
	mutex_enter(&l2tp_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
	mutex_exit(&l2tp_hash.lock);

	kmem_free(ovar, sizeof(*ovar));
	return 0;
}
1126
/*
 * Clear the session ids: publish a new variant with both ids zeroed
 * and, if the softc was hashed (both old ids non-zero), remove it from
 * the hash table waiting out list readers.  Always returns 0.
 */
static int
l2tp_clear_session(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = 0;
	nvar->lv_peer_sess_id = 0;
	/* Make nvar's stores visible before readers can see the pointer. */
	membar_producer();

	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);
	kmem_free(ovar, sizeof(*ovar));
	return 0;
}
1155
/*
 * Look up the variant whose local session id equals "id".  Runs inside
 * a pserialize read section over the hash bucket; on a match, a psref
 * is acquired on the variant before leaving the read section, so the
 * caller must release it with psref_release()/l2tp_putref_variant().
 * Returns NULL if no session matches.
 */
struct l2tp_variant *
l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
{
	int idx;
	int s;
	struct l2tp_softc *sc;

	idx = id_hash_func(id, l2tp_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
	    l2tp_hash) {
		struct l2tp_variant *var = sc->l2tp_var;
		if (var == NULL)
			continue;
		if (var->lv_my_sess_id != id)
			continue;
		psref_acquire(psref, &var->lv_psref, lv_psref_class);
		pserialize_read_exit(s);
		return var;
	}
	pserialize_read_exit(s);
	return NULL;
}
1180
/*
 * l2tp_variant update API.
 *
 * Assumption:
 * reader side dereferences sc->l2tp_var in reader critical section only,
 * that is, all of the reader sides do not read sc->l2tp_var after
 * pserialize_perform().
 */
/*
 * Publish nvar as the current variant: swap the pointer, wait for all
 * readers of the old variant to drain (pserialize + psref target
 * destroy), and refresh IFF_RUNNING according to whether the new
 * variant has both tunnel endpoints.  nvar may be NULL (interface
 * destruction).  Caller holds sc->l2tp_lock (asserted).
 */
static void
l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
{
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	struct l2tp_variant *ovar = sc->l2tp_var;

	KASSERT(mutex_owned(&sc->l2tp_lock));

	sc->l2tp_var = nvar;
	pserialize_perform(sc->l2tp_psz);
	psref_target_destroy(&ovar->lv_psref, lv_psref_class);

	/*
	 * In the manual of atomic_swap_ptr(3), there is no mention if 2nd
	 * argument is rewrite or not. So, use sc->l2tp_var instead of nvar.
	 */
	if (sc->l2tp_var != NULL) {
		if (sc->l2tp_var->lv_psrc != NULL
		    && sc->l2tp_var->lv_pdst != NULL)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
	}
}
1213
1214 static int
1215 l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
1216 uint64_t peer_cookie, u_int peer_cookie_len)
1217 {
1218 struct l2tp_variant *nvar;
1219
1220 if (my_cookie == 0 || peer_cookie == 0)
1221 return EINVAL;
1222
1223 if (my_cookie_len != 4 && my_cookie_len != 8
1224 && peer_cookie_len != 4 && peer_cookie_len != 8)
1225 return EINVAL;
1226
1227 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1228
1229 mutex_enter(&sc->l2tp_lock);
1230
1231 *nvar = *sc->l2tp_var;
1232 psref_target_init(&nvar->lv_psref, lv_psref_class);
1233 nvar->lv_my_cookie = my_cookie;
1234 nvar->lv_my_cookie_len = my_cookie_len;
1235 nvar->lv_peer_cookie = peer_cookie;
1236 nvar->lv_peer_cookie_len = peer_cookie_len;
1237 nvar->lv_use_cookie = L2TP_COOKIE_ON;
1238 membar_producer();
1239 l2tp_variant_update(sc, nvar);
1240
1241 mutex_exit(&sc->l2tp_lock);
1242
1243 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1244 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1245 log(LOG_DEBUG,
1246 "%s: set cookie: "
1247 "local cookie_len=%u local cookie=%" PRIu64 ", "
1248 "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
1249 ifp->if_xname, my_cookie_len, my_cookie,
1250 peer_cookie_len, peer_cookie);
1251 }
1252
1253 return 0;
1254 }
1255
1256 static void
1257 l2tp_clear_cookie(struct l2tp_softc *sc)
1258 {
1259 struct l2tp_variant *nvar;
1260
1261 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1262
1263 mutex_enter(&sc->l2tp_lock);
1264
1265 *nvar = *sc->l2tp_var;
1266 psref_target_init(&nvar->lv_psref, lv_psref_class);
1267 nvar->lv_my_cookie = 0;
1268 nvar->lv_my_cookie_len = 0;
1269 nvar->lv_peer_cookie = 0;
1270 nvar->lv_peer_cookie_len = 0;
1271 nvar->lv_use_cookie = L2TP_COOKIE_OFF;
1272 membar_producer();
1273 l2tp_variant_update(sc, nvar);
1274
1275 mutex_exit(&sc->l2tp_lock);
1276 }
1277
1278 static void
1279 l2tp_set_state(struct l2tp_softc *sc, int state)
1280 {
1281 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1282 struct l2tp_variant *nvar;
1283
1284 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1285
1286 mutex_enter(&sc->l2tp_lock);
1287
1288 *nvar = *sc->l2tp_var;
1289 psref_target_init(&nvar->lv_psref, lv_psref_class);
1290 nvar->lv_state = state;
1291 membar_producer();
1292 l2tp_variant_update(sc, nvar);
1293
1294 if (nvar->lv_state == L2TP_STATE_UP) {
1295 ifp->if_link_state = LINK_STATE_UP;
1296 } else {
1297 ifp->if_link_state = LINK_STATE_DOWN;
1298 }
1299
1300 mutex_exit(&sc->l2tp_lock);
1301
1302 #ifdef NOTYET
1303 vlan_linkstate_notify(ifp, ifp->if_link_state);
1304 #endif
1305 }
1306
1307 static int
1308 l2tp_encap_attach(struct l2tp_variant *var)
1309 {
1310 int error;
1311
1312 if (var == NULL || var->lv_psrc == NULL)
1313 return EINVAL;
1314
1315 switch (var->lv_psrc->sa_family) {
1316 #ifdef INET
1317 case AF_INET:
1318 error = in_l2tp_attach(var);
1319 break;
1320 #endif
1321 #ifdef INET6
1322 case AF_INET6:
1323 error = in6_l2tp_attach(var);
1324 break;
1325 #endif
1326 default:
1327 error = EINVAL;
1328 break;
1329 }
1330
1331 return error;
1332 }
1333
1334 static int
1335 l2tp_encap_detach(struct l2tp_variant *var)
1336 {
1337 int error;
1338
1339 if (var == NULL || var->lv_psrc == NULL)
1340 return EINVAL;
1341
1342 switch (var->lv_psrc->sa_family) {
1343 #ifdef INET
1344 case AF_INET:
1345 error = in_l2tp_detach(var);
1346 break;
1347 #endif
1348 #ifdef INET6
1349 case AF_INET6:
1350 error = in6_l2tp_detach(var);
1351 break;
1352 #endif
1353 default:
1354 error = EINVAL;
1355 break;
1356 }
1357
1358 return error;
1359 }
1360
1361 int
1362 l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
1363 {
1364
1365 return if_tunnel_check_nesting(ifp, m, max_l2tp_nesting);
1366 }
1367
1368 /*
1369 * Module infrastructure
1370 */
1371 #include "if_module.h"
1372
1373 IF_MODULE(MODULE_CLASS_DRIVER, l2tp, "")
1374
1375
1376 /* TODO: IP_TCPMSS support */
1377 #ifdef IP_TCPMSS
1378 static int l2tp_need_tcpmss_clamp(struct ifnet *);
1379 #ifdef INET
1380 static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
1381 #endif
1382 #ifdef INET6
1383 static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
1384 #endif
1385
/*
 * Clamp the TCP MSS option of a TCP segment carried in an Ethernet
 * (optionally VLAN-tagged) frame before encapsulation.
 *
 * The Ethernet (+ VLAN) header is copied aside, stripped with m_adj(),
 * the per-family MSS clamper is run on the remaining payload, and the
 * saved header is prepended again.
 *
 * Returns the (possibly reallocated) mbuf chain, or NULL if the chain
 * was freed on error; the caller must not use "m" after this call.
 *
 * NOTE(review): this code is compiled only under IP_TCPMSS (see the
 * TODO above) and relies on ifnet fields (if_tcpmss/if_tcpmss6) that
 * the option provides.
 */
struct mbuf *
l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	struct ether_vlan_header evh;

	if (!l2tp_need_tcpmss_clamp(ifp)) {
		return m;
	}

	/* Too short to hold even a VLAN header worth of data: drop. */
	if (m->m_pkthdr.len < sizeof(evh)) {
		m_freem(m);
		return NULL;
	}

	/* save ether header */
	m_copydata(m, 0, sizeof(evh), (void *)&evh);
	/* eh aliases the start of evh, valid for the non-VLAN cases. */
	eh = (struct ether_header *)&evh;

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_VLAN: /* Ether + VLAN */
		/* No payload beyond the VLAN header: nothing to clamp. */
		if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
			break;
		m_adj(m, sizeof(struct ether_vlan_header));
		/* Dispatch on the encapsulated (post-VLAN) ethertype. */
		switch (ntohs(evh.evl_proto)) {
#ifdef INET
		case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
			m = l2tp_tcpmss4_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
			m = l2tp_tcpmss6_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET6 */
		default:
			break;
		}

		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_vlan_header),
		    M_DONTWAIT);
		/* M_PREPEND sets m to NULL on allocation failure. */
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_vlan_header *) = evh;
		break;

#ifdef INET
	case ETHERTYPE_IP: /* Ether + IPv4 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss4_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET */

#ifdef INET6
	case ETHERTYPE_IPV6: /* Ether + IPv6 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss6_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET6 */

	default:
		/* Non-IP payload: pass through untouched. */
		break;
	}

	return m;
}
1475
/*
 * Return non-zero when an MSS clamp value is configured on the
 * interface for IPv4 and/or IPv6, zero otherwise.
 */
static int
l2tp_need_tcpmss_clamp(struct ifnet *ifp)
{

#ifdef INET
	if (ifp->if_tcpmss != 0)
		return 1;
#endif

#ifdef INET6
	if (ifp->if_tcpmss6 != 0)
		return 1;
#endif

	return 0;
}
1493
1494 #ifdef INET
1495 static struct mbuf *
1496 l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
1497 {
1498
1499 if (ifp->if_tcpmss != 0) {
1500 return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
1501 ifp->if_mtu - IP_TCPMSS_EXTLEN :
1502 ifp->if_tcpmss);
1503 }
1504 return m;
1505 }
1506 #endif /* INET */
1507
1508 #ifdef INET6
1509 static struct mbuf *
1510 l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
1511 {
1512 int ip6hdrlen;
1513
1514 if (ifp->if_tcpmss6 != 0 &&
1515 ip6_tcpmss_applicable(m, &ip6hdrlen)) {
1516 return ip6_tcpmss(m, ip6hdrlen,
1517 (ifp->if_tcpmss6 < 0) ?
1518 ifp->if_mtu - IP6_TCPMSS_EXTLEN :
1519 ifp->if_tcpmss6);
1520 }
1521 return m;
1522 }
1523 #endif /* INET6 */
1524
1525 #endif /* IP_TCPMSS */
1526