if_l2tp.c revision 1.35.2.1 1 /* $NetBSD: if_l2tp.c,v 1.35.2.1 2019/09/24 03:10:35 martin Exp $ */
2
3 /*
4 * Copyright (c) 2017 Internet Initiative Japan Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * L2TPv3 kernel interface
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.35.2.1 2019/09/24 03:10:35 martin Exp $");
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #include "opt_net_mpsafe.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/errno.h>
48 #include <sys/ioctl.h>
49 #include <sys/time.h>
50 #include <sys/syslog.h>
51 #include <sys/proc.h>
52 #include <sys/conf.h>
53 #include <sys/kauth.h>
54 #include <sys/cpu.h>
55 #include <sys/cprng.h>
56 #include <sys/intr.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/atomic.h>
60 #include <sys/pserialize.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_ether.h>
67 #include <net/if_types.h>
68 #include <net/netisr.h>
69 #include <net/route.h>
70 #include <net/bpf.h>
71 #include <net/if_vlanvar.h>
72
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/ip_encap.h>
77 #ifdef INET
78 #include <netinet/in_var.h>
79 #include <netinet/in_l2tp.h>
80 #endif /* INET */
81 #ifdef INET6
82 #include <netinet6/in6_l2tp.h>
83 #endif
84
85 #include <net/if_l2tp.h>
86
87 #include <net/if_vlanvar.h>
88
89 /* TODO: IP_TCPMSS support */
90 #undef IP_TCPMSS
91 #ifdef IP_TCPMSS
92 #include <netinet/ip_tcpmss.h>
93 #endif
94
/*
 * l2tp global variable definitions
 */
/*
 * Global list of all l2tp(4) softcs.  Insertions and removals are
 * serialized by l2tp_softcs.lock.
 */
static struct {
	LIST_HEAD(l2tp_sclist, l2tp_softc) list;
	kmutex_t lock;
} l2tp_softcs __cacheline_aligned;


#if !defined(L2TP_ID_HASH_SIZE)
#define L2TP_ID_HASH_SIZE 64
#endif
/*
 * Hash table keyed by the local session id, used on the input path by
 * l2tp_lookup_session_ref().  Writers hold l2tp_hash.lock; readers run
 * lockless under pserialize(9) and take psref(9) references.
 */
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
	u_long mask;		/* table size - 1, set by hashinit() */
} l2tp_hash __cacheline_aligned = {
	.lists = NULL,
};

/* Passive serialization domain and psref class for struct l2tp_variant. */
pserialize_t l2tp_psz __read_mostly;
struct psref_class *lv_psref_class __read_mostly;

static void l2tp_ifq_init_pc(void *, void *, struct cpu_info *);
static void l2tp_ifq_fini_pc(void *, void *, struct cpu_info *);

static int l2tp_clone_create(struct if_clone *, int);
static int l2tp_clone_destroy(struct ifnet *);

struct if_clone l2tp_cloner =
    IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);

static int l2tp_tx_enqueue(struct l2tp_variant *, struct mbuf *);
static int l2tp_output(struct ifnet *, struct mbuf *,
    const struct sockaddr *, const struct rtentry *);
static void l2tp_sendit(struct l2tp_variant *, struct mbuf *);
static void l2tpintr(struct l2tp_variant *);
static void l2tpintr_softint(void *);

static void l2tp_hash_init(void);
static int l2tp_hash_fini(void);

static void l2tp_start(struct ifnet *);
static int l2tp_transmit(struct ifnet *, struct mbuf *);

static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
    struct sockaddr *);
static void l2tp_delete_tunnel(struct ifnet *);

static int id_hash_func(uint32_t, u_long);

static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
static int l2tp_clear_session(struct l2tp_softc *);
static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
static void l2tp_clear_cookie(struct l2tp_softc *);
static void l2tp_set_state(struct l2tp_softc *, int);
static int l2tp_encap_attach(struct l2tp_variant *);
static int l2tp_encap_detach(struct l2tp_variant *);
154
/*
 * Return the current CPU's transmit ifqueue, with the percpu data
 * referenced.  Must be paired with l2tp_ifq_percpu_putref().
 */
static inline struct ifqueue *
l2tp_ifq_percpu_getref(percpu_t *pc)
{

	return *(struct ifqueue **)percpu_getref(pc);
}
161
/*
 * Release the percpu reference taken by l2tp_ifq_percpu_getref().
 */
static inline void
l2tp_ifq_percpu_putref(percpu_t *pc)
{

	percpu_putref(pc);
}
168
#ifndef MAX_L2TP_NEST
/*
 * This macro controls the upper limitation on nesting of l2tp tunnels.
 * Since, setting a large value to this macro with a careless configuration
 * may introduce system crash, we don't allow any nestings by default.
 * If you need to configure nested l2tp tunnels, you can define this macro
 * in your kernel configuration file.  However, if you do so, please be
 * careful to configure the tunnels so that it won't make a loop.
 */
/*
 * XXX
 * Currently, if in_l2tp_output recursively calls, it causes locking against
 * myself of struct l2tp_ro->lr_lock.  So, nested l2tp tunnels is prohibited.
 */
#define MAX_L2TP_NEST 0
#endif

/* Run-time nesting limit; seeded from the compile-time MAX_L2TP_NEST. */
static int max_l2tp_nesting = MAX_L2TP_NEST;
187
/* ARGSUSED */
/*
 * Legacy pseudo-device attach hook.  Intentionally empty: all real
 * initialization happens in l2tpinit() via the module framework.
 */
void
l2tpattach(int count)
{
	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in l2tpinit() below).
	 */
}
197
/*
 * Initialize global l2tp(4) state: softc list, session hash, the
 * pserialize/psref machinery, and the interface cloner.  Called once
 * from the module init path.
 */
static void
l2tpinit(void)
{

	mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&l2tp_softcs.list);

	mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	l2tp_psz = pserialize_create();
	lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
	if_clone_attach(&l2tp_cloner);

	l2tp_hash_init();
}
212
/*
 * Tear down global l2tp(4) state (reverse of l2tpinit()).
 * Returns EBUSY while any interface still exists or any session is
 * still hashed; 0 on success.
 */
static int
l2tpdetach(void)
{
	int error;

	/* Refuse to unload while any l2tp interface exists. */
	mutex_enter(&l2tp_softcs.lock);
	if (!LIST_EMPTY(&l2tp_softcs.list)) {
		mutex_exit(&l2tp_softcs.lock);
		return EBUSY;
	}
	mutex_exit(&l2tp_softcs.lock);

	error = l2tp_hash_fini();
	if (error)
		return error;

	if_clone_detach(&l2tp_cloner);
	psref_class_destroy(lv_psref_class);
	pserialize_destroy(l2tp_psz);
	mutex_destroy(&l2tp_hash.lock);

	mutex_destroy(&l2tp_softcs.lock);

	return error;
}
238
/*
 * Cloner create hook: allocate and initialize a new l2tp(4) softc and
 * its initial (DOWN, no-cookie) variant, attach the interface, and put
 * the softc on the global list.  Returns 0 or an error from
 * l2tpattach0().
 */
static int
l2tp_clone_create(struct if_clone *ifc, int unit)
{
	struct l2tp_softc *sc;
	struct l2tp_variant *var;
	int rv;
	u_int si_flags = SOFTINT_NET;
#ifdef NET_MPSAFE
	si_flags |= SOFTINT_MPSAFE;
#endif
	sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
	if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
	rv = l2tpattach0(sc);
	if (rv != 0) {
		kmem_free(sc, sizeof(struct l2tp_softc));
		return rv;
	}

	/* Initial variant: interface down, no tunnel, cookies disabled. */
	var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
	var->lv_softc = sc;
	var->lv_state = L2TP_STATE_DOWN;
	var->lv_use_cookie = L2TP_COOKIE_OFF;
	psref_target_init(&var->lv_psref, lv_psref_class);

	sc->l2tp_var = var;
	mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
	sc->l2tp_psz = pserialize_create();
	PSLIST_ENTRY_INIT(sc, l2tp_hash);

	sc->l2tp_ro_percpu = if_tunnel_alloc_ro_percpu();

	/* Per-cpu transmit queues drained by the softint below. */
	sc->l2tp_ifq_percpu = percpu_alloc(sizeof(struct ifqueue *));
	percpu_foreach(sc->l2tp_ifq_percpu, l2tp_ifq_init_pc, NULL);
	sc->l2tp_si = softint_establish(si_flags, l2tpintr_softint, sc);

	mutex_enter(&l2tp_softcs.lock);
	LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	return (0);
}
280
/*
 * Initialize the ifnet embedded in "sc" and attach it.
 * Returns 0 on success or the error from if_attach().
 */
int
l2tpattach0(struct l2tp_softc *sc)
{
	int rv;

	sc->l2tp_ec.ec_if.if_addrlen = 0;
	sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
	sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
	sc->l2tp_ec.ec_if.if_extflags = IFEF_NO_LINK_STATE_CHANGE;
#ifdef NET_MPSAFE
	sc->l2tp_ec.ec_if.if_extflags |= IFEF_MPSAFE;
#endif
	sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
	sc->l2tp_ec.ec_if.if_output = l2tp_output;
	sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
	sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
	sc->l2tp_ec.ec_if.if_start = l2tp_start;
	sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
	/* inner protocol is Ethernet; decapsulated frames go to ether_input */
	sc->l2tp_ec.ec_if._if_input = ether_input;
	IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);
	/* XXX
	 * It may improve performance to use if_initialize()/if_register()
	 * so that l2tp_input() calls if_input() instead of
	 * if_percpuq_enqueue().  However, that causes recursive softnet_lock
	 * when NET_MPSAFE is not set.
	 */
	rv = if_attach(&sc->l2tp_ec.ec_if);
	if (rv != 0)
		return rv;
	if_alloc_sadl(&sc->l2tp_ec.ec_if);
	/* tap as Ethernet since the payload is an Ethernet frame */
	bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));

	return 0;
}
315
316 void
317 l2tp_ifq_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
318 {
319 struct ifqueue **ifqp = p;
320
321 *ifqp = kmem_zalloc(sizeof(**ifqp), KM_SLEEP);
322 (*ifqp)->ifq_maxlen = IFQ_MAXLEN;
323 }
324
325 void
326 l2tp_ifq_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
327 {
328 struct ifqueue **ifqp = p;
329
330 kmem_free(*ifqp, sizeof(**ifqp));
331 }
332
/*
 * Cloner destroy hook: quiesce the interface (clear session/tunnel,
 * publish a NULL variant so readers stop dereferencing it), then tear
 * down the softint, per-cpu queues, and the interface itself.
 */
static int
l2tp_clone_destroy(struct ifnet *ifp)
{
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	l2tp_clear_session(sc);
	l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
	/*
	 * To avoid for l2tp_transmit() and l2tpintr_softint() to access
	 * sc->l2tp_var after free it.
	 */
	mutex_enter(&sc->l2tp_lock);
	var = sc->l2tp_var;
	l2tp_variant_update(sc, NULL);
	mutex_exit(&sc->l2tp_lock);

	/* after this, no softint can be scheduled or running */
	softint_disestablish(sc->l2tp_si);
	percpu_foreach(sc->l2tp_ifq_percpu, l2tp_ifq_fini_pc, NULL);
	percpu_free(sc->l2tp_ifq_percpu, sizeof(struct ifqueue *));

	mutex_enter(&l2tp_softcs.lock);
	LIST_REMOVE(sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	bpf_detach(ifp);

	if_detach(ifp);

	if_tunnel_free_ro_percpu(sc->l2tp_ro_percpu);

	/* "var" was unpublished above; now safe to free */
	kmem_free(var, sizeof(struct l2tp_variant));
	pserialize_destroy(sc->l2tp_psz);
	mutex_destroy(&sc->l2tp_lock);
	kmem_free(sc, sizeof(struct l2tp_softc));

	return 0;
}
372
373 static int
374 l2tp_tx_enqueue(struct l2tp_variant *var, struct mbuf *m)
375 {
376 struct l2tp_softc *sc;
377 struct ifnet *ifp;
378 struct ifqueue *ifq;
379 int s;
380
381 KASSERT(psref_held(&var->lv_psref, lv_psref_class));
382
383 sc = var->lv_softc;
384 ifp = &sc->l2tp_ec.ec_if;
385
386 s = splsoftnet();
387 ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
388 if (IF_QFULL(ifq)) {
389 ifp->if_oerrors++;
390 l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);
391 splx(s);
392 m_freem(m);
393 return ENOBUFS;
394 }
395
396 IF_ENQUEUE(ifq, m);
397 percpu_putref(sc->l2tp_ifq_percpu);
398 softint_schedule(sc->l2tp_si);
399 /* counter is incremented in l2tpintr() */
400 splx(s);
401 return 0;
402 }
403
/*
 * if_output handler.  Validates that the interface is up and the
 * tunnel is configured, prepends the inner address family (DLT_NULL
 * style), and hands the packet to l2tp_tx_enqueue().
 *
 * Returns 0 on success or ENETDOWN/ENOBUFS; the mbuf is consumed in
 * all cases.
 */
static int
l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var;
	struct psref psref;
	int error = 0;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* tunnel endpoints must both be configured */
	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* XXX should we check if our outer source is legal? */

	/* use DLT_NULL encapsulation here to pass inner af type */
	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (!m) {
		/* M_PREPEND freed the chain on failure */
		error = ENOBUFS;
		goto end;
	}
	*mtod(m, int *) = dst->sa_family;

	error = l2tp_tx_enqueue(var, m);
end:
	l2tp_putref_variant(var, &psref);
	if (error)
		ifp->if_oerrors++;

	return error;
}
454
/*
 * Encapsulate and transmit one mbuf over the tunnel, dispatching on
 * the outer address family of the configured source endpoint.
 * Consumes the mbuf and updates the interface output counters.
 * Caller must hold a psref reference on "var".
 */
static void
l2tp_sendit(struct l2tp_variant *var, struct mbuf *m)
{
	int len;
	int error;
	struct l2tp_softc *sc;
	struct ifnet *ifp;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* record length before the output routine consumes the mbuf */
	len = m->m_pkthdr.len;
	m->m_flags &= ~(M_BCAST|M_MCAST);
	bpf_mtap(ifp, m, BPF_D_OUT);

	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_output(var, m);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_output(var, m);
		break;
#endif
	default:
		m_freem(m);
		error = ENETDOWN;
		break;
	}
	if (error) {
		ifp->if_oerrors++;
	} else {
		ifp->if_opackets++;
		ifp->if_obytes += len;
	}
}
495
/*
 * Transmit softint body: drain this CPU's per-cpu queue and (on CPU 0
 * only, to avoid concurrent access) the shared if_snd queue, sending
 * each packet via l2tp_sendit().  If the session is not fully
 * configured, the queues are purged instead.
 * Caller must hold a psref reference on "var".
 */
static void
l2tpintr(struct l2tp_variant *var)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	struct ifqueue *ifq;
	u_int cpuid = cpu_index(curcpu());

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* output processing */
	if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
		/* no session established: drop everything queued */
		ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
		IF_PURGE(ifq);
		l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);
		if (cpuid == 0)
			IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Currently, l2tpintr() is always called in softint context. */
	ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m != NULL)
			l2tp_sendit(var, m);
		else
			break;
	}
	l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);

	/* if_snd is only drained on CPU 0 so it is not raced between CPUs */
	if (cpuid == 0) {
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				l2tp_sendit(var, m);
			else
				break;
		}
	}
}
541
542 static void
543 l2tpintr_softint(void *arg)
544 {
545 struct l2tp_variant *var;
546 struct psref psref;
547 struct l2tp_softc *sc = arg;
548
549 var = l2tp_getref_variant(sc, &psref);
550 if (var == NULL)
551 return;
552
553 l2tpintr(var);
554 l2tp_putref_variant(var, &psref);
555 }
556
/*
 * Deliver a decapsulated L2TP payload (an Ethernet frame) to the
 * interface's input path.  If the frame head is not 4-byte-aligned at
 * offset 2 (the alignment ether_input expects for the post-header
 * payload), copy the first bytes into a freshly aligned mbuf first.
 * Consumes the mbuf.
 */
void
l2tp_input(struct mbuf *m, struct ifnet *ifp)
{
	vaddr_t addr;

	KASSERT(ifp != NULL);

	/*
	 * Currently, l2tp(4) supports only ethernet as inner protocol.
	 */
	if (m->m_pkthdr.len < sizeof(struct ether_header)) {
		m_freem(m);
		return;
	}

	/*
	 * If the head of the payload is not aligned, align it.
	 */
	addr = mtod(m, vaddr_t);
	if ((addr & 0x03) != 0x2) {
		/* copy and align head of payload */
		struct mbuf *m_head;
		int copy_length;
		u_int pad = roundup(sizeof(struct ether_header), 4)
		    - sizeof(struct ether_header);

#define L2TP_COPY_LENGTH 60

		if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
			copy_length = m->m_pkthdr.len;
		} else {
			copy_length = L2TP_COPY_LENGTH;
		}

		/* make the head of the chain contiguous for the memcpy below */
		if (m->m_len < copy_length) {
			m = m_pullup(m, copy_length);
			if (m == NULL)
				return;
		}

		MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
		if (m_head == NULL) {
			m_freem(m);
			return;
		}
		m_move_pkthdr(m_head, m);

		/*
		 * m_head should be:
		 *                             L2TP_COPY_LENGTH
		 *                          <-  + roundup(pad, 4) - pad ->
		 *   +-------+--------+-----+--------------+-------------+
		 *   | m_hdr | pkthdr | ... | ether header |   payload   |
		 *   +-------+--------+-----+--------------+-------------+
		 *                          ^              ^
		 *                          m_data         4 byte aligned
		 */
		m_align(m_head, L2TP_COPY_LENGTH + roundup(pad, 4));
		m_head->m_data += pad;

		memcpy(mtod(m_head, void *), mtod(m, void *), copy_length);
		m_head->m_len = copy_length;
		m->m_data += copy_length;
		m->m_len -= copy_length;

		/* construct chain */
		if (m->m_len == 0) {
			/* first mbuf fully consumed; link its successor */
			m_head->m_next = m_free(m);
		} else {
			m_head->m_next = m;
		}

		/* override m */
		m = m_head;
	}

	m_set_rcvif(m, ifp);

	/*
	 * bpf_mtap() and ifp->if_ipackets++ is done in if_input()
	 *
	 * obytes is incremented at ether_output() or bridge_enqueue().
	 */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}
642
643 void
644 l2tp_start(struct ifnet *ifp)
645 {
646 struct psref psref;
647 struct l2tp_variant *var;
648 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
649 l2tp_ec.ec_if);
650
651 var = l2tp_getref_variant(sc, &psref);
652 if (var == NULL)
653 return;
654
655 if (var->lv_psrc == NULL || var->lv_pdst == NULL)
656 return;
657
658 softint_schedule(sc->l2tp_si);
659 l2tp_putref_variant(var, &psref);
660 }
661
/*
 * if_transmit handler: bypass if_snd and enqueue directly on the
 * per-cpu transmit queue via l2tp_tx_enqueue().
 * Returns 0 or ENETDOWN/ENOBUFS; the mbuf is consumed in all cases.
 */
int
l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
{
	int error;
	struct psref psref;
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	/* tunnel endpoints must both be configured */
	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

	m->m_flags &= ~(M_BCAST|M_MCAST);

	error = l2tp_tx_enqueue(var, m);
out:
	l2tp_putref_variant(var, &psref);
	return error;
}
690
/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
/*
 * ioctl handler for l2tp(4) interfaces.
 *
 * Handles interface address/MTU housekeeping, tunnel endpoint
 * configuration (SIOC[SD]*PHYADDR*), and the L2TP-specific session,
 * cookie, and state ioctls.  Read-type ioctls that dereference the
 * current variant bind the lwp and take a psref reference for the
 * duration of the copy-out.
 *
 * Returns 0 on success or an errno.  NOTE: some cases "return"
 * directly while others "goto bad"; both paths end up returning the
 * error to the caller, the label exists only as a common exit point.
 */
int
l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var, *var_tmp;
	struct ifreq *ifr = data;
	int error = 0, size;
	struct sockaddr *dst, *src;
	struct l2tp_req l2tpr;
	u_long mtu;
	int bound;
	struct psref psref;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		break;

	case SIOCSIFDSTADDR:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:	/* IP supports Multicast */
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif /* INET6 */
		default:  /* Other protocols doesn't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
			return (EINVAL);
		ifp->if_mtu = mtu;
		break;

#ifdef INET
	case SIOCSIFPHYADDR:
		/* set IPv4 tunnel endpoints; both must be AF_INET */
		src = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in)
		    || dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET */
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
		/* set IPv6 tunnel endpoints; both must be AF_INET6 */
		src = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in6)
		    || dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET6 */
	case SIOCSLIFPHYADDR:
		/* address-family-generic endpoint set; families must match */
		src = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_family != dst->sa_family)
			return EINVAL;
		else if (src->sa_family == AF_INET
		    && src->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (src->sa_family == AF_INET6
		    && src->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		else if (dst->sa_family == AF_INET
		    && dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (dst->sa_family == AF_INET6
		    && dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

	case SIOCDIFPHYADDR:
		l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
		break;

	case SIOCGIFPSRCADDR:
#ifdef INET6
	case SIOCGIFPSRCADDR_IN6:
#endif /* INET6 */
		/* copy out the local tunnel endpoint */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_psrc;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPSRCADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPSRCADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGIFPDSTADDR:
#ifdef INET6
	case SIOCGIFPDSTADDR_IN6:
#endif /* INET6 */
		/* copy out the remote tunnel endpoint */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_pdst;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPDSTADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPDSTADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGLIFPHYADDR:
		/* copy out both endpoints at once */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		/* copy src */
		src = var->lv_psrc;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		size = sizeof(((struct if_laddrreq *)data)->addr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);

		/* copy dst */
		src = var->lv_pdst;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		size = sizeof(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCSL2TPSESSION:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		/* session id must not zero */
		if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
			return EINVAL;

		/* reject a local session id already in use by another if */
		bound = curlwp_bind();
		var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
		if (var_tmp != NULL) {
			/* duplicate session id */
			log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
			    sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
			    var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
			psref_release(&psref, &var_tmp->lv_psref,
			    lv_psref_class);
			curlwp_bindx(bound);
			return EINVAL;
		}
		curlwp_bindx(bound);

		error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
		break;
	case SIOCDL2TPSESSION:
		l2tp_clear_session(sc);
		break;
	case SIOCSL2TPCOOKIE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
		    l2tpr.peer_cookie, l2tpr.peer_cookie_len);
		break;
	case SIOCDL2TPCOOKIE:
		l2tp_clear_cookie(sc);
		break;
	case SIOCSL2TPSTATE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		l2tp_set_state(sc, l2tpr.state);
		break;
	case SIOCGL2TP:
		/* get L2TPV3 session info */
		memset(&l2tpr, 0, sizeof(l2tpr));

		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		l2tpr.state = var->lv_state;
		l2tpr.my_sess_id = var->lv_my_sess_id;
		l2tpr.peer_sess_id = var->lv_peer_sess_id;
		l2tpr.my_cookie = var->lv_my_cookie;
		l2tpr.my_cookie_len = var->lv_my_cookie_len;
		l2tpr.peer_cookie = var->lv_peer_cookie;
		l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);

		error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
		break;
	}
 bad:
	return error;
}
1010
/*
 * Install new tunnel endpoints by publishing a new variant (copy of
 * the old one with lv_psrc/lv_pdst replaced and the encap entry
 * re-attached).  All allocation is done before taking locks; the old
 * variant and addresses are freed after the pserialize switch-over.
 *
 * Returns 0 on success or an error from encap_lock_enter() /
 * l2tp_encap_attach(), in which case nothing is changed.
 */
static int
l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct sockaddr *nsrc, *ndst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	/* allocate everything up front so the locked section cannot sleep */
	nsrc = sockaddr_dup(src, M_WAITOK);
	ndst = sockaddr_dup(dst, M_WAITOK);

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error)
		goto error;

	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = nsrc;
	nvar->lv_pdst = ndst;
	error = l2tp_encap_attach(nvar);
	if (error) {
		mutex_exit(&sc->l2tp_lock);
		encap_lock_exit();
		goto error;
	}
	/* make nvar's contents globally visible before publishing it */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	/* old variant is unreachable now; free its resources */
	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));

	return 0;

error:
	sockaddr_free(nsrc);
	sockaddr_free(ndst);
	kmem_free(nvar, sizeof(*nvar));

	return error;
}
1068
/*
 * Remove the tunnel endpoints: publish a new variant with NULL
 * lv_psrc/lv_pdst, detach the encap entry, and free the old variant
 * and addresses.  Silently does nothing if the encap lock cannot be
 * taken.
 */
static void
l2tp_delete_tunnel(struct ifnet *ifp)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error) {
		kmem_free(nvar, sizeof(*nvar));
		return;
	}
	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = NULL;
	nvar->lv_pdst = NULL;
	/* make nvar's contents globally visible before publishing it */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	/* old variant is unreachable now; free its resources */
	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));
}
1108
1109 static int
1110 id_hash_func(uint32_t id, u_long mask)
1111 {
1112 uint32_t hash;
1113
1114 hash = (id >> 16) ^ id;
1115 hash = (hash >> 4) ^ hash;
1116
1117 return hash & mask;
1118 }
1119
/*
 * Allocate the session-id hash table; l2tp_hash.mask receives
 * table size - 1.
 */
static void
l2tp_hash_init(void)
{

	l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
	    &l2tp_hash.mask);
}
1127
/*
 * Free the session-id hash table.  Returns EBUSY (leaving the table
 * intact) if any bucket still has an entry, otherwise 0.
 */
static int
l2tp_hash_fini(void)
{
	int i;

	mutex_enter(&l2tp_hash.lock);

	/* first pass: verify every bucket is empty before destroying any */
	for (i = 0; i < l2tp_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
			l2tp_hash) != NULL) {
			mutex_exit(&l2tp_hash.lock);
			return EBUSY;
		}
	}
	for (i = 0; i < l2tp_hash.mask + 1; i++)
		PSLIST_DESTROY(&l2tp_hash.lists[i]);

	mutex_exit(&l2tp_hash.lock);

	hashdone(l2tp_hash.lists, HASH_PSLIST, l2tp_hash.mask);

	return 0;
}
1151
/*
 * Set the local/peer session ids by publishing a new variant, then
 * (re)insert the softc into the session-id hash under the new local
 * id.  If the old variant had a complete session, its hash entry is
 * removed first.  Always returns 0.
 */
static int
l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
    uint32_t peer_sess_id)
{
	uint32_t idx;
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = my_sess_id;
	nvar->lv_peer_sess_id = peer_sess_id;
	/* make nvar's contents globally visible before publishing it */
	membar_producer();

	/* unhash the old session (if one was established) */
	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		/* wait for lockless readers to drain before reuse */
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);
	PSLIST_ENTRY_DESTROY(sc, l2tp_hash);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);

	idx = id_hash_func(nvar->lv_my_sess_id, l2tp_hash.mask);
	if ((ifp->if_flags & IFF_DEBUG) != 0)
		log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
		    sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);

	/* hash under the new local session id */
	PSLIST_ENTRY_INIT(sc, l2tp_hash);
	mutex_enter(&l2tp_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
	mutex_exit(&l2tp_hash.lock);

	kmem_free(ovar, sizeof(*ovar));
	return 0;
}
1195
/*
 * Clear the session ids by publishing a new variant with both ids set
 * to zero, removing the softc from the session-id hash if a session
 * was established.  Always returns 0.
 */
static int
l2tp_clear_session(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = 0;
	nvar->lv_peer_sess_id = 0;
	/* make nvar's contents globally visible before publishing it */
	membar_producer();

	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		/* wait for lockless readers to drain before freeing ovar */
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);
	kmem_free(ovar, sizeof(*ovar));
	return 0;
}
1224
/*
 * Look up the l2tp variant whose local session ID matches "id".
 *
 * The hash bucket is walked inside a pserialize read section; on a hit
 * a psref is acquired on the variant before the read section ends, so
 * the returned variant stays valid until the caller releases "psref".
 * Returns NULL if no session matches.
 */
struct l2tp_variant *
l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
{
	int idx;
	int s;
	struct l2tp_softc *sc;

	idx = id_hash_func(id, l2tp_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
	    l2tp_hash) {
		/* sc->l2tp_var may be republished concurrently; read once. */
		struct l2tp_variant *var = sc->l2tp_var;
		if (var == NULL)
			continue;
		if (var->lv_my_sess_id != id)
			continue;
		psref_acquire(psref, &var->lv_psref, lv_psref_class);
		pserialize_read_exit(s);
		return var;
	}
	pserialize_read_exit(s);
	return NULL;
}
1249
1250 /*
1251 * l2tp_variant update API.
1252 *
1253 * Assumption:
1254 * reader side dereferences sc->l2tp_var in reader critical section only,
1255 * that is, all of reader sides do not reader the sc->l2tp_var after
1256 * pserialize_perform().
1257 */
/*
 * Publish "nvar" as the current variant of "sc" and retire the old one.
 *
 * The caller must hold sc->l2tp_lock and must have issued
 * membar_producer() after filling in nvar.  pserialize_perform() waits
 * for readers still dereferencing the old variant, and
 * psref_target_destroy() waits for outstanding psref holders on it;
 * after this function returns the caller may free the old variant.
 *
 * IFF_RUNNING is derived from the new variant: set iff both tunnel
 * endpoint addresses are configured.
 */
static void
l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
{
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	struct l2tp_variant *ovar = sc->l2tp_var;

	KASSERT(mutex_owned(&sc->l2tp_lock));

	sc->l2tp_var = nvar;
	pserialize_perform(sc->l2tp_psz);
	psref_target_destroy(&ovar->lv_psref, lv_psref_class);

	/*
	 * Re-read sc->l2tp_var rather than using nvar directly, in case
	 * the stored pointer is ever updated by other means (historical
	 * note about atomic_swap_ptr(3) argument semantics).
	 */
	if (sc->l2tp_var != NULL) {
		if (sc->l2tp_var->lv_psrc != NULL
		    && sc->l2tp_var->lv_pdst != NULL)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
	}
}
1282
1283 static int
1284 l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
1285 uint64_t peer_cookie, u_int peer_cookie_len)
1286 {
1287 struct l2tp_variant *nvar;
1288
1289 if (my_cookie == 0 || peer_cookie == 0)
1290 return EINVAL;
1291
1292 if (my_cookie_len != 4 && my_cookie_len != 8
1293 && peer_cookie_len != 4 && peer_cookie_len != 8)
1294 return EINVAL;
1295
1296 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1297
1298 mutex_enter(&sc->l2tp_lock);
1299
1300 *nvar = *sc->l2tp_var;
1301 psref_target_init(&nvar->lv_psref, lv_psref_class);
1302 nvar->lv_my_cookie = my_cookie;
1303 nvar->lv_my_cookie_len = my_cookie_len;
1304 nvar->lv_peer_cookie = peer_cookie;
1305 nvar->lv_peer_cookie_len = peer_cookie_len;
1306 nvar->lv_use_cookie = L2TP_COOKIE_ON;
1307 membar_producer();
1308 l2tp_variant_update(sc, nvar);
1309
1310 mutex_exit(&sc->l2tp_lock);
1311
1312 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1313 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1314 log(LOG_DEBUG,
1315 "%s: set cookie: "
1316 "local cookie_len=%u local cookie=%" PRIu64 ", "
1317 "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
1318 ifp->if_xname, my_cookie_len, my_cookie,
1319 peer_cookie_len, peer_cookie);
1320 }
1321
1322 return 0;
1323 }
1324
1325 static void
1326 l2tp_clear_cookie(struct l2tp_softc *sc)
1327 {
1328 struct l2tp_variant *nvar;
1329
1330 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1331
1332 mutex_enter(&sc->l2tp_lock);
1333
1334 *nvar = *sc->l2tp_var;
1335 psref_target_init(&nvar->lv_psref, lv_psref_class);
1336 nvar->lv_my_cookie = 0;
1337 nvar->lv_my_cookie_len = 0;
1338 nvar->lv_peer_cookie = 0;
1339 nvar->lv_peer_cookie_len = 0;
1340 nvar->lv_use_cookie = L2TP_COOKIE_OFF;
1341 membar_producer();
1342 l2tp_variant_update(sc, nvar);
1343
1344 mutex_exit(&sc->l2tp_lock);
1345 }
1346
1347 static void
1348 l2tp_set_state(struct l2tp_softc *sc, int state)
1349 {
1350 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1351 struct l2tp_variant *nvar;
1352
1353 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1354
1355 mutex_enter(&sc->l2tp_lock);
1356
1357 *nvar = *sc->l2tp_var;
1358 psref_target_init(&nvar->lv_psref, lv_psref_class);
1359 nvar->lv_state = state;
1360 membar_producer();
1361 l2tp_variant_update(sc, nvar);
1362
1363 if (nvar->lv_state == L2TP_STATE_UP) {
1364 ifp->if_link_state = LINK_STATE_UP;
1365 } else {
1366 ifp->if_link_state = LINK_STATE_DOWN;
1367 }
1368
1369 mutex_exit(&sc->l2tp_lock);
1370
1371 #ifdef NOTYET
1372 vlan_linkstate_notify(ifp, ifp->if_link_state);
1373 #endif
1374 }
1375
1376 static int
1377 l2tp_encap_attach(struct l2tp_variant *var)
1378 {
1379 int error;
1380
1381 if (var == NULL || var->lv_psrc == NULL)
1382 return EINVAL;
1383
1384 switch (var->lv_psrc->sa_family) {
1385 #ifdef INET
1386 case AF_INET:
1387 error = in_l2tp_attach(var);
1388 break;
1389 #endif
1390 #ifdef INET6
1391 case AF_INET6:
1392 error = in6_l2tp_attach(var);
1393 break;
1394 #endif
1395 default:
1396 error = EINVAL;
1397 break;
1398 }
1399
1400 return error;
1401 }
1402
1403 static int
1404 l2tp_encap_detach(struct l2tp_variant *var)
1405 {
1406 int error;
1407
1408 if (var == NULL || var->lv_psrc == NULL)
1409 return EINVAL;
1410
1411 switch (var->lv_psrc->sa_family) {
1412 #ifdef INET
1413 case AF_INET:
1414 error = in_l2tp_detach(var);
1415 break;
1416 #endif
1417 #ifdef INET6
1418 case AF_INET6:
1419 error = in6_l2tp_detach(var);
1420 break;
1421 #endif
1422 default:
1423 error = EINVAL;
1424 break;
1425 }
1426
1427 return error;
1428 }
1429
/*
 * Enforce the tunnel-nesting limit for packets transmitted on ifp by
 * delegating to if_tunnel_check_nesting() with the l2tp-specific
 * maximum (max_l2tp_nesting), and return its result.
 */
int
l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
{

	return if_tunnel_check_nesting(ifp, m, max_l2tp_nesting);
}
1436
1437 /*
1438 * Module infrastructure
1439 */
1440 #include "if_module.h"
1441
1442 IF_MODULE(MODULE_CLASS_DRIVER, l2tp, NULL)
1443
1444
1445 /* TODO: IP_TCPMSS support */
1446 #ifdef IP_TCPMSS
1447 static int l2tp_need_tcpmss_clamp(struct ifnet *);
1448 #ifdef INET
1449 static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
1450 #endif
1451 #ifdef INET6
1452 static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
1453 #endif
1454
/*
 * Clamp the TCP MSS of a segment carried inside the tunneled Ethernet
 * frame, handling IPv4/IPv6 payloads with or without a VLAN tag.
 *
 * The Ethernet (and, for tagged frames, VLAN) header is saved, stripped
 * with m_adj(), the inner packet is handed to the per-AF clamp routine,
 * and the saved header is prepended back.  Returns the (possibly
 * reallocated) mbuf chain, or NULL if the chain was freed on error.
 */
struct mbuf *
l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	struct ether_vlan_header evh;

	if (!l2tp_need_tcpmss_clamp(ifp)) {
		return m;
	}

	/* Too short to hold even an untagged frame we can parse safely. */
	if (m->m_pkthdr.len < sizeof(evh)) {
		m_freem(m);
		return NULL;
	}

	/* save ether header */
	m_copydata(m, 0, sizeof(evh), (void *)&evh);
	eh = (struct ether_header *)&evh;

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_VLAN: /* Ether + VLAN */
		/*
		 * NOTE(review): "<=" leaves header-only frames
		 * unclamped rather than freed — presumably intentional
		 * pass-through; confirm.
		 */
		if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
			break;
		m_adj(m, sizeof(struct ether_vlan_header));
		switch (ntohs(evh.evl_proto)) {
#ifdef INET
		case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
			m = l2tp_tcpmss4_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
			m = l2tp_tcpmss6_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET6 */
		default:
			break;
		}

		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_vlan_header),
		    M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_vlan_header *) = evh;
		break;

#ifdef INET
	case ETHERTYPE_IP: /* Ether + IPv4 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss4_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET */

#ifdef INET6
	case ETHERTYPE_IPV6: /* Ether + IPv6 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss6_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET6 */

	default:
		/* Non-IP payload: pass through untouched. */
		break;
	}

	return m;
}
1544
/*
 * Return non-zero when MSS clamping is configured on the interface for
 * either address family (if_tcpmss / if_tcpmss6 non-zero).
 */
static int
l2tp_need_tcpmss_clamp(struct ifnet *ifp)
{
	int need = 0;

#ifdef INET
	if (ifp->if_tcpmss != 0)
		need = 1;
#endif

#ifdef INET6
	if (ifp->if_tcpmss6 != 0)
		need = 1;
#endif

	return need;
}
1562
1563 #ifdef INET
1564 static struct mbuf *
1565 l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
1566 {
1567
1568 if (ifp->if_tcpmss != 0) {
1569 return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
1570 ifp->if_mtu - IP_TCPMSS_EXTLEN :
1571 ifp->if_tcpmss);
1572 }
1573 return m;
1574 }
1575 #endif /* INET */
1576
1577 #ifdef INET6
1578 static struct mbuf *
1579 l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
1580 {
1581 int ip6hdrlen;
1582
1583 if (ifp->if_tcpmss6 != 0 &&
1584 ip6_tcpmss_applicable(m, &ip6hdrlen)) {
1585 return ip6_tcpmss(m, ip6hdrlen,
1586 (ifp->if_tcpmss6 < 0) ?
1587 ifp->if_mtu - IP6_TCPMSS_EXTLEN :
1588 ifp->if_tcpmss6);
1589 }
1590 return m;
1591 }
1592 #endif /* INET6 */
1593
1594 #endif /* IP_TCPMSS */
1595