/*	$NetBSD: if_l2tp.c,v 1.44 2020/10/15 02:54:10 roy Exp $	*/

/*
 * Copyright (c) 2017 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * L2TPv3 kernel interface
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.44 2020/10/15 02:54:10 roy Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/cpu.h>
#include <sys/cprng.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/pserialize.h>
#include <sys/device.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/bpf.h>
#include <net/if_vlanvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_encap.h>
#ifdef INET
#include <netinet/in_var.h>
#include <netinet/in_l2tp.h>
#endif	/* INET */
#ifdef INET6
#include <netinet6/in6_l2tp.h>
#endif

#include <net/if_l2tp.h>

/* TODO: IP_TCPMSS support */
#undef IP_TCPMSS
#ifdef IP_TCPMSS
#include <netinet/ip_tcpmss.h>
#endif

/*
 * l2tp global variable definitions
 */
static struct {
	LIST_HEAD(l2tp_sclist, l2tp_softc) list;
	kmutex_t lock;
} l2tp_softcs __cacheline_aligned;


#if !defined(L2TP_ID_HASH_SIZE)
#define L2TP_ID_HASH_SIZE 64
#endif
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
	u_long mask;
} l2tp_hash __cacheline_aligned = {
	.lists = NULL,
};

pserialize_t l2tp_psz __read_mostly;
struct psref_class *lv_psref_class __read_mostly;

static void l2tp_ifq_init_pc(void *, void *, struct cpu_info *);
static void l2tp_ifq_fini_pc(void *, void *, struct cpu_info *);

static int l2tp_clone_create(struct if_clone *, int);
static int l2tp_clone_destroy(struct ifnet *);

struct if_clone l2tp_cloner =
    IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);

static int l2tp_tx_enqueue(struct l2tp_variant *, struct mbuf *);
static int l2tp_output(struct ifnet *, struct mbuf *,
    const struct sockaddr *, const struct rtentry *);
static void l2tp_sendit(struct l2tp_variant *, struct mbuf *);
static void l2tpintr(struct l2tp_variant *);
static void l2tpintr_softint(void *);

static void l2tp_hash_init(void);
static int l2tp_hash_fini(void);

static void l2tp_start(struct ifnet *);
static int l2tp_transmit(struct ifnet *, struct mbuf *);

static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
    struct sockaddr *);
static void l2tp_delete_tunnel(struct ifnet *);

static int id_hash_func(uint32_t, u_long);

static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
static int l2tp_clear_session(struct l2tp_softc *);
static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
static void l2tp_clear_cookie(struct l2tp_softc *);
static void l2tp_set_state(struct l2tp_softc *, int);
static int l2tp_encap_attach(struct l2tp_variant *);
static int l2tp_encap_detach(struct l2tp_variant *);

static inline struct ifqueue *
l2tp_ifq_percpu_getref(percpu_t *pc)
{

	return *(struct ifqueue **)percpu_getref(pc);
}

static inline void
l2tp_ifq_percpu_putref(percpu_t *pc)
{

	percpu_putref(pc);
}

#ifndef MAX_L2TP_NEST
/*
 * This macro controls the upper limit on nesting of l2tp tunnels.
 * Since setting a large value here together with a careless configuration
 * may crash the system, we don't allow any nesting by default.
 * If you need nested l2tp tunnels, you can define this macro in your
 * kernel configuration file.  However, if you do so, please be careful
 * to configure the tunnels so that they do not form a loop.
 */
/*
 * XXX
 * Currently, if in_l2tp_output() is called recursively, it deadlocks on
 * its own struct l2tp_ro->lr_lock.  So nested l2tp tunnels are prohibited.
 */
#define MAX_L2TP_NEST 0
#endif
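/*
 * For example, a kernel configuration that wants to allow one level of
 * nesting could define the macro itself; a sketch, assuming the usual
 * options(4) mechanism for passing defines to the build:
 *
 *	options 	MAX_L2TP_NEST=1
 */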

static int max_l2tp_nesting = MAX_L2TP_NEST;

/* ARGSUSED */
void
l2tpattach(int count)
{
	/*
	 * Nothing to do here; initialization is handled by the
	 * module initialization code in l2tpinit() below.
	 */
}

static void
l2tpinit(void)
{

	mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&l2tp_softcs.list);

	mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	l2tp_psz = pserialize_create();
	lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
	if_clone_attach(&l2tp_cloner);

	l2tp_hash_init();
}

static int
l2tpdetach(void)
{
	int error;

	mutex_enter(&l2tp_softcs.lock);
	if (!LIST_EMPTY(&l2tp_softcs.list)) {
		mutex_exit(&l2tp_softcs.lock);
		return EBUSY;
	}
	mutex_exit(&l2tp_softcs.lock);

	error = l2tp_hash_fini();
	if (error)
		return error;

	if_clone_detach(&l2tp_cloner);
	psref_class_destroy(lv_psref_class);
	pserialize_destroy(l2tp_psz);
	mutex_destroy(&l2tp_hash.lock);

	mutex_destroy(&l2tp_softcs.lock);

	return error;
}

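/*
 * Create an l2tp(4) interface instance: allocate the softc and its
 * initial l2tp_variant, set up the per-CPU transmit queues and route
 * cache, establish the transmit softint, and register the interface.
 */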
static int
l2tp_clone_create(struct if_clone *ifc, int unit)
{
	struct l2tp_softc *sc;
	struct l2tp_variant *var;
	int rv;
	u_int si_flags = SOFTINT_NET;
#ifdef NET_MPSAFE
	si_flags |= SOFTINT_MPSAFE;
#endif
	sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
	if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
	rv = l2tpattach0(sc);
	if (rv != 0) {
		kmem_free(sc, sizeof(struct l2tp_softc));
		return rv;
	}

	var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
	var->lv_softc = sc;
	var->lv_state = L2TP_STATE_DOWN;
	var->lv_use_cookie = L2TP_COOKIE_OFF;
	psref_target_init(&var->lv_psref, lv_psref_class);

	sc->l2tp_var = var;
	mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
	sc->l2tp_psz = pserialize_create();
	PSLIST_ENTRY_INIT(sc, l2tp_hash);

	sc->l2tp_ro_percpu = if_tunnel_alloc_ro_percpu();

	sc->l2tp_ifq_percpu = percpu_create(sizeof(struct ifqueue *),
	    l2tp_ifq_init_pc, l2tp_ifq_fini_pc, NULL);
	sc->l2tp_si = softint_establish(si_flags, l2tpintr_softint, sc);

	mutex_enter(&l2tp_softcs.lock);
	LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	return (0);
}

int
l2tpattach0(struct l2tp_softc *sc)
{
	int rv;

	sc->l2tp_ec.ec_if.if_addrlen = 0;
	sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
	sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
#ifdef NET_MPSAFE
	sc->l2tp_ec.ec_if.if_extflags = IFEF_MPSAFE;
#endif
	sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
	sc->l2tp_ec.ec_if.if_output = l2tp_output;
	sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
	sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
	sc->l2tp_ec.ec_if.if_start = l2tp_start;
	sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
	sc->l2tp_ec.ec_if._if_input = ether_input;
	IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);

#ifdef MBUFTRACE
	struct ethercom *ec = &sc->l2tp_ec;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	strlcpy(ec->ec_tx_mowner.mo_name, ifp->if_xname,
	    sizeof(ec->ec_tx_mowner.mo_name));
	strlcpy(ec->ec_tx_mowner.mo_descr, "tx",
	    sizeof(ec->ec_tx_mowner.mo_descr));
	strlcpy(ec->ec_rx_mowner.mo_name, ifp->if_xname,
	    sizeof(ec->ec_rx_mowner.mo_name));
	strlcpy(ec->ec_rx_mowner.mo_descr, "rx",
	    sizeof(ec->ec_rx_mowner.mo_descr));
	MOWNER_ATTACH(&ec->ec_tx_mowner);
	MOWNER_ATTACH(&ec->ec_rx_mowner);
	ifp->if_mowner = &ec->ec_tx_mowner;
#endif

	/* XXX
	 * It may improve performance to use if_initialize()/if_register()
	 * so that l2tp_input() calls if_input() instead of
	 * if_percpuq_enqueue().  However, that causes recursive softnet_lock
	 * when NET_MPSAFE is not set.
	 */
	rv = if_initialize(&sc->l2tp_ec.ec_if);
	if (rv != 0)
		return rv;
	sc->l2tp_ec.ec_if.if_link_state = LINK_STATE_DOWN;
	if_alloc_sadl(&sc->l2tp_ec.ec_if);
	bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));
	if_register(&sc->l2tp_ec.ec_if);

	return 0;
}

void
l2tp_ifq_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct ifqueue **ifqp = p;

	*ifqp = kmem_zalloc(sizeof(**ifqp), KM_SLEEP);
	(*ifqp)->ifq_maxlen = IFQ_MAXLEN;
}

void
l2tp_ifq_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct ifqueue **ifqp = p;

	kmem_free(*ifqp, sizeof(**ifqp));
}

static int
l2tp_clone_destroy(struct ifnet *ifp)
{
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	l2tp_clear_session(sc);
	l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
	/*
	 * Prevent l2tp_transmit() and l2tpintr_softint() from accessing
	 * sc->l2tp_var after it is freed.
	 */
	mutex_enter(&sc->l2tp_lock);
	var = sc->l2tp_var;
	l2tp_variant_update(sc, NULL);
	mutex_exit(&sc->l2tp_lock);

	softint_disestablish(sc->l2tp_si);
	percpu_free(sc->l2tp_ifq_percpu, sizeof(struct ifqueue *));

	mutex_enter(&l2tp_softcs.lock);
	LIST_REMOVE(sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	bpf_detach(ifp);

	if_detach(ifp);

	if_tunnel_free_ro_percpu(sc->l2tp_ro_percpu);

	kmem_free(var, sizeof(struct l2tp_variant));
	pserialize_destroy(sc->l2tp_psz);
	mutex_destroy(&sc->l2tp_lock);
	kmem_free(sc, sizeof(struct l2tp_softc));

	return 0;
}

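/*
 * Enqueue an outgoing frame on the per-CPU ifqueue and kick the
 * transmit softint.  Packets are dropped (ENOBUFS) when the queue is
 * full; the actual encapsulation happens later in l2tpintr().
 */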
static int
l2tp_tx_enqueue(struct l2tp_variant *var, struct mbuf *m)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct ifqueue *ifq;
	int s;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	s = splsoftnet();
	ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
	if (IF_QFULL(ifq)) {
		if_statinc(ifp, if_oerrors);
		l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);
		splx(s);
		m_freem(m);
		return ENOBUFS;
	}

	IF_ENQUEUE(ifq, m);
	percpu_putref(sc->l2tp_ifq_percpu);
	softint_schedule(sc->l2tp_si);
	/* counter is incremented in l2tpintr() */
	splx(s);
	return 0;
}

static int
l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var;
	struct psref psref;
	int error = 0;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* XXX should we check if our outer source is legal? */

	/* use DLT_NULL encapsulation here to pass inner af type */
	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (!m) {
		error = ENOBUFS;
		goto end;
	}
	*mtod(m, int *) = dst->sa_family;

	error = l2tp_tx_enqueue(var, m);
end:
	l2tp_putref_variant(var, &psref);
	if (error)
		if_statinc(ifp, if_oerrors);

	return error;
}

static void
l2tp_sendit(struct l2tp_variant *var, struct mbuf *m)
{
	int len;
	int error;
	struct l2tp_softc *sc;
	struct ifnet *ifp;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	len = m->m_pkthdr.len;
	m->m_flags &= ~(M_BCAST|M_MCAST);
	bpf_mtap(ifp, m, BPF_D_OUT);

	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_output(var, m);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_output(var, m);
		break;
#endif
	default:
		m_freem(m);
		error = ENETDOWN;
		break;
	}
	if (error) {
		if_statinc(ifp, if_oerrors);
	} else {
		if_statadd2(ifp, if_opackets, 1, if_obytes, len);
	}
}

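/*
 * Drain the per-CPU transmit queue from softint context and hand each
 * frame to l2tp_sendit().  CPU 0 additionally drains the shared
 * if_snd queue.  If no session is configured, queued packets are
 * simply purged.
 */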
static void
l2tpintr(struct l2tp_variant *var)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	struct ifqueue *ifq;
	u_int cpuid = cpu_index(curcpu());

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* output processing */
	if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
		ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
		IF_PURGE(ifq);
		l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);
		if (cpuid == 0)
			IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Currently, l2tpintr() is always called in softint context. */
	ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m != NULL)
			l2tp_sendit(var, m);
		else
			break;
	}
	l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);

	if (cpuid == 0) {
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				l2tp_sendit(var, m);
			else
				break;
		}
	}
}

static void
l2tpintr_softint(void *arg)
{
	struct l2tp_variant *var;
	struct psref psref;
	struct l2tp_softc *sc = arg;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL)
		return;

	l2tpintr(var);
	l2tp_putref_variant(var, &psref);
}

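/*
 * l2tp_input() is called from the protocol input paths (in_l2tp.c /
 * in6_l2tp.c) once the outer IP/L2TPv3 headers have been stripped.
 * It re-aligns the inner Ethernet frame if necessary and feeds it to
 * the interface's percpuq, from where ether_input() picks it up.
 */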
void
l2tp_input(struct mbuf *m, struct ifnet *ifp)
{
	vaddr_t addr;

	KASSERT(ifp != NULL);

	/*
	 * Currently, l2tp(4) supports only Ethernet as the inner protocol.
	 */
	if (m->m_pkthdr.len < sizeof(struct ether_header)) {
		m_freem(m);
		return;
	}

	/*
	 * If the head of the payload is not aligned, align it.
	 */
	addr = mtod(m, vaddr_t);
	if ((addr & 0x03) != 0x2) {
		/* copy and align head of payload */
		struct mbuf *m_head;
		int copy_length;
		u_int pad = roundup(sizeof(struct ether_header), 4)
		    - sizeof(struct ether_header);

#define L2TP_COPY_LENGTH 60

		if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
			copy_length = m->m_pkthdr.len;
		} else {
			copy_length = L2TP_COPY_LENGTH;
		}

		if (m->m_len < copy_length) {
			m = m_pullup(m, copy_length);
			if (m == NULL)
				return;
		}

		MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
		if (m_head == NULL) {
			m_freem(m);
			return;
		}
		m_move_pkthdr(m_head, m);

		/*
		 * m_head should be:
		 *                             L2TP_COPY_LENGTH
		 *                          <- + roundup(pad, 4) - pad ->
		 *   +-------+--------+-----+--------------+-------------+
		 *   | m_hdr | pkthdr | ... | ether header |   payload   |
		 *   +-------+--------+-----+--------------+-------------+
		 *                          ^              ^
		 *                          m_data         4 byte aligned
		 */
		m_align(m_head, L2TP_COPY_LENGTH + roundup(pad, 4));
		m_head->m_data += pad;

		memcpy(mtod(m_head, void *), mtod(m, void *), copy_length);
		m_head->m_len = copy_length;
		m->m_data += copy_length;
		m->m_len -= copy_length;

		/* construct chain */
		if (m->m_len == 0) {
			m_head->m_next = m_free(m);
		} else {
			m_head->m_next = m;
		}

		/* override m */
		m = m_head;
	}

	m_set_rcvif(m, ifp);

	/*
	 * bpf_mtap() and the if_ipackets increment are done in if_input().
	 *
	 * obytes is incremented at ether_output() or bridge_enqueue().
	 */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}

void
l2tp_start(struct ifnet *ifp)
{
	struct psref psref;
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL)
		return;

	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		l2tp_putref_variant(var, &psref);
		return;
	}
674
675 kpreempt_disable();
676 softint_schedule(sc->l2tp_si);
677 kpreempt_enable();
678 l2tp_putref_variant(var, &psref);
679 }
680
681 int
682 l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
683 {
684 int error;
685 struct psref psref;
686 struct l2tp_variant *var;
687 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
688 l2tp_ec.ec_if);
689
690 var = l2tp_getref_variant(sc, &psref);
691 if (var == NULL) {
692 m_freem(m);
693 return ENETDOWN;
694 }
695
696 if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
697 m_freem(m);
698 error = ENETDOWN;
699 goto out;
700 }
701
702 m->m_flags &= ~(M_BCAST|M_MCAST);
703
704 error = l2tp_tx_enqueue(var, m);
705 out:
706 l2tp_putref_variant(var, &psref);
707 return error;
708 }
709
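/*
 * Interface ioctl handler.  In addition to the generic requests, this
 * handles tunnel endpoint configuration (SIOCSIFPHYADDR,
 * SIOCSLIFPHYADDR, SIOCDIFPHYADDR, SIOCGIFPSRCADDR, ...) and the
 * l2tp(4)-specific requests: SIOC[SD]L2TPSESSION, SIOC[SD]L2TPCOOKIE,
 * SIOCSL2TPSTATE and SIOCGL2TP.
 */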
/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
int
l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var, *var_tmp;
	struct ifreq *ifr = data;
	int error = 0, size;
	struct sockaddr *dst, *src;
	struct l2tp_req l2tpr;
	u_long mtu;
	int bound;
	struct psref psref;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		break;

	case SIOCSIFDSTADDR:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:	/* IP supports Multicast */
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif /* INET6 */
		default:	/* Other protocols don't support multicast */
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
			return (EINVAL);
		ifp->if_mtu = mtu;
		break;

#ifdef INET
	case SIOCSIFPHYADDR:
		src = (struct sockaddr *)
		    &(((struct in_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
		    &(((struct in_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in)
		    || dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET */
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
		src = (struct sockaddr *)
		    &(((struct in6_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
		    &(((struct in6_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in6)
		    || dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET6 */
	case SIOCSLIFPHYADDR:
		src = (struct sockaddr *)
		    &(((struct if_laddrreq *)data)->addr);
		dst = (struct sockaddr *)
		    &(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_family != dst->sa_family)
			return EINVAL;
		else if (src->sa_family == AF_INET
		    && src->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (src->sa_family == AF_INET6
		    && src->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		else if (dst->sa_family == AF_INET
		    && dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (dst->sa_family == AF_INET6
		    && dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

	case SIOCDIFPHYADDR:
		l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
		if_link_state_change(&sc->l2tp_ec.ec_if, LINK_STATE_DOWN);
		break;

	case SIOCGIFPSRCADDR:
#ifdef INET6
	case SIOCGIFPSRCADDR_IN6:
#endif /* INET6 */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_psrc;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPSRCADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPSRCADDR_IN6:
			dst = (struct sockaddr *)
			    &(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGIFPDSTADDR:
#ifdef INET6
	case SIOCGIFPDSTADDR_IN6:
#endif /* INET6 */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_pdst;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPDSTADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPDSTADDR_IN6:
			dst = (struct sockaddr *)
			    &(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGLIFPHYADDR:
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		/* copy src */
		src = var->lv_psrc;
		dst = (struct sockaddr *)
		    &(((struct if_laddrreq *)data)->addr);
		size = sizeof(((struct if_laddrreq *)data)->addr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);

		/* copy dst */
		src = var->lv_pdst;
		dst = (struct sockaddr *)
		    &(((struct if_laddrreq *)data)->dstaddr);
		size = sizeof(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCSL2TPSESSION:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		/* session IDs must not be zero */
		if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
			return EINVAL;

		bound = curlwp_bind();
		var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
		if (var_tmp != NULL) {
			/* duplicate session id */
			log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
			    sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
			    var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
			psref_release(&psref, &var_tmp->lv_psref,
			    lv_psref_class);
			curlwp_bindx(bound);
			return EINVAL;
		}
		curlwp_bindx(bound);

		error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
		break;
	case SIOCDL2TPSESSION:
		l2tp_clear_session(sc);
		break;
	case SIOCSL2TPCOOKIE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
		    l2tpr.peer_cookie, l2tpr.peer_cookie_len);
		break;
	case SIOCDL2TPCOOKIE:
		l2tp_clear_cookie(sc);
		break;
	case SIOCSL2TPSTATE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		l2tp_set_state(sc, l2tpr.state);
		break;
	case SIOCGL2TP:
		/* get L2TPV3 session info */
		memset(&l2tpr, 0, sizeof(l2tpr));

		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		l2tpr.state = var->lv_state;
		l2tpr.my_sess_id = var->lv_my_sess_id;
		l2tpr.peer_sess_id = var->lv_peer_sess_id;
		l2tpr.my_cookie = var->lv_my_cookie;
		l2tpr.my_cookie_len = var->lv_my_cookie_len;
		l2tpr.peer_cookie = var->lv_peer_cookie;
		l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);

		error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
		break;
	}
bad:
	return error;
}

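/*
 * Install new outer (tunnel) source/destination addresses.  A copy of
 * the current l2tp_variant is made, the new addresses and encap
 * attachment are set up on the copy, and the copy is published with
 * l2tp_variant_update(); the old variant and addresses are then freed.
 */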
static int
l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct sockaddr *nsrc, *ndst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nsrc = sockaddr_dup(src, M_WAITOK);
	ndst = sockaddr_dup(dst, M_WAITOK);

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error)
		goto error;

	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = nsrc;
	nvar->lv_pdst = ndst;
	error = l2tp_encap_attach(nvar);
	if (error) {
		mutex_exit(&sc->l2tp_lock);
		encap_lock_exit();
		goto error;
	}
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));

	if_link_state_change(ifp, LINK_STATE_UP);
	return 0;

error:
	sockaddr_free(nsrc);
	sockaddr_free(ndst);
	kmem_free(nvar, sizeof(*nvar));

	return error;
}

static void
l2tp_delete_tunnel(struct ifnet *ifp)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error) {
		kmem_free(nvar, sizeof(*nvar));
		return;
	}
	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = NULL;
	nvar->lv_pdst = NULL;
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));
}

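/*
 * Map a 32-bit session id to a hash bucket by xor-folding the id onto
 * itself and masking with the table size.  For example, with the
 * default L2TP_ID_HASH_SIZE of 64 (mask 0x3f), id 0x12345678 folds to
 * (0x1234 ^ 0x12345678) = 0x1234444c, then (0x01234444 ^ 0x1234444c)
 * = 0x13170008, and 0x13170008 & 0x3f selects bucket 8.
 */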
static int
id_hash_func(uint32_t id, u_long mask)
{
	uint32_t hash;

	hash = (id >> 16) ^ id;
	hash = (hash >> 4) ^ hash;

	return hash & mask;
}

static void
l2tp_hash_init(void)
{

	l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
	    &l2tp_hash.mask);
}

static int
l2tp_hash_fini(void)
{
	int i;

	mutex_enter(&l2tp_hash.lock);

	for (i = 0; i < l2tp_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
		    l2tp_hash) != NULL) {
			mutex_exit(&l2tp_hash.lock);
			return EBUSY;
		}
	}
	for (i = 0; i < l2tp_hash.mask + 1; i++)
		PSLIST_DESTROY(&l2tp_hash.lists[i]);

	mutex_exit(&l2tp_hash.lock);

	hashdone(l2tp_hash.lists, HASH_PSLIST, l2tp_hash.mask);

	return 0;
}

static int
l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
    uint32_t peer_sess_id)
{
	uint32_t idx;
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = my_sess_id;
	nvar->lv_peer_sess_id = peer_sess_id;

	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);
	PSLIST_ENTRY_DESTROY(sc, l2tp_hash);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);

	idx = id_hash_func(nvar->lv_my_sess_id, l2tp_hash.mask);
	if ((ifp->if_flags & IFF_DEBUG) != 0)
		log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
		    sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);

	PSLIST_ENTRY_INIT(sc, l2tp_hash);
	mutex_enter(&l2tp_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
	mutex_exit(&l2tp_hash.lock);

	kmem_free(ovar, sizeof(*ovar));
	return 0;
}

static int
l2tp_clear_session(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = 0;
	nvar->lv_peer_sess_id = 0;

	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);
	kmem_free(ovar, sizeof(*ovar));
	return 0;
}

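/*
 * Look up the l2tp_variant whose local session id matches "id" and
 * return it with a psref acquired.  The caller must be bound to the
 * current CPU (curlwp_bind()) and must drop the reference with
 * psref_release() when done.
 */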
struct l2tp_variant *
l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
{
	int idx;
	int s;
	struct l2tp_softc *sc;

	idx = id_hash_func(id, l2tp_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
	    l2tp_hash) {
		struct l2tp_variant *var = atomic_load_consume(&sc->l2tp_var);
		if (var == NULL)
			continue;
		if (var->lv_my_sess_id != id)
			continue;
		psref_acquire(psref, &var->lv_psref, lv_psref_class);
		pserialize_read_exit(s);
		return var;
	}
	pserialize_read_exit(s);
	return NULL;
}

/*
 * l2tp_variant update API.
 *
 * Assumption:
 * Readers dereference sc->l2tp_var only inside a reader critical
 * section, i.e. no reader accesses sc->l2tp_var after
 * pserialize_perform().
 */
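/*
 * Reader-side pattern used elsewhere in this file (see
 * l2tp_lookup_session_ref()): the variant is loaded and pinned with a
 * psref inside a pserialize read section, roughly
 *
 *	s = pserialize_read_enter();
 *	var = atomic_load_consume(&sc->l2tp_var);
 *	if (var != NULL)
 *		psref_acquire(psref, &var->lv_psref, lv_psref_class);
 *	pserialize_read_exit(s);
 */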
static void
l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
{
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	struct l2tp_variant *ovar = sc->l2tp_var;

	KASSERT(mutex_owned(&sc->l2tp_lock));

	atomic_store_release(&sc->l2tp_var, nvar);
	pserialize_perform(sc->l2tp_psz);
	psref_target_destroy(&ovar->lv_psref, lv_psref_class);

	if (nvar != NULL) {
		if (nvar->lv_psrc != NULL && nvar->lv_pdst != NULL)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
	}
}

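/*
 * Configure the L2TPv3 cookies.  Both the local and the peer cookie
 * must be non-zero and, as in RFC 3931, either 4 or 8 bytes long.
 */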
static int
l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
    uint64_t peer_cookie, u_int peer_cookie_len)
{
	struct l2tp_variant *nvar;

	if (my_cookie == 0 || peer_cookie == 0)
		return EINVAL;

	if ((my_cookie_len != 4 && my_cookie_len != 8)
	    || (peer_cookie_len != 4 && peer_cookie_len != 8))
		return EINVAL;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_cookie = my_cookie;
	nvar->lv_my_cookie_len = my_cookie_len;
	nvar->lv_peer_cookie = peer_cookie;
	nvar->lv_peer_cookie_len = peer_cookie_len;
	nvar->lv_use_cookie = L2TP_COOKIE_ON;
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	if ((ifp->if_flags & IFF_DEBUG) != 0) {
		log(LOG_DEBUG,
		    "%s: set cookie: "
		    "local cookie_len=%u local cookie=%" PRIu64 ", "
		    "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
		    ifp->if_xname, my_cookie_len, my_cookie,
		    peer_cookie_len, peer_cookie);
	}

	return 0;
}

static void
l2tp_clear_cookie(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_cookie = 0;
	nvar->lv_my_cookie_len = 0;
	nvar->lv_peer_cookie = 0;
	nvar->lv_peer_cookie_len = 0;
	nvar->lv_use_cookie = L2TP_COOKIE_OFF;
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);
}

static void
l2tp_set_state(struct l2tp_softc *sc, int state)
{
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	struct l2tp_variant *nvar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_state = state;
	l2tp_variant_update(sc, nvar);

	if (nvar->lv_state == L2TP_STATE_UP) {
		ifp->if_link_state = LINK_STATE_UP;
	} else {
		ifp->if_link_state = LINK_STATE_DOWN;
	}

	mutex_exit(&sc->l2tp_lock);

#ifdef NOTYET
	vlan_linkstate_notify(ifp, ifp->if_link_state);
#endif
}

static int
l2tp_encap_attach(struct l2tp_variant *var)
{
	int error;

	if (var == NULL || var->lv_psrc == NULL)
		return EINVAL;

	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_attach(var);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_attach(var);
		break;
#endif
	default:
		error = EINVAL;
		break;
	}

	return error;
}

static int
l2tp_encap_detach(struct l2tp_variant *var)
{
	int error;

	if (var == NULL || var->lv_psrc == NULL)
		return EINVAL;

	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_detach(var);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_detach(var);
		break;
#endif
	default:
		error = EINVAL;
		break;
	}

	return error;
}

int
l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
{

	return if_tunnel_check_nesting(ifp, m, max_l2tp_nesting);
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, l2tp, NULL)


/* TODO: IP_TCPMSS support */
#ifdef IP_TCPMSS
static int l2tp_need_tcpmss_clamp(struct ifnet *);
#ifdef INET
static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
#endif
#ifdef INET6
static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
#endif

struct mbuf *
l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	struct ether_vlan_header evh;

	if (!l2tp_need_tcpmss_clamp(ifp)) {
		return m;
	}

	if (m->m_pkthdr.len < sizeof(evh)) {
		m_freem(m);
		return NULL;
	}

	/* save ether header */
	m_copydata(m, 0, sizeof(evh), (void *)&evh);
	eh = (struct ether_header *)&evh;

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_VLAN: /* Ether + VLAN */
		if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
			break;
		m_adj(m, sizeof(struct ether_vlan_header));
		switch (ntohs(evh.evl_proto)) {
#ifdef INET
		case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
			m = l2tp_tcpmss4_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
			m = l2tp_tcpmss6_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET6 */
		default:
			break;
		}

		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_vlan_header),
		    M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_vlan_header *) = evh;
		break;

#ifdef INET
	case ETHERTYPE_IP: /* Ether + IPv4 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss4_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET */

#ifdef INET6
	case ETHERTYPE_IPV6: /* Ether + IPv6 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss6_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET6 */

	default:
		break;
	}

	return m;
}

static int
l2tp_need_tcpmss_clamp(struct ifnet *ifp)
{
	int ret = 0;

#ifdef INET
	if (ifp->if_tcpmss != 0)
		ret = 1;
#endif

#ifdef INET6
	if (ifp->if_tcpmss6 != 0)
		ret = 1;
#endif

	return ret;
}

#ifdef INET
static struct mbuf *
l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
{

	if (ifp->if_tcpmss != 0) {
		return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
		    ifp->if_mtu - IP_TCPMSS_EXTLEN :
		    ifp->if_tcpmss);
	}
	return m;
}
#endif /* INET */

#ifdef INET6
static struct mbuf *
l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
{
	int ip6hdrlen;

	if (ifp->if_tcpmss6 != 0 &&
	    ip6_tcpmss_applicable(m, &ip6hdrlen)) {
		return ip6_tcpmss(m, ip6hdrlen,
		    (ifp->if_tcpmss6 < 0) ?
		    ifp->if_mtu - IP6_TCPMSS_EXTLEN :
		    ifp->if_tcpmss6);
	}
	return m;
}
#endif /* INET6 */

#endif /* IP_TCPMSS */