if_l2tp.c revision 1.37 1 /* $NetBSD: if_l2tp.c,v 1.37 2019/09/19 04:59:42 knakahara Exp $ */
2
3 /*
4 * Copyright (c) 2017 Internet Initiative Japan Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * L2TPv3 kernel interface
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.37 2019/09/19 04:59:42 knakahara Exp $");
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #include "opt_net_mpsafe.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/errno.h>
48 #include <sys/ioctl.h>
49 #include <sys/time.h>
50 #include <sys/syslog.h>
51 #include <sys/proc.h>
52 #include <sys/conf.h>
53 #include <sys/kauth.h>
54 #include <sys/cpu.h>
55 #include <sys/cprng.h>
56 #include <sys/intr.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/atomic.h>
60 #include <sys/pserialize.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_ether.h>
67 #include <net/if_types.h>
68 #include <net/netisr.h>
69 #include <net/route.h>
70 #include <net/bpf.h>
71 #include <net/if_vlanvar.h>
72
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/ip_encap.h>
77 #ifdef INET
78 #include <netinet/in_var.h>
79 #include <netinet/in_l2tp.h>
80 #endif /* INET */
81 #ifdef INET6
82 #include <netinet6/in6_l2tp.h>
83 #endif
84
85 #include <net/if_l2tp.h>
86
87 #include <net/if_vlanvar.h>
88
89 /* TODO: IP_TCPMSS support */
90 #undef IP_TCPMSS
91 #ifdef IP_TCPMSS
92 #include <netinet/ip_tcpmss.h>
93 #endif
94
95 /*
96 * l2tp global variable definitions
97 */
98 static struct {
99 LIST_HEAD(l2tp_sclist, l2tp_softc) list;
100 kmutex_t lock;
101 } l2tp_softcs __cacheline_aligned;
102
103
104 #if !defined(L2TP_ID_HASH_SIZE)
105 #define L2TP_ID_HASH_SIZE 64
106 #endif
107 static struct {
108 kmutex_t lock;
109 struct pslist_head *lists;
110 u_long mask;
111 } l2tp_hash __cacheline_aligned = {
112 .lists = NULL,
113 };
114
115 pserialize_t l2tp_psz __read_mostly;
116 struct psref_class *lv_psref_class __read_mostly;
117
118 static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
119 static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);
120
121 static void l2tp_ifq_init_pc(void *, void *, struct cpu_info *);
122
123 static int l2tp_clone_create(struct if_clone *, int);
124 static int l2tp_clone_destroy(struct ifnet *);
125
126 struct if_clone l2tp_cloner =
127 IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);
128
129 static int l2tp_tx_enqueue(struct l2tp_variant *, struct mbuf *);
130 static int l2tp_output(struct ifnet *, struct mbuf *,
131 const struct sockaddr *, const struct rtentry *);
132 static void l2tp_sendit(struct l2tp_variant *, struct mbuf *);
133 static void l2tpintr(struct l2tp_variant *);
134 static void l2tpintr_softint(void *);
135
136 static void l2tp_hash_init(void);
137 static int l2tp_hash_fini(void);
138
139 static void l2tp_start(struct ifnet *);
140 static int l2tp_transmit(struct ifnet *, struct mbuf *);
141
142 static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
143 struct sockaddr *);
144 static void l2tp_delete_tunnel(struct ifnet *);
145
146 static int id_hash_func(uint32_t, u_long);
147
148 static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
149 static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
150 static int l2tp_clear_session(struct l2tp_softc *);
151 static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
152 static void l2tp_clear_cookie(struct l2tp_softc *);
153 static void l2tp_set_state(struct l2tp_softc *, int);
154 static int l2tp_encap_attach(struct l2tp_variant *);
155 static int l2tp_encap_detach(struct l2tp_variant *);
156
157 #ifndef MAX_L2TP_NEST
158 /*
159 * This macro controls the upper limitation on nesting of l2tp tunnels.
 * Since setting a large value for this macro with a careless configuration
 * may cause a system crash, we don't allow any nesting by default.
162 * If you need to configure nested l2tp tunnels, you can define this macro
163 * in your kernel configuration file. However, if you do so, please be
164 * careful to configure the tunnels so that it won't make a loop.
165 */
166 /*
167 * XXX
168 * Currently, if in_l2tp_output recursively calls, it causes locking against
169 * myself of struct l2tp_ro->lr_lock. So, nested l2tp tunnels is prohibited.
170 */
171 #define MAX_L2TP_NEST 0
172 #endif
173
174 static int max_l2tp_nesting = MAX_L2TP_NEST;
175
/*
 * l2tpattach: legacy pseudo-device attach hook.
 *
 * "count" (the unit count from the kernel config) is ignored;
 * interfaces are created on demand through the cloner instead.
 */
/* ARGSUSED */
void
l2tpattach(int count)
{
	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in l2tpinit() below).
	 */
}
185
/*
 * l2tpinit: one-time module initialization.
 *
 * Initializes the global softc-list and session-hash locks, creates the
 * pserialize instance and psref class used for mostly-lockless reader
 * access to struct l2tp_variant, registers the interface cloner, and
 * allocates the session-ID hash table.
 */
static void
l2tpinit(void)
{

	mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&l2tp_softcs.list);

	mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	l2tp_psz = pserialize_create();
	lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
	if_clone_attach(&l2tp_cloner);

	l2tp_hash_init();
}
200
/*
 * l2tpdetach: module teardown; inverse of l2tpinit().
 *
 * Returns EBUSY (leaving everything intact) while any l2tp interface
 * still exists, either on the global softc list or in the session hash,
 * so the module cannot be unloaded while in use.  Returns 0 on success.
 */
static int
l2tpdetach(void)
{
	int error;

	/* Refuse to detach while any cloned interface remains. */
	mutex_enter(&l2tp_softcs.lock);
	if (!LIST_EMPTY(&l2tp_softcs.list)) {
		mutex_exit(&l2tp_softcs.lock);
		return EBUSY;
	}
	mutex_exit(&l2tp_softcs.lock);

	/* Also fails with EBUSY if any session is still hashed. */
	error = l2tp_hash_fini();
	if (error)
		return error;

	if_clone_detach(&l2tp_cloner);
	psref_class_destroy(lv_psref_class);
	pserialize_destroy(l2tp_psz);
	mutex_destroy(&l2tp_hash.lock);

	mutex_destroy(&l2tp_softcs.lock);

	return error;
}
226
/*
 * l2tp_clone_create: create one l2tpN interface.
 *
 * Allocates the softc and its initial (down, cookie-off) variant,
 * attaches the ifnet via l2tpattach0(), sets up the per-cpu route
 * caches and transmit queues plus the transmit softint, and links the
 * softc onto the global list.  Returns 0 or an error from l2tpattach0().
 */
static int
l2tp_clone_create(struct if_clone *ifc, int unit)
{
	struct l2tp_softc *sc;
	struct l2tp_variant *var;
	int rv;
	u_int si_flags = SOFTINT_NET;
#ifdef NET_MPSAFE
	si_flags |= SOFTINT_MPSAFE;
#endif
	sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
	if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
	rv = l2tpattach0(sc);
	if (rv != 0) {
		kmem_free(sc, sizeof(struct l2tp_softc));
		return rv;
	}

	/* Initial variant: interface down, no session, cookies disabled. */
	var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
	var->lv_softc = sc;
	var->lv_state = L2TP_STATE_DOWN;
	var->lv_use_cookie = L2TP_COOKIE_OFF;
	psref_target_init(&var->lv_psref, lv_psref_class);

	sc->l2tp_var = var;
	mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
	sc->l2tp_psz = pserialize_create();
	PSLIST_ENTRY_INIT(sc, l2tp_hash);

	/* Per-cpu cached route to the tunnel peer. */
	sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);

	/* Per-cpu transmit queue drained by l2tpintr_softint(). */
	sc->l2tp_ifq_percpu = percpu_alloc(sizeof(struct ifqueue));
	percpu_foreach(sc->l2tp_ifq_percpu, l2tp_ifq_init_pc, NULL);
	sc->l2tp_si = softint_establish(si_flags, l2tpintr_softint, sc);

	mutex_enter(&l2tp_softcs.lock);
	LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	return (0);
}
269
/*
 * l2tpattach0: initialize and attach the ifnet embedded in "sc".
 *
 * Fills in the interface callbacks and properties (point-to-point,
 * L2TP MTU, no link-state changes), attaches the interface and its
 * bpf tap, and allocates the sockaddr_dl.  Returns 0 or the error
 * from if_attach().
 */
int
l2tpattach0(struct l2tp_softc *sc)
{
	int rv;

	sc->l2tp_ec.ec_if.if_addrlen = 0;
	sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
	sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
	sc->l2tp_ec.ec_if.if_extflags = IFEF_NO_LINK_STATE_CHANGE;
#ifdef NET_MPSAFE
	sc->l2tp_ec.ec_if.if_extflags |= IFEF_MPSAFE;
#endif
	sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
	sc->l2tp_ec.ec_if.if_output = l2tp_output;
	sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
	/*
	 * NOTE(review): if_dlt is DLT_NULL here but bpf_attach() below uses
	 * DLT_EN10MB (the inner protocol is Ethernet) — confirm intentional.
	 */
	sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
	sc->l2tp_ec.ec_if.if_start = l2tp_start;
	sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
	/* Received frames are fed to the Ethernet input path. */
	sc->l2tp_ec.ec_if._if_input = ether_input;
	IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);

#ifdef MBUFTRACE
	/* Register separate tx/rx mbuf owners for leak tracking. */
	struct ethercom *ec = &sc->l2tp_ec;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	strlcpy(ec->ec_tx_mowner.mo_name, ifp->if_xname,
	    sizeof(ec->ec_tx_mowner.mo_name));
	strlcpy(ec->ec_tx_mowner.mo_descr, "tx",
	    sizeof(ec->ec_tx_mowner.mo_descr));
	strlcpy(ec->ec_rx_mowner.mo_name, ifp->if_xname,
	    sizeof(ec->ec_rx_mowner.mo_name));
	strlcpy(ec->ec_rx_mowner.mo_descr, "rx",
	    sizeof(ec->ec_rx_mowner.mo_descr));
	MOWNER_ATTACH(&ec->ec_tx_mowner);
	MOWNER_ATTACH(&ec->ec_rx_mowner);
	ifp->if_mowner = &ec->ec_tx_mowner;
#endif

	/* XXX
	 * It may improve performance to use if_initialize()/if_register()
	 * so that l2tp_input() calls if_input() instead of
	 * if_percpuq_enqueue(). However, that causes recursive softnet_lock
	 * when NET_MPSAFE is not set.
	 */
	rv = if_attach(&sc->l2tp_ec.ec_if);
	if (rv != 0)
		return rv;
	if_alloc_sadl(&sc->l2tp_ec.ec_if);
	bpf_attach(&sc->l2tp_ec.ec_if, DLT_EN10MB, sizeof(struct ether_header));

	return 0;
}
322
/*
 * percpu_foreach() callback: initialize one CPU's route cache by
 * allocating its lock.  The rtcache itself starts out empty (the
 * percpu area is zero-initialized by percpu_alloc()).
 */
void
l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct l2tp_ro *lro = p;

	lro->lr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
}
330
/*
 * percpu_foreach() callback: tear down one CPU's route cache,
 * releasing the cached route and then its lock.
 */
void
l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct l2tp_ro *lro = p;

	rtcache_free(&lro->lr_ro);

	mutex_obj_free(lro->lr_lock);
}
340
341 void
342 l2tp_ifq_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
343 {
344 struct ifqueue *ifq = p;
345
346 memset(ifq, 0, sizeof(*ifq));
347 ifq->ifq_maxlen = IFQ_MAXLEN;
348 }
349
/*
 * l2tp_clone_destroy: destroy one l2tpN interface.
 *
 * Tears down in the reverse order of l2tp_clone_create(): the session
 * and tunnel are cleared first, then the current variant is unpublished
 * under l2tp_lock so that l2tp_transmit()/l2tpintr_softint() can no
 * longer obtain a reference to it, and finally the softint, queues,
 * ifnet and softc itself are released.
 */
static int
l2tp_clone_destroy(struct ifnet *ifp)
{
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	l2tp_clear_session(sc);
	l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
	/*
	 * To avoid for l2tp_transmit() and l2tpintr_softint() to access
	 * sc->l2tp_var after free it.
	 */
	mutex_enter(&sc->l2tp_lock);
	var = sc->l2tp_var;
	l2tp_variant_update(sc, NULL);
	mutex_exit(&sc->l2tp_lock);

	/* No softint can run past this point; drop its queues. */
	softint_disestablish(sc->l2tp_si);
	percpu_free(sc->l2tp_ifq_percpu, sizeof(struct ifqueue));

	mutex_enter(&l2tp_softcs.lock);
	LIST_REMOVE(sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	bpf_detach(ifp);

	if_detach(ifp);

	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
	percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));

	kmem_free(var, sizeof(struct l2tp_variant));
	pserialize_destroy(sc->l2tp_psz);
	mutex_destroy(&sc->l2tp_lock);
	kmem_free(sc, sizeof(struct l2tp_softc));

	return 0;
}
389
/*
 * l2tp_tx_enqueue: queue "m" on the current CPU's private transmit
 * queue and kick the transmit softint.
 *
 * Caller must hold a psref on "var".  On a full queue the mbuf is
 * freed, if_oerrors is bumped and ENOBUFS is returned; otherwise 0.
 */
static int
l2tp_tx_enqueue(struct l2tp_variant *var, struct mbuf *m)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct ifqueue *ifq;
	int s;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* Block softnet so the softint cannot drain under our feet. */
	s = splsoftnet();
	ifq = percpu_getref(sc->l2tp_ifq_percpu);
	if (IF_QFULL(ifq)) {
		ifp->if_oerrors++;
		percpu_putref(sc->l2tp_ifq_percpu);
		splx(s);
		m_freem(m);
		return ENOBUFS;
	}

	IF_ENQUEUE(ifq, m);
	percpu_putref(sc->l2tp_ifq_percpu);
	softint_schedule(sc->l2tp_si);
	/* counter is incremented in l2tpintr() */
	splx(s);
	return 0;
}
420
/*
 * l2tp_output: ifnet if_output hook.
 *
 * Validates that the interface is up and the tunnel endpoints are
 * configured, prepends the inner address family (DLT_NULL style) and
 * hands the packet to l2tp_tx_enqueue().  Returns 0 or an errno; the
 * mbuf is always consumed.
 */
static int
l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var;
	struct psref psref;
	int error = 0;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* Tunnel endpoints must be configured before we can send. */
	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* XXX should we check if our outer source is legal? */

	/* use DLT_NULL encapsulation here to pass inner af type */
	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (!m) {
		error = ENOBUFS;
		goto end;
	}
	*mtod(m, int *) = dst->sa_family;

	error = l2tp_tx_enqueue(var, m);
end:
	l2tp_putref_variant(var, &psref);
	/*
	 * NOTE(review): when l2tp_tx_enqueue() fails on a full queue it has
	 * already incremented if_oerrors, so that path counts the error
	 * twice here — confirm whether this double count is intentional.
	 */
	if (error)
		ifp->if_oerrors++;

	return error;
}
471
/*
 * l2tp_sendit: encapsulate and transmit one frame over the tunnel.
 *
 * Dispatches on the outer source address family to the IPv4 or IPv6
 * encapsulation routine and updates the interface output counters.
 * Caller must hold a psref on "var"; the mbuf is always consumed.
 */
static void
l2tp_sendit(struct l2tp_variant *var, struct mbuf *m)
{
	int len;
	int error;
	struct l2tp_softc *sc;
	struct ifnet *ifp;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* Record the length now; the output routines consume "m". */
	len = m->m_pkthdr.len;
	m->m_flags &= ~(M_BCAST|M_MCAST);
	bpf_mtap(ifp, m, BPF_D_OUT);

	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_output(var, m);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_output(var, m);
		break;
#endif
	default:
		m_freem(m);
		error = ENETDOWN;
		break;
	}
	if (error) {
		ifp->if_oerrors++;
	} else {
		ifp->if_opackets++;
		ifp->if_obytes += len;
	}
}
512
/*
 * l2tpintr: drain pending transmit work for one interface.
 *
 * Sends everything on the current CPU's private queue; in addition,
 * CPU 0 alone drains the shared if_snd queue so it is serviced by
 * exactly one CPU.  If no session is configured, queued packets are
 * discarded instead.  Caller must hold a psref on "var".
 */
static void
l2tpintr(struct l2tp_variant *var)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	struct ifqueue *ifq;
	u_int cpuid = cpu_index(curcpu());

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* output processing */
	if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
		/* No session: drop anything that was queued. */
		ifq = percpu_getref(sc->l2tp_ifq_percpu);
		IF_PURGE(ifq);
		percpu_putref(sc->l2tp_ifq_percpu);
		if (cpuid == 0)
			IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Currently, l2tpintr() is always called in softint context. */
	ifq = percpu_getref(sc->l2tp_ifq_percpu);
	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m != NULL)
			l2tp_sendit(var, m);
		else
			break;
	}
	percpu_putref(sc->l2tp_ifq_percpu);

	/* Only CPU 0 services the shared if_snd queue. */
	if (cpuid == 0) {
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				l2tp_sendit(var, m);
			else
				break;
		}
	}
}
558
559 static void
560 l2tpintr_softint(void *arg)
561 {
562 struct l2tp_variant *var;
563 struct psref psref;
564 struct l2tp_softc *sc = arg;
565
566 var = l2tp_getref_variant(sc, &psref);
567 if (var == NULL)
568 return;
569
570 l2tpintr(var);
571 l2tp_putref_variant(var, &psref);
572 }
573
/*
 * l2tp_input: deliver a decapsulated frame to the network stack.
 *
 * The payload must be an Ethernet frame (the only inner protocol l2tp(4)
 * supports).  If the frame does not start at offset 2 mod 4 — the
 * alignment that puts the payload after the 14-byte Ethernet header on
 * a 4-byte boundary — the head of the packet is copied into a freshly
 * aligned mbuf first.  The mbuf is consumed in all cases.
 */
void
l2tp_input(struct mbuf *m, struct ifnet *ifp)
{
	vaddr_t addr;

	KASSERT(ifp != NULL);

	/*
	 * Currently, l2tp(4) supports only ethernet as inner protocol.
	 */
	if (m->m_pkthdr.len < sizeof(struct ether_header)) {
		m_freem(m);
		return;
	}

	/*
	 * If the head of the payload is not aligned, align it.
	 */
	addr = mtod(m, vaddr_t);
	if ((addr & 0x03) != 0x2) {
		/* copy and align head of payload */
		struct mbuf *m_head;
		int copy_length;
		/* pad == 2: roundup(14, 4) - 14 */
		u_int pad = roundup(sizeof(struct ether_header), 4)
		    - sizeof(struct ether_header);

#define L2TP_COPY_LENGTH	60

		if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
			copy_length = m->m_pkthdr.len;
		} else {
			copy_length = L2TP_COPY_LENGTH;
		}

		/* Make sure the head we copy is contiguous. */
		if (m->m_len < copy_length) {
			m = m_pullup(m, copy_length);
			if (m == NULL)
				return;
		}

		MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
		if (m_head == NULL) {
			m_freem(m);
			return;
		}
		m_move_pkthdr(m_head, m);

		/*
		 * m_head should be:
		 *                             L2TP_COPY_LENGTH
		 *                          <-  + roundup(pad, 4) - pad ->
		 *   +-------+--------+-----+--------------+-------------+
		 *   | m_hdr | pkthdr | ... | ether header |   payload   |
		 *   +-------+--------+-----+--------------+-------------+
		 *                          ^              ^
		 *                          m_data         4 byte aligned
		 */
		m_align(m_head, L2TP_COPY_LENGTH + roundup(pad, 4));
		m_head->m_data += pad;

		memcpy(mtod(m_head, void *), mtod(m, void *), copy_length);
		m_head->m_len = copy_length;
		m->m_data += copy_length;
		m->m_len -= copy_length;

		/* construct chain */
		if (m->m_len == 0) {
			/* m_free() returns the (possibly NULL) next mbuf. */
			m_head->m_next = m_free(m);
		} else {
			m_head->m_next = m;
		}

		/* override m */
		m = m_head;
	}

	m_set_rcvif(m, ifp);

	/*
	 * bpf_mtap() and ifp->if_ipackets++ is done in if_input()
	 *
	 * obytes is incremented at ether_output() or bridge_enqueue().
	 */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}
659
660 void
661 l2tp_start(struct ifnet *ifp)
662 {
663 struct psref psref;
664 struct l2tp_variant *var;
665 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
666 l2tp_ec.ec_if);
667
668 var = l2tp_getref_variant(sc, &psref);
669 if (var == NULL)
670 return;
671
672 if (var->lv_psrc == NULL || var->lv_pdst == NULL)
673 return;
674
675 softint_schedule(sc->l2tp_si);
676 l2tp_putref_variant(var, &psref);
677 }
678
/*
 * l2tp_transmit: ifnet if_transmit hook.
 *
 * Bypasses if_snd: queues the packet directly on the current CPU's
 * private queue via l2tp_tx_enqueue().  Returns 0 or an errno; the
 * mbuf is always consumed.
 */
int
l2tp_transmit(struct ifnet *ifp, struct mbuf *m)
{
	int error;
	struct psref psref;
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	/* Tunnel endpoints must be configured before we can send. */
	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

	m->m_flags &= ~(M_BCAST|M_MCAST);

	error = l2tp_tx_enqueue(var, m);
out:
	l2tp_putref_variant(var, &psref);
	return error;
}
707
/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
/*
 * l2tp_ioctl: ifnet ioctl handler.
 *
 * Handles tunnel endpoint configuration (SIOC[SGD]*PHYADDR*), L2TPv3
 * session/cookie/state configuration (SIOC[SGD]L2TP*), MTU and
 * multicast requests; everything else falls through to
 * ifioctl_common().  Read-only queries take a psref on the current
 * variant (with the LWP bound to its CPU) while copying data out.
 *
 * NOTE(review): error paths are inconsistent in style — some "return"
 * directly, others "goto bad" — but both just return the error.
 */
int
l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var, *var_tmp;
	struct ifreq *ifr = data;
	int error = 0, size;
	struct sockaddr *dst, *src;
	struct l2tp_req l2tpr;
	u_long mtu;
	int bound;
	struct psref psref;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		break;

	case SIOCSIFDSTADDR:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:	/* IP supports Multicast */
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif /* INET6 */
		default:  /* Other protocols doesn't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
			return (EINVAL);
		ifp->if_mtu = mtu;
		break;

#ifdef INET
	case SIOCSIFPHYADDR:
		/* Set IPv4 tunnel endpoints; both must be AF_INET. */
		src = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in)
		    || dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET */
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
		/* Set IPv6 tunnel endpoints; both must be AF_INET6. */
		src = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in6)
		    || dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET6 */
	case SIOCSLIFPHYADDR:
		/* Address-family-generic endpoint setting. */
		src = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_family != dst->sa_family)
			return EINVAL;
		else if (src->sa_family == AF_INET
		    && src->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (src->sa_family == AF_INET6
		    && src->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		else if (dst->sa_family == AF_INET
		    && dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (dst->sa_family == AF_INET6
		    && dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

	case SIOCDIFPHYADDR:
		l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
		break;

	case SIOCGIFPSRCADDR:
#ifdef INET6
	case SIOCGIFPSRCADDR_IN6:
#endif /* INET6 */
		/* Copy out the outer source address, if configured. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_psrc;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPSRCADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPSRCADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGIFPDSTADDR:
#ifdef INET6
	case SIOCGIFPDSTADDR_IN6:
#endif /* INET6 */
		/* Copy out the outer destination address, if configured. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_pdst;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPDSTADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPDSTADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGLIFPHYADDR:
		/* Copy out both endpoints at once. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		/* copy src */
		src = var->lv_psrc;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		size = sizeof(((struct if_laddrreq *)data)->addr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);

		/* copy dst */
		src = var->lv_pdst;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		size = sizeof(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCSL2TPSESSION:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		/* session id must not zero */
		if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
			return EINVAL;

		/*
		 * Reject a session ID that is already in use by another
		 * interface.
		 * NOTE(review): the duplicate check and l2tp_set_session()
		 * are not atomic with respect to each other — presumably a
		 * concurrent ioctl could still race in; confirm.
		 */
		bound = curlwp_bind();
		var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
		if (var_tmp != NULL) {
			/* duplicate session id */
			log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
			    sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
			    var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
			psref_release(&psref, &var_tmp->lv_psref,
			    lv_psref_class);
			curlwp_bindx(bound);
			return EINVAL;
		}
		curlwp_bindx(bound);

		error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
		break;
	case SIOCDL2TPSESSION:
		l2tp_clear_session(sc);
		break;
	case SIOCSL2TPCOOKIE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
		    l2tpr.peer_cookie, l2tpr.peer_cookie_len);
		break;
	case SIOCDL2TPCOOKIE:
		l2tp_clear_cookie(sc);
		break;
	case SIOCSL2TPSTATE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		l2tp_set_state(sc, l2tpr.state);
		break;
	case SIOCGL2TP:
		/* get L2TPV3 session info */
		memset(&l2tpr, 0, sizeof(l2tpr));

		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		l2tpr.state = var->lv_state;
		l2tpr.my_sess_id = var->lv_my_sess_id;
		l2tpr.peer_sess_id = var->lv_peer_sess_id;
		l2tpr.my_cookie = var->lv_my_cookie;
		l2tpr.my_cookie_len = var->lv_my_cookie_len;
		l2tpr.peer_cookie = var->lv_peer_cookie;
		l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);

		error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
		break;
	}
 bad:
	return error;
}
1027
/*
 * l2tp_set_tunnel: install new outer (tunnel) source/destination
 * addresses.
 *
 * Builds a new variant that is a copy of the current one with the new
 * addresses and a fresh encap attachment, publishes it under l2tp_lock
 * via l2tp_variant_update(), then detaches and frees the old variant
 * and its addresses.  All allocations are done before taking any lock.
 * Returns 0, or the error from encap_lock_enter()/l2tp_encap_attach().
 */
static int
l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct sockaddr *nsrc, *ndst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nsrc = sockaddr_dup(src, M_WAITOK);
	ndst = sockaddr_dup(dst, M_WAITOK);

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error)
		goto error;

	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = nsrc;
	nvar->lv_pdst = ndst;
	error = l2tp_encap_attach(nvar);
	if (error) {
		mutex_exit(&sc->l2tp_lock);
		encap_lock_exit();
		goto error;
	}
	/* Make the new variant's fields visible before publishing it. */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	/* Old addresses/variant are no longer reachable; free them. */
	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));

	return 0;

error:
	sockaddr_free(nsrc);
	sockaddr_free(ndst);
	kmem_free(nvar, sizeof(*nvar));

	return error;
}
1085
/*
 * l2tp_delete_tunnel: clear the outer (tunnel) addresses.
 *
 * Mirrors l2tp_set_tunnel(): publishes a copy of the current variant
 * with NULL endpoints, then detaches the old variant's encap entry and
 * frees the old variant and addresses.  Silently does nothing if the
 * encap lock cannot be taken.
 */
static void
l2tp_delete_tunnel(struct ifnet *ifp)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error) {
		kmem_free(nvar, sizeof(*nvar));
		return;
	}
	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = NULL;
	nvar->lv_pdst = NULL;
	/* Make the new variant's fields visible before publishing it. */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));
}
1125
1126 static int
1127 id_hash_func(uint32_t id, u_long mask)
1128 {
1129 uint32_t hash;
1130
1131 hash = (id >> 16) ^ id;
1132 hash = (hash >> 4) ^ hash;
1133
1134 return hash & mask;
1135 }
1136
/*
 * Allocate the session-ID hash table (L2TP_ID_HASH_SIZE pslist heads);
 * hashinit() stores the resulting index mask in l2tp_hash.mask.
 */
static void
l2tp_hash_init(void)
{

	l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
	    &l2tp_hash.mask);
}
1144
/*
 * Free the session-ID hash table.
 *
 * Returns EBUSY (leaving the table intact) if any bucket still has an
 * entry, i.e. some interface still has a configured session.
 */
static int
l2tp_hash_fini(void)
{
	int i;

	mutex_enter(&l2tp_hash.lock);

	/* First pass: verify every bucket is empty. */
	for (i = 0; i < l2tp_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
			l2tp_hash) != NULL) {
			mutex_exit(&l2tp_hash.lock);
			return EBUSY;
		}
	}
	/* Second pass: destroy the (empty) bucket heads. */
	for (i = 0; i < l2tp_hash.mask + 1; i++)
		PSLIST_DESTROY(&l2tp_hash.lists[i]);

	mutex_exit(&l2tp_hash.lock);

	hashdone(l2tp_hash.lists, HASH_PSLIST, l2tp_hash.mask);

	return 0;
}
1168
/*
 * l2tp_set_session: configure the local/peer session IDs.
 *
 * Publishes a new variant carrying the IDs, removes the softc from its
 * old hash bucket (waiting out readers with pserialize) if a session
 * was previously configured, and inserts it into the bucket for the
 * new local session ID so l2tp_lookup_session_ref() can find it.
 * Always returns 0.
 */
static int
l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
    uint32_t peer_sess_id)
{
	uint32_t idx;
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = my_sess_id;
	nvar->lv_peer_sess_id = peer_sess_id;
	/* Publish the new IDs before the variant becomes visible. */
	membar_producer();

	/* Unhash the old session, if one was configured. */
	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		/* Wait for concurrent lookups to drain. */
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);
	PSLIST_ENTRY_DESTROY(sc, l2tp_hash);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);

	idx = id_hash_func(nvar->lv_my_sess_id, l2tp_hash.mask);
	if ((ifp->if_flags & IFF_DEBUG) != 0)
		log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
		    sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);

	/* Hash on the new local session ID. */
	PSLIST_ENTRY_INIT(sc, l2tp_hash);
	mutex_enter(&l2tp_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
	mutex_exit(&l2tp_hash.lock);

	kmem_free(ovar, sizeof(*ovar));
	return 0;
}
1212
/*
 * Clear the session IDs of sc via a copy-on-write variant update and
 * remove the interface from the session-ID hash.  Always returns 0.
 */
static int
l2tp_clear_session(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;

	/* Allocate before taking locks; KM_SLEEP may block. */
	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = 0;
	nvar->lv_peer_sess_id = 0;
	/* Publish the cleared fields before the pointer swap in l2tp_variant_update(). */
	membar_producer();

	mutex_enter(&l2tp_hash.lock);
	/* The softc is hashed only while both session IDs are non-zero. */
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		/* Wait for hash-list readers to drain before the entry goes away. */
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);
	/* l2tp_variant_update() has quiesced all users of ovar. */
	kmem_free(ovar, sizeof(*ovar));
	return 0;
}
1241
/*
 * Look up the l2tp variant whose local session ID equals "id".
 * On success the variant is returned with a psref reference held via
 * *psref (the caller must release it); returns NULL if no session matches.
 */
struct l2tp_variant *
l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
{
	int idx;
	int s;
	struct l2tp_softc *sc;

	idx = id_hash_func(id, l2tp_hash.mask);

	/*
	 * Lockless hash walk: writers synchronize with this read section
	 * via pserialize_perform() before destroying entries.
	 */
	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc,
	    l2tp_hash) {
		struct l2tp_variant *var = sc->l2tp_var;
		if (var == NULL)
			continue;
		if (var->lv_my_sess_id != id)
			continue;
		/* Pin the variant before leaving the read section. */
		psref_acquire(psref, &var->lv_psref, lv_psref_class);
		pserialize_read_exit(s);
		return var;
	}
	pserialize_read_exit(s);
	return NULL;
}
1266
1267 /*
1268 * l2tp_variant update API.
1269 *
1270 * Assumption:
1271 * reader side dereferences sc->l2tp_var in reader critical section only,
1272 * that is, all of reader sides do not reader the sc->l2tp_var after
1273 * pserialize_perform().
1274 */
/*
 * Publish nvar as sc's current variant and quiesce the old one:
 * wait for pserialize readers to drain, then destroy the old psref
 * target (which waits for all reference holders to release).
 * The caller still owns, and must free, the old variant.
 * Must be called with sc->l2tp_lock held.
 *
 * Side effect: IFF_RUNNING is recomputed -- set only while the new
 * variant has both tunnel endpoint addresses configured.
 */
static void
l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar)
{
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	struct l2tp_variant *ovar = sc->l2tp_var;

	KASSERT(mutex_owned(&sc->l2tp_lock));

	sc->l2tp_var = nvar;
	/* After this returns, no reader can still observe ovar. */
	pserialize_perform(sc->l2tp_psz);
	psref_target_destroy(&ovar->lv_psref, lv_psref_class);

	/*
	 * NOTE(review): the code re-reads sc->l2tp_var instead of using nvar;
	 * the original comment justified this by atomic_swap_ptr(3) semantics,
	 * but atomic_swap_ptr is no longer used here.  Under sc->l2tp_lock the
	 * two are equivalent.
	 */
	if (sc->l2tp_var != NULL) {
		if (sc->l2tp_var->lv_psrc != NULL
		    && sc->l2tp_var->lv_pdst != NULL)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
	}
}
1299
1300 static int
1301 l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len,
1302 uint64_t peer_cookie, u_int peer_cookie_len)
1303 {
1304 struct l2tp_variant *nvar;
1305
1306 if (my_cookie == 0 || peer_cookie == 0)
1307 return EINVAL;
1308
1309 if (my_cookie_len != 4 && my_cookie_len != 8
1310 && peer_cookie_len != 4 && peer_cookie_len != 8)
1311 return EINVAL;
1312
1313 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);
1314
1315 mutex_enter(&sc->l2tp_lock);
1316
1317 *nvar = *sc->l2tp_var;
1318 psref_target_init(&nvar->lv_psref, lv_psref_class);
1319 nvar->lv_my_cookie = my_cookie;
1320 nvar->lv_my_cookie_len = my_cookie_len;
1321 nvar->lv_peer_cookie = peer_cookie;
1322 nvar->lv_peer_cookie_len = peer_cookie_len;
1323 nvar->lv_use_cookie = L2TP_COOKIE_ON;
1324 membar_producer();
1325 l2tp_variant_update(sc, nvar);
1326
1327 mutex_exit(&sc->l2tp_lock);
1328
1329 struct ifnet *ifp = &sc->l2tp_ec.ec_if;
1330 if ((ifp->if_flags & IFF_DEBUG) != 0) {
1331 log(LOG_DEBUG,
1332 "%s: set cookie: "
1333 "local cookie_len=%u local cookie=%" PRIu64 ", "
1334 "remote cookie_len=%u remote cookie=%" PRIu64 "\n",
1335 ifp->if_xname, my_cookie_len, my_cookie,
1336 peer_cookie_len, peer_cookie);
1337 }
1338
1339 return 0;
1340 }
1341
/*
 * Disable cookie checking on sc by installing a variant with both
 * cookies zeroed and lv_use_cookie set to L2TP_COOKIE_OFF
 * (copy-on-write update).
 */
static void
l2tp_clear_cookie(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;

	/* Allocate before taking the lock; KM_SLEEP may block. */
	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_cookie = 0;
	nvar->lv_my_cookie_len = 0;
	nvar->lv_peer_cookie = 0;
	nvar->lv_peer_cookie_len = 0;
	nvar->lv_use_cookie = L2TP_COOKIE_OFF;
	/* Publish the cleared fields before the pointer swap in l2tp_variant_update(). */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);
}
1363
/*
 * Set the session state of sc (copy-on-write variant update) and mirror
 * it into the interface link state: link UP only while the session is
 * L2TP_STATE_UP.
 */
static void
l2tp_set_state(struct l2tp_softc *sc, int state)
{
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;
	struct l2tp_variant *nvar;

	/* Allocate before taking the lock; KM_SLEEP may block. */
	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);

	*nvar = *sc->l2tp_var;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_state = state;
	/* Publish lv_state before the pointer swap in l2tp_variant_update(). */
	membar_producer();
	l2tp_variant_update(sc, nvar);

	if (nvar->lv_state == L2TP_STATE_UP) {
		ifp->if_link_state = LINK_STATE_UP;
	} else {
		ifp->if_link_state = LINK_STATE_DOWN;
	}

	mutex_exit(&sc->l2tp_lock);

	/* Propagating the link-state change to stacked VLANs is not wired up yet. */
#ifdef NOTYET
	vlan_linkstate_notify(ifp, ifp->if_link_state);
#endif
}
1392
1393 static int
1394 l2tp_encap_attach(struct l2tp_variant *var)
1395 {
1396 int error;
1397
1398 if (var == NULL || var->lv_psrc == NULL)
1399 return EINVAL;
1400
1401 switch (var->lv_psrc->sa_family) {
1402 #ifdef INET
1403 case AF_INET:
1404 error = in_l2tp_attach(var);
1405 break;
1406 #endif
1407 #ifdef INET6
1408 case AF_INET6:
1409 error = in6_l2tp_attach(var);
1410 break;
1411 #endif
1412 default:
1413 error = EINVAL;
1414 break;
1415 }
1416
1417 return error;
1418 }
1419
1420 static int
1421 l2tp_encap_detach(struct l2tp_variant *var)
1422 {
1423 int error;
1424
1425 if (var == NULL || var->lv_psrc == NULL)
1426 return EINVAL;
1427
1428 switch (var->lv_psrc->sa_family) {
1429 #ifdef INET
1430 case AF_INET:
1431 error = in_l2tp_detach(var);
1432 break;
1433 #endif
1434 #ifdef INET6
1435 case AF_INET6:
1436 error = in6_l2tp_detach(var);
1437 break;
1438 #endif
1439 default:
1440 error = EINVAL;
1441 break;
1442 }
1443
1444 return error;
1445 }
1446
/*
 * Guard against tunnel-in-tunnel recursion on the output path by
 * delegating to the generic nesting checker with the max_l2tp_nesting
 * limit; returns its result (0 when transmission may proceed).
 */
int
l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m)
{

	return if_tunnel_check_nesting(ifp, m, max_l2tp_nesting);
}
1453
1454 /*
1455 * Module infrastructure
1456 */
1457 #include "if_module.h"
1458
1459 IF_MODULE(MODULE_CLASS_DRIVER, l2tp, NULL)
1460
1461
1462 /* TODO: IP_TCPMSS support */
1463 #ifdef IP_TCPMSS
1464 static int l2tp_need_tcpmss_clamp(struct ifnet *);
1465 #ifdef INET
1466 static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *);
1467 #endif
1468 #ifdef INET6
1469 static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *);
1470 #endif
1471
/*
 * Clamp the TCP MSS option of a frame traversing this interface.
 * The mbuf carries a full Ethernet (possibly VLAN-tagged) frame: the
 * link-layer header is saved and stripped, the IP payload is handed to
 * the per-AF clamp helper, and the saved header is prepended again.
 *
 * Returns the (possibly replaced) mbuf chain, or NULL if the packet
 * was freed on error.
 */
struct mbuf *
l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	struct ether_vlan_header evh;

	if (!l2tp_need_tcpmss_clamp(ifp)) {
		return m;
	}

	/*
	 * NOTE(review): frames shorter than an ether_vlan_header are
	 * dropped here even when untagged (where a plain ether_header
	 * would suffice) -- confirm this is intentional.
	 */
	if (m->m_pkthdr.len < sizeof(evh)) {
		m_freem(m);
		return NULL;
	}

	/* save ether header */
	m_copydata(m, 0, sizeof(evh), (void *)&evh);
	eh = (struct ether_header *)&evh;

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_VLAN: /* Ether + VLAN */
		if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header))
			break;
		m_adj(m, sizeof(struct ether_vlan_header));
		switch (ntohs(evh.evl_proto)) {
#ifdef INET
		case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */
			m = l2tp_tcpmss4_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */
			m = l2tp_tcpmss6_clamp(ifp, m);
			if (m == NULL)
				return NULL;
			break;
#endif /* INET6 */
		default:
			break;
		}

		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_vlan_header),
		    M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_vlan_header *) = evh;
		break;

#ifdef INET
	case ETHERTYPE_IP: /* Ether + IPv4 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss4_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET */

#ifdef INET6
	case ETHERTYPE_IPV6: /* Ether + IPv6 */
		if (m->m_pkthdr.len <= sizeof(struct ether_header))
			break;
		m_adj(m, sizeof(struct ether_header));
		m = l2tp_tcpmss6_clamp(ifp, m);
		if (m == NULL)
			return NULL;
		/* restore ether header */
		M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
		if (m == NULL)
			return NULL;
		*mtod(m, struct ether_header *) = *eh;
		break;
#endif /* INET6 */

	default:
		break;
	}

	return m;
}
1561
/*
 * Return non-zero when MSS clamping is configured on ifp for any
 * enabled address family.
 */
static int
l2tp_need_tcpmss_clamp(struct ifnet *ifp)
{

#ifdef INET
	if (ifp->if_tcpmss != 0)
		return 1;
#endif
#ifdef INET6
	if (ifp->if_tcpmss6 != 0)
		return 1;
#endif
	return 0;
}
1579
#ifdef INET
/*
 * Clamp the TCP MSS of an IPv4 packet.  A negative if_tcpmss means
 * "derive the MSS from the interface MTU"; zero disables clamping.
 */
static struct mbuf *
l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m)
{

	if (ifp->if_tcpmss == 0)
		return m;

	return ip_tcpmss(m, (ifp->if_tcpmss < 0) ?
	    ifp->if_mtu - IP_TCPMSS_EXTLEN :
	    ifp->if_tcpmss);
}
#endif /* INET */
1593
#ifdef INET6
/*
 * Clamp the TCP MSS of an IPv6 packet.  A negative if_tcpmss6 means
 * "derive the MSS from the interface MTU"; zero disables clamping.
 */
static struct mbuf *
l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m)
{
	int ip6hdrlen;

	if (ifp->if_tcpmss6 == 0)
		return m;
	if (!ip6_tcpmss_applicable(m, &ip6hdrlen))
		return m;

	return ip6_tcpmss(m, ip6hdrlen,
	    (ifp->if_tcpmss6 < 0) ?
	    ifp->if_mtu - IP6_TCPMSS_EXTLEN :
	    ifp->if_tcpmss6);
}
#endif /* INET6 */
1610
1611 #endif /* IP_TCPMSS */
1612