      1 /*	$NetBSD: if_vlan.c,v 1.130.2.2 2020/04/08 14:08:57 martin Exp $	*/
2
3 /*
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright 1998 Massachusetts Institute of Technology
34 *
35 * Permission to use, copy, modify, and distribute this software and
36 * its documentation for any purpose and without fee is hereby
37 * granted, provided that both the above copyright notice and this
38 * permission notice appear in all copies, that both the above
39 * copyright notice and this permission notice appear in all
40 * supporting documentation, and that the name of M.I.T. not be used
41 * in advertising or publicity pertaining to distribution of the
42 * software without specific, written prior permission. M.I.T. makes
43 * no representations about the suitability of this software for any
44 * purpose. It is provided "as is" without express or implied
45 * warranty.
46 *
47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62 */
63
64 /*
65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be
66 * extended some day to also handle IEEE 802.1P priority tagging. This is
67 * sort of sneaky in the implementation, since we need to pretend to be
68 * enough of an Ethernet implementation to make ARP work. The way we do
69 * this is by telling everyone that we are an Ethernet interface, and then
70 * catch the packets that ether_output() left on our output queue when it
71 * calls if_start(), rewrite them for use by the real outgoing interface,
72 * and ask it to send them.
73 *
74 * TODO:
75 *
76 * - Need some way to notify vlan interfaces when the parent
77 * interface changes MTU.
78 */
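
/*
 * Example configuration from userland (illustrative only; "wm0" stands in
 * for whatever parent Ethernet interface is in use -- see vlan(4)):
 *
 *	ifconfig vlan0 create
 *	ifconfig vlan0 vlan 5 vlanif wm0
 */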
79
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.130.2.2 2020/04/08 14:08:57 martin Exp $");
82
83 #ifdef _KERNEL_OPT
84 #include "opt_inet.h"
85 #include "opt_net_mpsafe.h"
86 #endif
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/mbuf.h>
92 #include <sys/queue.h>
93 #include <sys/socket.h>
94 #include <sys/sockio.h>
96 #include <sys/proc.h>
97 #include <sys/kauth.h>
98 #include <sys/mutex.h>
99 #include <sys/kmem.h>
100 #include <sys/cpu.h>
101 #include <sys/pserialize.h>
102 #include <sys/psref.h>
103 #include <sys/pslist.h>
104 #include <sys/atomic.h>
105 #include <sys/device.h>
106 #include <sys/module.h>
107
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_ether.h>
113 #include <net/if_vlanvar.h>
114
115 #ifdef INET
116 #include <netinet/in.h>
117 #include <netinet/if_inarp.h>
118 #endif
119 #ifdef INET6
120 #include <netinet6/in6_ifattach.h>
121 #include <netinet6/in6_var.h>
122 #endif
123
124 #include "ioconf.h"
125
126 struct vlan_mc_entry {
127 LIST_ENTRY(vlan_mc_entry) mc_entries;
128 /*
129 * A key to identify this entry. The mc_addr below can't be
    130 	 * used since multiple sockaddrs may be mapped into the same
131 * ether_multi (e.g., AF_UNSPEC).
132 */
133 union {
134 struct ether_multi *mcu_enm;
135 } mc_u;
136 struct sockaddr_storage mc_addr;
137 };
138
139 #define mc_enm mc_u.mcu_enm
140
141
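/*
 * Link state shared between a vlan interface and its parent.  The current
 * mib is published with atomic_store_release()/atomic_load_consume() and is
 * protected for readers by pserialize(9) plus psref(9); writers replace the
 * whole structure under ifv_lock (see vlan_linkmib_update() below).
 */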
142 struct ifvlan_linkmib {
143 struct ifvlan *ifvm_ifvlan;
144 const struct vlan_multisw *ifvm_msw;
145 int ifvm_encaplen; /* encapsulation length */
146 int ifvm_mtufudge; /* MTU fudged by this much */
147 int ifvm_mintu; /* min transmission unit */
148 uint16_t ifvm_proto; /* encapsulation ethertype */
149 uint16_t ifvm_tag; /* tag to apply on packets */
150 struct ifnet *ifvm_p; /* parent interface of this vlan */
151
152 struct psref_target ifvm_psref;
153 };
154
155 struct ifvlan {
156 union {
157 struct ethercom ifvu_ec;
158 } ifv_u;
159 struct ifvlan_linkmib *ifv_mib; /*
160 * reader must use vlan_getref_linkmib()
161 * instead of direct dereference
162 */
163 kmutex_t ifv_lock; /* writer lock for ifv_mib */
164 pserialize_t ifv_psz;
165
166 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
167 LIST_ENTRY(ifvlan) ifv_list;
168 struct pslist_entry ifv_hash;
169 int ifv_flags;
170 };
171
172 #define IFVF_PROMISC 0x01 /* promiscuous mode enabled */
173
174 #define ifv_ec ifv_u.ifvu_ec
175
176 #define ifv_if ifv_ec.ec_if
177
178 #define ifv_msw ifv_mib.ifvm_msw
179 #define ifv_encaplen ifv_mib.ifvm_encaplen
180 #define ifv_mtufudge ifv_mib.ifvm_mtufudge
181 #define ifv_mintu ifv_mib.ifvm_mintu
182 #define ifv_tag ifv_mib.ifvm_tag
183
184 struct vlan_multisw {
185 int (*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
186 int (*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
187 void (*vmsw_purgemulti)(struct ifvlan *);
188 };
189
190 static int vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
191 static int vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
192 static void vlan_ether_purgemulti(struct ifvlan *);
193
194 const struct vlan_multisw vlan_ether_multisw = {
195 .vmsw_addmulti = vlan_ether_addmulti,
196 .vmsw_delmulti = vlan_ether_delmulti,
197 .vmsw_purgemulti = vlan_ether_purgemulti,
198 };
199
200 static int vlan_clone_create(struct if_clone *, int);
201 static int vlan_clone_destroy(struct ifnet *);
202 static int vlan_config(struct ifvlan *, struct ifnet *,
203 uint16_t);
204 static int vlan_ioctl(struct ifnet *, u_long, void *);
205 static void vlan_start(struct ifnet *);
206 static int vlan_transmit(struct ifnet *, struct mbuf *);
207 static void vlan_unconfig(struct ifnet *);
208 static int vlan_unconfig_locked(struct ifvlan *,
209 struct ifvlan_linkmib *);
210 static void vlan_hash_init(void);
211 static int vlan_hash_fini(void);
212 static int vlan_tag_hash(uint16_t, u_long);
213 static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *,
214 struct psref *);
215 static void vlan_putref_linkmib(struct ifvlan_linkmib *,
216 struct psref *);
217 static void vlan_linkmib_update(struct ifvlan *,
218 struct ifvlan_linkmib *);
219 static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *,
220 uint16_t, struct psref *);
221
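/* List of all configured vlan interfaces, protected by ifv_list.lock. */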
222 LIST_HEAD(vlan_ifvlist, ifvlan);
223 static struct {
224 kmutex_t lock;
225 struct vlan_ifvlist list;
226 } ifv_list __cacheline_aligned;
227
228
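/*
 * Hash table of active vlans keyed by tag; vlan_lookup_tag_psref() matches
 * on both tag and parent, and vlan_input() uses it to find the vlan
 * interface for an incoming tagged frame.  Readers walk the lists under
 * pserialize(9); writers hold ifv_hash.lock.
 */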
229 #if !defined(VLAN_TAG_HASH_SIZE)
230 #define VLAN_TAG_HASH_SIZE 32
231 #endif
232 static struct {
233 kmutex_t lock;
234 struct pslist_head *lists;
235 u_long mask;
236 } ifv_hash __cacheline_aligned = {
237 .lists = NULL,
238 .mask = 0,
239 };
240
241 pserialize_t vlan_psz __read_mostly;
242 static struct psref_class *ifvm_psref_class __read_mostly;
243
244 struct if_clone vlan_cloner =
245 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);
246
    247 /* Used to pad Ethernet frames that are shorter than ETHER_MIN_LEN bytes */
248 static char vlan_zero_pad_buff[ETHER_MIN_LEN];
249
250 static inline int
251 vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
252 {
253 int e;
254
255 KERNEL_LOCK_UNLESS_NET_MPSAFE();
256 e = ifpromisc(ifp, pswitch);
257 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
258
259 return e;
260 }
261
262 static inline int
263 vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
264 {
265 int e;
266
267 KERNEL_LOCK_UNLESS_NET_MPSAFE();
268 e = ifpromisc_locked(ifp, pswitch);
269 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
270
271 return e;
272 }
273
274 void
275 vlanattach(int n)
276 {
277
278 /*
279 * Nothing to do here, initialization is handled by the
280 * module initialization code in vlaninit() below.
281 */
282 }
283
284 static void
285 vlaninit(void)
286 {
287 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
288 LIST_INIT(&ifv_list.list);
289
290 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
291 vlan_psz = pserialize_create();
292 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
293 if_clone_attach(&vlan_cloner);
294
295 vlan_hash_init();
296 MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input);
297 }
298
299 static int
300 vlandetach(void)
301 {
302 bool is_empty;
303 int error;
304
305 mutex_enter(&ifv_list.lock);
306 is_empty = LIST_EMPTY(&ifv_list.list);
307 mutex_exit(&ifv_list.lock);
308
309 if (!is_empty)
310 return EBUSY;
311
312 error = vlan_hash_fini();
313 if (error != 0)
314 return error;
315
316 if_clone_detach(&vlan_cloner);
317 psref_class_destroy(ifvm_psref_class);
318 pserialize_destroy(vlan_psz);
319 mutex_destroy(&ifv_hash.lock);
320 mutex_destroy(&ifv_list.lock);
321
322 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
323 return 0;
324 }
325
326 static void
327 vlan_reset_linkname(struct ifnet *ifp)
328 {
329
330 /*
331 * We start out with a "802.1Q VLAN" type and zero-length
332 * addresses. When we attach to a parent interface, we
333 * inherit its type, address length, address, and data link
334 * type.
335 */
336
337 ifp->if_type = IFT_L2VLAN;
338 ifp->if_addrlen = 0;
339 ifp->if_dlt = DLT_NULL;
340 if_alloc_sadl(ifp);
341 }
342
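/*
 * Create a new vlan pseudo-interface.  The interface is registered in an
 * unconfigured state; it is attached to a parent later via the SIOCSETVLAN
 * ioctl (see vlan_config()).
 */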
343 static int
344 vlan_clone_create(struct if_clone *ifc, int unit)
345 {
346 struct ifvlan *ifv;
347 struct ifnet *ifp;
348 struct ifvlan_linkmib *mib;
349 int rv;
350
351 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK|M_ZERO);
352 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
353 ifp = &ifv->ifv_if;
354 LIST_INIT(&ifv->ifv_mc_listhead);
355
356 mib->ifvm_ifvlan = ifv;
357 mib->ifvm_p = NULL;
358 psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
359
360 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
361 ifv->ifv_psz = pserialize_create();
362 ifv->ifv_mib = mib;
363
364 mutex_enter(&ifv_list.lock);
365 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
366 mutex_exit(&ifv_list.lock);
367
368 if_initname(ifp, ifc->ifc_name, unit);
369 ifp->if_softc = ifv;
370 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
371 ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
372 #ifdef NET_MPSAFE
373 ifp->if_extflags |= IFEF_MPSAFE;
374 #endif
375 ifp->if_start = vlan_start;
376 ifp->if_transmit = vlan_transmit;
377 ifp->if_ioctl = vlan_ioctl;
378 IFQ_SET_READY(&ifp->if_snd);
379
380 rv = if_initialize(ifp);
381 if (rv != 0) {
382 aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname,
383 rv);
384 goto fail;
385 }
386
387 vlan_reset_linkname(ifp);
388 if_register(ifp);
389 return 0;
390
391 fail:
392 mutex_enter(&ifv_list.lock);
393 LIST_REMOVE(ifv, ifv_list);
394 mutex_exit(&ifv_list.lock);
395
396 mutex_destroy(&ifv->ifv_lock);
397 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
398 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
399 free(ifv, M_DEVBUF);
400
401 return rv;
402 }
403
404 static int
405 vlan_clone_destroy(struct ifnet *ifp)
406 {
407 struct ifvlan *ifv = ifp->if_softc;
408
409 mutex_enter(&ifv_list.lock);
410 LIST_REMOVE(ifv, ifv_list);
411 mutex_exit(&ifv_list.lock);
412
413 IFNET_LOCK(ifp);
414 vlan_unconfig(ifp);
415 IFNET_UNLOCK(ifp);
416 if_detach(ifp);
417
418 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
419 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
420 pserialize_destroy(ifv->ifv_psz);
421 mutex_destroy(&ifv->ifv_lock);
422 free(ifv, M_DEVBUF);
423
424 return 0;
425 }
426
427 /*
428 * Configure a VLAN interface.
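 * Attach "ifv" to the parent interface "p" with the given 802.1Q tag.
 * A fresh linkmib is built and published via vlan_linkmib_update(); the
 * vlan also inherits the parent's MTU, flags, capabilities and, for
 * Ethernet parents, its link-layer address.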
429 */
430 static int
431 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
432 {
433 struct ifnet *ifp = &ifv->ifv_if;
434 struct ifvlan_linkmib *nmib = NULL;
435 struct ifvlan_linkmib *omib = NULL;
436 struct ifvlan_linkmib *checkmib;
437 struct psref_target *nmib_psref = NULL;
438 const uint16_t vid = EVL_VLANOFTAG(tag);
439 int error = 0;
440 int idx;
441 bool omib_cleanup = false;
442 struct psref psref;
443
444 /* VLAN ID 0 and 4095 are reserved in the spec */
445 if ((vid == 0) || (vid == 0xfff))
446 return EINVAL;
447
448 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
449 mutex_enter(&ifv->ifv_lock);
450 omib = ifv->ifv_mib;
451
452 if (omib->ifvm_p != NULL) {
453 error = EBUSY;
454 goto done;
455 }
456
457 /* Duplicate check */
458 checkmib = vlan_lookup_tag_psref(p, vid, &psref);
459 if (checkmib != NULL) {
460 vlan_putref_linkmib(checkmib, &psref);
461 error = EEXIST;
462 goto done;
463 }
464
465 *nmib = *omib;
466 nmib_psref = &nmib->ifvm_psref;
467
468 psref_target_init(nmib_psref, ifvm_psref_class);
469
470 switch (p->if_type) {
471 case IFT_ETHER:
472 {
473 struct ethercom *ec = (void *)p;
474 nmib->ifvm_msw = &vlan_ether_multisw;
475 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
476 nmib->ifvm_mintu = ETHERMIN;
477
478 if (ec->ec_nvlans++ == 0) {
479 IFNET_LOCK(p);
480 error = ether_enable_vlan_mtu(p);
481 IFNET_UNLOCK(p);
482 if (error >= 0) {
483 if (error) {
484 ec->ec_nvlans--;
485 goto done;
486 }
487 nmib->ifvm_mtufudge = 0;
488 } else {
489 /*
490 * Fudge the MTU by the encapsulation size. This
491 * makes us incompatible with strictly compliant
492 * 802.1Q implementations, but allows us to use
493 * the feature with other NetBSD
494 * implementations, which might still be useful.
495 */
496 nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
497 }
498 error = 0;
499 }
500
501 /*
502 * If the parent interface can do hardware-assisted
503 * VLAN encapsulation, then propagate its hardware-
    504 		 * assisted checksumming flags and TCP segmentation
505 * offload.
506 */
507 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
508 ec->ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
509 ifp->if_capabilities = p->if_capabilities &
510 (IFCAP_TSOv4 | IFCAP_TSOv6 |
511 IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_IPv4_Rx|
512 IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_TCPv4_Rx|
513 IFCAP_CSUM_UDPv4_Tx|IFCAP_CSUM_UDPv4_Rx|
514 IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_TCPv6_Rx|
515 IFCAP_CSUM_UDPv6_Tx|IFCAP_CSUM_UDPv6_Rx);
516 }
517
518 /*
519 * We inherit the parent's Ethernet address.
520 */
521 ether_ifattach(ifp, CLLADDR(p->if_sadl));
522 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
523 break;
524 }
525
526 default:
527 error = EPROTONOSUPPORT;
528 goto done;
529 }
530
531 nmib->ifvm_p = p;
532 nmib->ifvm_tag = vid;
533 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
534 ifv->ifv_if.if_flags = p->if_flags &
535 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
536
537 /*
538 * Inherit the if_type from the parent. This allows us
539 * to participate in bridges of that type.
540 */
541 ifv->ifv_if.if_type = p->if_type;
542
543 PSLIST_ENTRY_INIT(ifv, ifv_hash);
544 idx = vlan_tag_hash(vid, ifv_hash.mask);
545
546 mutex_enter(&ifv_hash.lock);
547 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
548 mutex_exit(&ifv_hash.lock);
549
550 vlan_linkmib_update(ifv, nmib);
551 nmib = NULL;
552 nmib_psref = NULL;
553 omib_cleanup = true;
554
555 done:
556 mutex_exit(&ifv->ifv_lock);
557
558 if (nmib_psref)
559 psref_target_destroy(nmib_psref, ifvm_psref_class);
560 if (nmib)
561 kmem_free(nmib, sizeof(*nmib));
562 if (omib_cleanup)
563 kmem_free(omib, sizeof(*omib));
564
565 return error;
566 }
567
568 /*
569 * Unconfigure a VLAN interface.
570 */
571 static void
572 vlan_unconfig(struct ifnet *ifp)
573 {
574 struct ifvlan *ifv = ifp->if_softc;
575 struct ifvlan_linkmib *nmib = NULL;
576 int error;
577
578 KASSERT(IFNET_LOCKED(ifp));
579
580 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
581
582 mutex_enter(&ifv->ifv_lock);
583 error = vlan_unconfig_locked(ifv, nmib);
584 mutex_exit(&ifv->ifv_lock);
585
586 if (error)
587 kmem_free(nmib, sizeof(*nmib));
588 }
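
/*
 * Unconfigure helper; expects the vlan interface's IFNET_LOCK and
 * ifv->ifv_lock to be held.  Both locks are briefly dropped around
 * ether_ifdetach().  Returns non-zero (leaving *nmib unused) if the
 * interface has no parent.
 */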
589 static int
590 vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
591 {
592 struct ifnet *p;
593 struct ifnet *ifp = &ifv->ifv_if;
594 struct psref_target *nmib_psref = NULL;
595 struct ifvlan_linkmib *omib;
596 int error = 0;
597
598 KASSERT(IFNET_LOCKED(ifp));
599 KASSERT(mutex_owned(&ifv->ifv_lock));
600
601 ifp->if_flags &= ~(IFF_UP|IFF_RUNNING);
602
603 omib = ifv->ifv_mib;
604 p = omib->ifvm_p;
605
606 if (p == NULL) {
607 error = -1;
608 goto done;
609 }
610
611 *nmib = *omib;
612 nmib_psref = &nmib->ifvm_psref;
613 psref_target_init(nmib_psref, ifvm_psref_class);
614
615 /*
616 * Since the interface is being unconfigured, we need to empty the
617 * list of multicast groups that we may have joined while we were
618 * alive and remove them from the parent's list also.
619 */
620 (*nmib->ifvm_msw->vmsw_purgemulti)(ifv);
621
622 /* Disconnect from parent. */
623 switch (p->if_type) {
624 case IFT_ETHER:
625 {
626 struct ethercom *ec = (void *)p;
627 if (--ec->ec_nvlans == 0) {
628 IFNET_LOCK(p);
629 (void) ether_disable_vlan_mtu(p);
630 IFNET_UNLOCK(p);
631 }
632
633 /* XXX ether_ifdetach must not be called with IFNET_LOCK */
634 mutex_exit(&ifv->ifv_lock);
635 IFNET_UNLOCK(ifp);
636 ether_ifdetach(ifp);
637 IFNET_LOCK(ifp);
638 mutex_enter(&ifv->ifv_lock);
639
640 /* if_free_sadl must be called with IFNET_LOCK */
641 if_free_sadl(ifp, 1);
642
643 /* Restore vlan_ioctl overwritten by ether_ifdetach */
644 ifp->if_ioctl = vlan_ioctl;
645 vlan_reset_linkname(ifp);
646 break;
647 }
648
649 default:
650 panic("%s: impossible", __func__);
651 }
652
653 nmib->ifvm_p = NULL;
654 ifv->ifv_if.if_mtu = 0;
655 ifv->ifv_flags = 0;
656
657 mutex_enter(&ifv_hash.lock);
658 PSLIST_WRITER_REMOVE(ifv, ifv_hash);
659 pserialize_perform(vlan_psz);
660 mutex_exit(&ifv_hash.lock);
661 PSLIST_ENTRY_DESTROY(ifv, ifv_hash);
662
663 vlan_linkmib_update(ifv, nmib);
664
665 mutex_exit(&ifv->ifv_lock);
666
667 nmib_psref = NULL;
668 kmem_free(omib, sizeof(*omib));
669
670 #ifdef INET6
671 KERNEL_LOCK_UNLESS_NET_MPSAFE();
672 /* To delete v6 link local addresses */
673 if (in6_present)
674 in6_ifdetach(ifp);
675 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
676 #endif
677
678 if ((ifp->if_flags & IFF_PROMISC) != 0)
679 vlan_safe_ifpromisc_locked(ifp, 0);
680 if_down_locked(ifp);
681 ifp->if_capabilities = 0;
682 mutex_enter(&ifv->ifv_lock);
683 done:
684
685 if (nmib_psref)
686 psref_target_destroy(nmib_psref, ifvm_psref_class);
687
688 return error;
689 }
690
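/*
 * Set up / tear down the tag hash table.  vlan_hash_fini() fails with
 * EBUSY if any vlan is still linked into the table.
 */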
691 static void
692 vlan_hash_init(void)
693 {
694
695 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
696 &ifv_hash.mask);
697 }
698
699 static int
700 vlan_hash_fini(void)
701 {
702 int i;
703
704 mutex_enter(&ifv_hash.lock);
705
706 for (i = 0; i < ifv_hash.mask + 1; i++) {
707 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
708 ifv_hash) != NULL) {
709 mutex_exit(&ifv_hash.lock);
710 return EBUSY;
711 }
712 }
713
714 for (i = 0; i < ifv_hash.mask + 1; i++)
715 PSLIST_DESTROY(&ifv_hash.lists[i]);
716
717 mutex_exit(&ifv_hash.lock);
718
719 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);
720
721 ifv_hash.lists = NULL;
722 ifv_hash.mask = 0;
723
724 return 0;
725 }
726
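/*
 * Trivial hash of the 12-bit VLAN ID, folded down and masked to the
 * table size.
 */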
727 static int
728 vlan_tag_hash(uint16_t tag, u_long mask)
729 {
730 uint32_t hash;
731
732 hash = (tag >> 8) ^ tag;
733 hash = (hash >> 2) ^ hash;
734
735 return hash & mask;
736 }
737
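/*
 * Acquire a psref(9) reference to the current linkmib of "sc".  The caller
 * must be bound to a CPU (curlwp_bind()) and must drop the reference with
 * vlan_putref_linkmib() when done.
 */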
738 static struct ifvlan_linkmib *
739 vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
740 {
741 struct ifvlan_linkmib *mib;
742 int s;
743
744 s = pserialize_read_enter();
745 mib = atomic_load_consume(&sc->ifv_mib);
746 if (mib == NULL) {
747 pserialize_read_exit(s);
748 return NULL;
749 }
750 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
751 pserialize_read_exit(s);
752
753 return mib;
754 }
755
756 static void
757 vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
758 {
759 if (mib == NULL)
760 return;
761 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
762 }
763
764 static struct ifvlan_linkmib *
765 vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
766 {
767 int idx;
768 int s;
769 struct ifvlan *sc;
770
771 idx = vlan_tag_hash(tag, ifv_hash.mask);
772
773 s = pserialize_read_enter();
774 PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
775 ifv_hash) {
776 struct ifvlan_linkmib *mib = atomic_load_consume(&sc->ifv_mib);
777 if (mib == NULL)
778 continue;
779 if (mib->ifvm_tag != tag)
780 continue;
781 if (mib->ifvm_p != ifp)
782 continue;
783
784 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
785 pserialize_read_exit(s);
786 return mib;
787 }
788 pserialize_read_exit(s);
789 return NULL;
790 }
791
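/*
 * Publish a new linkmib and retire the old one: wait for pserialize
 * readers to drain, then wait for (and destroy) any remaining psref
 * holds on the old mib.  The caller frees the old mib afterwards.
 */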
792 static void
793 vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
794 {
795 struct ifvlan_linkmib *omib = ifv->ifv_mib;
796
797 KASSERT(mutex_owned(&ifv->ifv_lock));
798
799 atomic_store_release(&ifv->ifv_mib, nmib);
800
801 pserialize_perform(ifv->ifv_psz);
802 psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
803 }
804
805 /*
806 * Called when a parent interface is detaching; destroy any VLAN
807 * configuration for the parent interface.
808 */
809 void
810 vlan_ifdetach(struct ifnet *p)
811 {
812 struct ifvlan *ifv;
813 struct ifvlan_linkmib *mib, **nmibs;
814 struct psref psref;
815 int error;
816 int bound;
817 int i, cnt = 0;
818
819 bound = curlwp_bind();
820
821 mutex_enter(&ifv_list.lock);
822 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
823 mib = vlan_getref_linkmib(ifv, &psref);
824 if (mib == NULL)
825 continue;
826
827 if (mib->ifvm_p == p)
828 cnt++;
829
830 vlan_putref_linkmib(mib, &psref);
831 }
832 mutex_exit(&ifv_list.lock);
833
834 if (cnt == 0) {
835 curlwp_bindx(bound);
836 return;
837 }
838
839 /*
840 * The value of "cnt" does not increase while ifv_list.lock
841 * and ifv->ifv_lock are released here, because the parent
842 * interface is detaching.
843 */
844 nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
845 for (i = 0; i < cnt; i++) {
846 nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
847 }
848
849 mutex_enter(&ifv_list.lock);
850
851 i = 0;
852 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
853 struct ifnet *ifp = &ifv->ifv_if;
854
855 /* IFNET_LOCK must be held before ifv_lock. */
856 IFNET_LOCK(ifp);
857 mutex_enter(&ifv->ifv_lock);
858
859 /* XXX ifv_mib = NULL? */
860 if (ifv->ifv_mib->ifvm_p == p) {
861 KASSERTMSG(i < cnt, "no memory for unconfig, parent=%s",
862 p->if_xname);
863 error = vlan_unconfig_locked(ifv, nmibs[i]);
864 if (!error) {
865 nmibs[i] = NULL;
866 i++;
867 }
868
869 }
870
871 mutex_exit(&ifv->ifv_lock);
872 IFNET_UNLOCK(ifp);
873 }
874
875 mutex_exit(&ifv_list.lock);
876
877 curlwp_bindx(bound);
878
879 for (i = 0; i < cnt; i++) {
880 if (nmibs[i])
881 kmem_free(nmibs[i], sizeof(*nmibs[i]));
882 }
883
884 kmem_free(nmibs, sizeof(*nmibs) * cnt);
885
886 return;
887 }
888
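/*
 * Propagate the vlan interface's IFF_PROMISC setting to the parent,
 * tracking our own contribution with IFVF_PROMISC.
 */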
889 static int
890 vlan_set_promisc(struct ifnet *ifp)
891 {
892 struct ifvlan *ifv = ifp->if_softc;
893 struct ifvlan_linkmib *mib;
894 struct psref psref;
895 int error = 0;
896 int bound;
897
898 bound = curlwp_bind();
899 mib = vlan_getref_linkmib(ifv, &psref);
900 if (mib == NULL) {
901 curlwp_bindx(bound);
902 return EBUSY;
903 }
904
905 if ((ifp->if_flags & IFF_PROMISC) != 0) {
906 if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
907 error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
908 if (error == 0)
909 ifv->ifv_flags |= IFVF_PROMISC;
910 }
911 } else {
912 if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
913 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
914 if (error == 0)
915 ifv->ifv_flags &= ~IFVF_PROMISC;
916 }
917 }
918 vlan_putref_linkmib(mib, &psref);
919 curlwp_bindx(bound);
920
921 return error;
922 }
923
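/*
 * Handle ioctls on the vlan interface.  SIOCSETVLAN/SIOCGETVLAN configure
 * and report the parent/tag binding; multicast and capability requests are
 * validated against (or forwarded to) the parent; everything else falls
 * through to the common handlers.
 */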
924 static int
925 vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
926 {
927 struct lwp *l = curlwp;
928 struct ifvlan *ifv = ifp->if_softc;
929 struct ifaddr *ifa = (struct ifaddr *) data;
930 struct ifreq *ifr = (struct ifreq *) data;
931 struct ifnet *pr;
932 struct ifcapreq *ifcr;
933 struct vlanreq vlr;
934 struct ifvlan_linkmib *mib;
935 struct psref psref;
936 int error = 0;
937 int bound;
938
939 switch (cmd) {
940 case SIOCSIFMTU:
941 bound = curlwp_bind();
942 mib = vlan_getref_linkmib(ifv, &psref);
943 if (mib == NULL) {
944 curlwp_bindx(bound);
945 error = EBUSY;
946 break;
947 }
948
949 if (mib->ifvm_p == NULL) {
950 vlan_putref_linkmib(mib, &psref);
951 curlwp_bindx(bound);
952 error = EINVAL;
953 } else if (
954 ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
955 ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
956 vlan_putref_linkmib(mib, &psref);
957 curlwp_bindx(bound);
958 error = EINVAL;
959 } else {
960 vlan_putref_linkmib(mib, &psref);
961 curlwp_bindx(bound);
962
963 error = ifioctl_common(ifp, cmd, data);
964 if (error == ENETRESET)
965 error = 0;
966 }
967
968 break;
969
970 case SIOCSETVLAN:
971 if ((error = kauth_authorize_network(l->l_cred,
972 KAUTH_NETWORK_INTERFACE,
973 KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
974 NULL)) != 0)
975 break;
976 if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
977 break;
978
979 if (vlr.vlr_parent[0] == '\0') {
980 bound = curlwp_bind();
981 mib = vlan_getref_linkmib(ifv, &psref);
982 if (mib == NULL) {
983 curlwp_bindx(bound);
984 error = EBUSY;
985 break;
986 }
987
988 if (mib->ifvm_p != NULL &&
989 (ifp->if_flags & IFF_PROMISC) != 0)
990 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
991
992 vlan_putref_linkmib(mib, &psref);
993 curlwp_bindx(bound);
994
995 vlan_unconfig(ifp);
996 break;
997 }
998 if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
999 error = EINVAL; /* check for valid tag */
1000 break;
1001 }
1002 if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
1003 error = ENOENT;
1004 break;
1005 }
1006 error = vlan_config(ifv, pr, vlr.vlr_tag);
1007 if (error != 0) {
1008 break;
1009 }
1010
1011 /* Update promiscuous mode, if necessary. */
1012 vlan_set_promisc(ifp);
1013
1014 ifp->if_flags |= IFF_RUNNING;
1015 break;
1016
1017 case SIOCGETVLAN:
1018 memset(&vlr, 0, sizeof(vlr));
1019 bound = curlwp_bind();
1020 mib = vlan_getref_linkmib(ifv, &psref);
1021 if (mib == NULL) {
1022 curlwp_bindx(bound);
1023 error = EBUSY;
1024 break;
1025 }
1026 if (mib->ifvm_p != NULL) {
1027 snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
1028 mib->ifvm_p->if_xname);
1029 vlr.vlr_tag = mib->ifvm_tag;
1030 }
1031 vlan_putref_linkmib(mib, &psref);
1032 curlwp_bindx(bound);
1033 error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
1034 break;
1035
1036 case SIOCSIFFLAGS:
1037 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1038 break;
1039 /*
1040 * For promiscuous mode, we enable promiscuous mode on
1041 * the parent if we need promiscuous on the VLAN interface.
1042 */
1043 bound = curlwp_bind();
1044 mib = vlan_getref_linkmib(ifv, &psref);
1045 if (mib == NULL) {
1046 curlwp_bindx(bound);
1047 error = EBUSY;
1048 break;
1049 }
1050
1051 if (mib->ifvm_p != NULL)
1052 error = vlan_set_promisc(ifp);
1053 vlan_putref_linkmib(mib, &psref);
1054 curlwp_bindx(bound);
1055 break;
1056
1057 case SIOCADDMULTI:
1058 mutex_enter(&ifv->ifv_lock);
1059 mib = ifv->ifv_mib;
1060 if (mib == NULL) {
1061 error = EBUSY;
1062 mutex_exit(&ifv->ifv_lock);
1063 break;
1064 }
1065
1066 error = (mib->ifvm_p != NULL) ?
1067 (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
1068 mib = NULL;
1069 mutex_exit(&ifv->ifv_lock);
1070 break;
1071
1072 case SIOCDELMULTI:
1073 mutex_enter(&ifv->ifv_lock);
1074 mib = ifv->ifv_mib;
1075 if (mib == NULL) {
1076 error = EBUSY;
1077 mutex_exit(&ifv->ifv_lock);
1078 break;
1079 }
1080 error = (mib->ifvm_p != NULL) ?
1081 (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
1082 mib = NULL;
1083 mutex_exit(&ifv->ifv_lock);
1084 break;
1085
1086 case SIOCSIFCAP:
1087 ifcr = data;
1088 /* make sure caps are enabled on parent */
1089 bound = curlwp_bind();
1090 mib = vlan_getref_linkmib(ifv, &psref);
1091 if (mib == NULL) {
1092 curlwp_bindx(bound);
1093 error = EBUSY;
1094 break;
1095 }
1096
1097 if (mib->ifvm_p == NULL) {
1098 vlan_putref_linkmib(mib, &psref);
1099 curlwp_bindx(bound);
1100 error = EINVAL;
1101 break;
1102 }
1103 if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
1104 ifcr->ifcr_capenable) {
1105 vlan_putref_linkmib(mib, &psref);
1106 curlwp_bindx(bound);
1107 error = EINVAL;
1108 break;
1109 }
1110
1111 vlan_putref_linkmib(mib, &psref);
1112 curlwp_bindx(bound);
1113
1114 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
1115 error = 0;
1116 break;
1117 case SIOCINITIFADDR:
1118 bound = curlwp_bind();
1119 mib = vlan_getref_linkmib(ifv, &psref);
1120 if (mib == NULL) {
1121 curlwp_bindx(bound);
1122 error = EBUSY;
1123 break;
1124 }
1125
1126 if (mib->ifvm_p == NULL) {
1127 error = EINVAL;
1128 vlan_putref_linkmib(mib, &psref);
1129 curlwp_bindx(bound);
1130 break;
1131 }
1132 vlan_putref_linkmib(mib, &psref);
1133 curlwp_bindx(bound);
1134
1135 ifp->if_flags |= IFF_UP;
1136 #ifdef INET
1137 if (ifa->ifa_addr->sa_family == AF_INET)
1138 arp_ifinit(ifp, ifa);
1139 #endif
1140 break;
1141
1142 default:
1143 error = ether_ioctl(ifp, cmd, data);
1144 }
1145
1146 return error;
1147 }
1148
1149 static int
1150 vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
1151 {
1152 const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1153 struct vlan_mc_entry *mc;
1154 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1155 struct ifvlan_linkmib *mib;
1156 int error;
1157
1158 KASSERT(mutex_owned(&ifv->ifv_lock));
1159
1160 if (sa->sa_len > sizeof(struct sockaddr_storage))
1161 return EINVAL;
1162
1163 error = ether_addmulti(sa, &ifv->ifv_ec);
1164 if (error != ENETRESET)
1165 return error;
1166
1167 /*
1168 * This is a new multicast address. We have to tell parent
1169 * about it. Also, remember this multicast address so that
1170 * we can delete it on unconfigure.
1171 */
1172 mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
1173 if (mc == NULL) {
1174 error = ENOMEM;
1175 goto alloc_failed;
1176 }
1177
1178 /*
1179 * Since ether_addmulti() returned ENETRESET, the following two
1180 * statements shouldn't fail. Here ifv_ec is implicitly protected
   1181 	 * by ifv_lock.
1182 */
1183 error = ether_multiaddr(sa, addrlo, addrhi);
1184 KASSERT(error == 0);
1185
1186 ETHER_LOCK(&ifv->ifv_ec);
1187 mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1188 ETHER_UNLOCK(&ifv->ifv_ec);
1189
1190 KASSERT(mc->mc_enm != NULL);
1191
1192 memcpy(&mc->mc_addr, sa, sa->sa_len);
1193 LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);
1194
1195 mib = ifv->ifv_mib;
1196
1197 KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1198 error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
1199 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1200
1201 if (error != 0)
1202 goto ioctl_failed;
1203 return error;
1204
1205 ioctl_failed:
1206 LIST_REMOVE(mc, mc_entries);
1207 free(mc, M_DEVBUF);
1208
1209 alloc_failed:
1210 (void)ether_delmulti(sa, &ifv->ifv_ec);
1211 return error;
1212 }
1213
1214 static int
1215 vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
1216 {
1217 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1218 struct ether_multi *enm;
1219 struct vlan_mc_entry *mc;
1220 struct ifvlan_linkmib *mib;
1221 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1222 int error;
1223
1224 KASSERT(mutex_owned(&ifv->ifv_lock));
1225
1226 /*
1227 * Find a key to lookup vlan_mc_entry. We have to do this
1228 * before calling ether_delmulti for obvious reasons.
1229 */
1230 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
1231 return error;
1232
1233 ETHER_LOCK(&ifv->ifv_ec);
1234 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1235 ETHER_UNLOCK(&ifv->ifv_ec);
1236 if (enm == NULL)
1237 return EINVAL;
1238
1239 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
1240 if (mc->mc_enm == enm)
1241 break;
1242 }
1243
   1244 	/* We won't delete entries we didn't add */
1245 if (mc == NULL)
1246 return EINVAL;
1247
1248 error = ether_delmulti(sa, &ifv->ifv_ec);
1249 if (error != ENETRESET)
1250 return error;
1251
1252 /* We no longer use this multicast address. Tell parent so. */
1253 mib = ifv->ifv_mib;
1254 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);
1255
1256 if (error == 0) {
1257 /* And forget about this address. */
1258 LIST_REMOVE(mc, mc_entries);
1259 free(mc, M_DEVBUF);
1260 } else {
1261 (void)ether_addmulti(sa, &ifv->ifv_ec);
1262 }
1263
1264 return error;
1265 }
1266
1267 /*
   1268  * Delete any multicast addresses that we have asked the parent interface
   1269  * to add.  Called when the vlan is being unconfigured.
1270 */
1271 static void
1272 vlan_ether_purgemulti(struct ifvlan *ifv)
1273 {
1274 struct vlan_mc_entry *mc;
1275 struct ifvlan_linkmib *mib;
1276
1277 KASSERT(mutex_owned(&ifv->ifv_lock));
1278 mib = ifv->ifv_mib;
1279 if (mib == NULL) {
1280 return;
1281 }
1282
1283 while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
1284 (void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
1285 sstocsa(&mc->mc_addr));
1286 LIST_REMOVE(mc, mc_entries);
1287 free(mc, M_DEVBUF);
1288 }
1289 }
1290
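/*
 * if_start handler: drain the send queue, tag each frame (in the mbuf
 * header if the parent does hardware tagging, otherwise by inserting an
 * 802.1Q encapsulation header) and hand it to the parent for transmission.
 */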
1291 static void
1292 vlan_start(struct ifnet *ifp)
1293 {
1294 struct ifvlan *ifv = ifp->if_softc;
1295 struct ifnet *p;
1296 struct ethercom *ec;
1297 struct mbuf *m;
1298 struct ifvlan_linkmib *mib;
1299 struct psref psref;
1300 int error;
1301
1302 mib = vlan_getref_linkmib(ifv, &psref);
1303 if (mib == NULL)
1304 return;
1305 p = mib->ifvm_p;
1306 ec = (void *)mib->ifvm_p;
1307
1308 ifp->if_flags |= IFF_OACTIVE;
1309
1310 for (;;) {
1311 IFQ_DEQUEUE(&ifp->if_snd, m);
1312 if (m == NULL)
1313 break;
1314
1315 #ifdef ALTQ
1316 /*
1317 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
1318 * defined.
1319 */
1320 KERNEL_LOCK(1, NULL);
1321 /*
1322 * If ALTQ is enabled on the parent interface, do
1323 * classification; the queueing discipline might
1324 * not require classification, but might require
1325 * the address family/header pointer in the pktattr.
1326 */
1327 if (ALTQ_IS_ENABLED(&p->if_snd)) {
1328 switch (p->if_type) {
1329 case IFT_ETHER:
1330 altq_etherclassify(&p->if_snd, m);
1331 break;
1332 default:
1333 panic("%s: impossible (altq)", __func__);
1334 }
1335 }
1336 KERNEL_UNLOCK_ONE(NULL);
1337 #endif /* ALTQ */
1338
1339 bpf_mtap(ifp, m, BPF_D_OUT);
1340 /*
1341 * If the parent can insert the tag itself, just mark
1342 * the tag in the mbuf header.
1343 */
1344 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
1345 vlan_set_tag(m, mib->ifvm_tag);
1346 } else {
1347 /*
1348 * insert the tag ourselves
1349 */
1350 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1351 if (m == NULL) {
   1352 				printf("%s: unable to prepend encap header\n",
1353 p->if_xname);
1354 if_statinc(ifp, if_oerrors);
1355 continue;
1356 }
1357
1358 switch (p->if_type) {
1359 case IFT_ETHER:
1360 {
1361 struct ether_vlan_header *evl;
1362
1363 if (m->m_len < sizeof(struct ether_vlan_header))
1364 m = m_pullup(m,
1365 sizeof(struct ether_vlan_header));
1366 if (m == NULL) {
1367 printf("%s: unable to pullup encap "
1368 "header", p->if_xname);
1369 if_statinc(ifp, if_oerrors);
1370 continue;
1371 }
1372
1373 /*
1374 * Transform the Ethernet header into an
1375 * Ethernet header with 802.1Q encapsulation.
1376 */
1377 memmove(mtod(m, void *),
1378 mtod(m, char *) + mib->ifvm_encaplen,
1379 sizeof(struct ether_header));
1380 evl = mtod(m, struct ether_vlan_header *);
1381 evl->evl_proto = evl->evl_encap_proto;
1382 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1383 evl->evl_tag = htons(mib->ifvm_tag);
1384
1385 /*
1386 * To cater for VLAN-aware layer 2 ethernet
1387 * switches which may need to strip the tag
1388 * before forwarding the packet, make sure
1389 * the packet+tag is at least 68 bytes long.
1390 * This is necessary because our parent will
1391 * only pad to 64 bytes (ETHER_MIN_LEN) and
1392 * some switches will not pad by themselves
1393 * after deleting a tag.
1394 */
1395 const size_t min_data_len = ETHER_MIN_LEN -
1396 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1397 if (m->m_pkthdr.len < min_data_len) {
1398 m_copyback(m, m->m_pkthdr.len,
1399 min_data_len - m->m_pkthdr.len,
1400 vlan_zero_pad_buff);
1401 }
1402 break;
1403 }
1404
1405 default:
1406 panic("%s: impossible", __func__);
1407 }
1408 }
1409
1410 if ((p->if_flags & IFF_RUNNING) == 0) {
1411 m_freem(m);
1412 continue;
1413 }
1414
1415 error = if_transmit_lock(p, m);
1416 if (error) {
1417 /* mbuf is already freed */
1418 if_statinc(ifp, if_oerrors);
1419 continue;
1420 }
1421 if_statinc(ifp, if_opackets);
1422 }
1423
1424 ifp->if_flags &= ~IFF_OACTIVE;
1425
   1426 	/* Drop the reference to the mib */
1427 vlan_putref_linkmib(mib, &psref);
1428 }
1429
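/*
 * if_transmit handler: same tagging logic as vlan_start(), but for a
 * single mbuf, with pfil(9) output hooks and per-interface statistics.
 */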
1430 static int
1431 vlan_transmit(struct ifnet *ifp, struct mbuf *m)
1432 {
1433 struct ifvlan *ifv = ifp->if_softc;
1434 struct ifnet *p;
1435 struct ethercom *ec;
1436 struct ifvlan_linkmib *mib;
1437 struct psref psref;
1438 int error;
1439 size_t pktlen = m->m_pkthdr.len;
1440 bool mcast = (m->m_flags & M_MCAST) != 0;
1441
1442 mib = vlan_getref_linkmib(ifv, &psref);
1443 if (mib == NULL) {
1444 m_freem(m);
1445 return ENETDOWN;
1446 }
1447
1448 p = mib->ifvm_p;
1449 ec = (void *)mib->ifvm_p;
1450
1451 bpf_mtap(ifp, m, BPF_D_OUT);
1452
1453 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
1454 goto out;
1455 if (m == NULL)
1456 goto out;
1457
1458 /*
1459 * If the parent can insert the tag itself, just mark
1460 * the tag in the mbuf header.
1461 */
1462 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
1463 vlan_set_tag(m, mib->ifvm_tag);
1464 } else {
1465 /*
1466 * insert the tag ourselves
1467 */
1468 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1469 if (m == NULL) {
   1470 			printf("%s: unable to prepend encap header\n",
1471 p->if_xname);
1472 if_statinc(ifp, if_oerrors);
1473 error = ENOBUFS;
1474 goto out;
1475 }
1476
1477 switch (p->if_type) {
1478 case IFT_ETHER:
1479 {
1480 struct ether_vlan_header *evl;
1481
1482 if (m->m_len < sizeof(struct ether_vlan_header))
1483 m = m_pullup(m,
1484 sizeof(struct ether_vlan_header));
1485 if (m == NULL) {
1486 printf("%s: unable to pullup encap "
1487 "header", p->if_xname);
1488 if_statinc(ifp, if_oerrors);
1489 error = ENOBUFS;
1490 goto out;
1491 }
1492
1493 /*
1494 * Transform the Ethernet header into an
1495 * Ethernet header with 802.1Q encapsulation.
1496 */
1497 memmove(mtod(m, void *),
1498 mtod(m, char *) + mib->ifvm_encaplen,
1499 sizeof(struct ether_header));
1500 evl = mtod(m, struct ether_vlan_header *);
1501 evl->evl_proto = evl->evl_encap_proto;
1502 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1503 evl->evl_tag = htons(mib->ifvm_tag);
1504
1505 /*
1506 * To cater for VLAN-aware layer 2 ethernet
1507 * switches which may need to strip the tag
1508 * before forwarding the packet, make sure
1509 * the packet+tag is at least 68 bytes long.
1510 * This is necessary because our parent will
1511 * only pad to 64 bytes (ETHER_MIN_LEN) and
1512 * some switches will not pad by themselves
1513 * after deleting a tag.
1514 */
1515 const size_t min_data_len = ETHER_MIN_LEN -
1516 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1517 if (m->m_pkthdr.len < min_data_len) {
1518 m_copyback(m, m->m_pkthdr.len,
1519 min_data_len - m->m_pkthdr.len,
1520 vlan_zero_pad_buff);
1521 }
1522 break;
1523 }
1524
1525 default:
1526 panic("%s: impossible", __func__);
1527 }
1528 }
1529
1530 if ((p->if_flags & IFF_RUNNING) == 0) {
1531 m_freem(m);
1532 error = ENETDOWN;
1533 goto out;
1534 }
1535
1536 error = if_transmit_lock(p, m);
1537 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1538 if (error) {
1539 /* mbuf is already freed */
1540 if_statinc_ref(nsr, if_oerrors);
1541 } else {
1542 if_statinc_ref(nsr, if_opackets);
1543 if_statadd_ref(nsr, if_obytes, pktlen);
1544 if (mcast)
1545 if_statinc_ref(nsr, if_omcasts);
1546 }
1547 IF_STAT_PUTREF(ifp);
1548
1549 out:
   1550 	/* Drop the reference to the mib */
1551 vlan_putref_linkmib(mib, &psref);
1552 return error;
1553 }
1554
1555 /*
1556 * Given an Ethernet frame, find a valid vlan interface corresponding to the
1557 * given source interface and tag, then run the real packet through the
1558 * parent's input routine.
1559 */
1560 void
1561 vlan_input(struct ifnet *ifp, struct mbuf *m)
1562 {
1563 struct ifvlan *ifv;
1564 uint16_t vid;
1565 struct ifvlan_linkmib *mib;
1566 struct psref psref;
1567 bool have_vtag;
1568
1569 have_vtag = vlan_has_tag(m);
1570 if (have_vtag) {
1571 vid = EVL_VLANOFTAG(vlan_get_tag(m));
1572 m->m_flags &= ~M_VLANTAG;
1573 } else {
1574 struct ether_vlan_header *evl;
1575
1576 if (ifp->if_type != IFT_ETHER) {
1577 panic("%s: impossible", __func__);
1578 }
1579
1580 if (m->m_len < sizeof(struct ether_vlan_header) &&
1581 (m = m_pullup(m,
1582 sizeof(struct ether_vlan_header))) == NULL) {
1583 printf("%s: no memory for VLAN header, "
1584 "dropping packet.\n", ifp->if_xname);
1585 return;
1586 }
1587 evl = mtod(m, struct ether_vlan_header *);
1588 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1589
1590 vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));
1591
1592 /*
1593 * Restore the original ethertype. We'll remove
1594 * the encapsulation after we've found the vlan
1595 * interface corresponding to the tag.
1596 */
1597 evl->evl_encap_proto = evl->evl_proto;
1598 }
1599
1600 mib = vlan_lookup_tag_psref(ifp, vid, &psref);
1601 if (mib == NULL) {
1602 m_freem(m);
1603 if_statinc(ifp, if_noproto);
1604 return;
1605 }
1606 KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);
1607
1608 ifv = mib->ifvm_ifvlan;
1609 if ((ifv->ifv_if.if_flags & (IFF_UP|IFF_RUNNING)) !=
1610 (IFF_UP|IFF_RUNNING)) {
1611 m_freem(m);
1612 if_statinc(ifp, if_noproto);
1613 goto out;
1614 }
1615
1616 /*
1617 * Now, remove the encapsulation header. The original
1618 * header has already been fixed up above.
1619 */
1620 if (!have_vtag) {
1621 memmove(mtod(m, char *) + mib->ifvm_encaplen,
1622 mtod(m, void *), sizeof(struct ether_header));
1623 m_adj(m, mib->ifvm_encaplen);
1624 }
1625
1626 m_set_rcvif(m, &ifv->ifv_if);
1627 ifv->ifv_if.if_ipackets++;
1628
1629 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
1630 goto out;
1631 if (m == NULL)
1632 goto out;
1633
1634 m->m_flags &= ~M_PROMISC;
1635 if_input(&ifv->ifv_if, m);
1636 out:
1637 vlan_putref_linkmib(mib, &psref);
1638 }
1639
1640 /*
1641 * Module infrastructure
1642 */
1643 #include "if_module.h"
1644
1645 IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)
1646