1 /* $NetBSD: if_vlan.c,v 1.143 2019/08/20 03:56:59 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright 1998 Massachusetts Institute of Technology
34 *
35 * Permission to use, copy, modify, and distribute this software and
36 * its documentation for any purpose and without fee is hereby
37 * granted, provided that both the above copyright notice and this
38 * permission notice appear in all copies, that both the above
39 * copyright notice and this permission notice appear in all
40 * supporting documentation, and that the name of M.I.T. not be used
41 * in advertising or publicity pertaining to distribution of the
42 * software without specific, written prior permission. M.I.T. makes
43 * no representations about the suitability of this software for any
44 * purpose. It is provided "as is" without express or implied
45 * warranty.
46 *
47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62 */
63
64 /*
65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be
66 * extended some day to also handle IEEE 802.1P priority tagging. This is
67 * sort of sneaky in the implementation, since we need to pretend to be
68 * enough of an Ethernet implementation to make ARP work. The way we do
69 * this is by telling everyone that we are an Ethernet interface, and then
70 * catch the packets that ether_output() left on our output queue when it
71 * calls if_start(), rewrite them for use by the real outgoing interface,
72 * and ask it to send them.
73 *
74 * TODO:
75 *
76 * - Need some way to notify vlan interfaces when the parent
77 * interface changes MTU.
78 */
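
/*
 * Example configuration (illustrative only; the interface names, tag
 * and address below are arbitrary):
 *
 *	ifconfig vlan0 create
 *	ifconfig vlan0 vlan 5 vlanif wm0
 *	ifconfig vlan0 inet 192.0.2.1/24 up
 *
 * The parent/tag binding can also be made programmatically through the
 * SIOCSETVLAN ioctl handled by vlan_ioctl() below.
 */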
79
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.143 2019/08/20 03:56:59 msaitoh Exp $");
82
83 #ifdef _KERNEL_OPT
84 #include "opt_inet.h"
85 #include "opt_net_mpsafe.h"
86 #endif
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/mbuf.h>
92 #include <sys/queue.h>
93 #include <sys/socket.h>
94 #include <sys/sockio.h>
95 #include <sys/systm.h>
96 #include <sys/proc.h>
97 #include <sys/kauth.h>
98 #include <sys/mutex.h>
99 #include <sys/kmem.h>
100 #include <sys/cpu.h>
101 #include <sys/pserialize.h>
102 #include <sys/psref.h>
103 #include <sys/pslist.h>
104 #include <sys/atomic.h>
105 #include <sys/device.h>
106 #include <sys/module.h>
107
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_ether.h>
113 #include <net/if_vlanvar.h>
114
115 #ifdef INET
116 #include <netinet/in.h>
117 #include <netinet/if_inarp.h>
118 #endif
119 #ifdef INET6
120 #include <netinet6/in6_ifattach.h>
121 #include <netinet6/in6_var.h>
122 #endif
123
124 #include "ioconf.h"
125
126 struct vlan_mc_entry {
127 LIST_ENTRY(vlan_mc_entry) mc_entries;
128 /*
129 * A key to identify this entry. The mc_addr below can't be
130	 * used since multiple sockaddrs may be mapped into the same
131 * ether_multi (e.g., AF_UNSPEC).
132 */
133 struct ether_multi *mc_enm;
134 struct sockaddr_storage mc_addr;
135 };
136
137 struct ifvlan_linkmib {
138 struct ifvlan *ifvm_ifvlan;
139 const struct vlan_multisw *ifvm_msw;
140 int ifvm_encaplen; /* encapsulation length */
141 int ifvm_mtufudge; /* MTU fudged by this much */
142 int ifvm_mintu; /* min transmission unit */
143 uint16_t ifvm_proto; /* encapsulation ethertype */
144 uint16_t ifvm_tag; /* tag to apply on packets */
145 struct ifnet *ifvm_p; /* parent interface of this vlan */
146
147 struct psref_target ifvm_psref;
148 };
149
150 struct ifvlan {
151 struct ethercom ifv_ec;
152 struct ifvlan_linkmib *ifv_mib; /*
153 * reader must use vlan_getref_linkmib()
154 * instead of direct dereference
155 */
156 kmutex_t ifv_lock; /* writer lock for ifv_mib */
157 pserialize_t ifv_psz;
158
159 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
160 LIST_ENTRY(ifvlan) ifv_list;
161 struct pslist_entry ifv_hash;
162 int ifv_flags;
163 };
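
/*
 * A minimal sketch of the reader side for ifv_mib (mirroring
 * vlan_set_promisc() and the ioctl handlers below); illustrative only,
 * not compiled, and assumes "ifv" is a struct ifvlan pointer:
 */
#if 0
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int bound;

	bound = curlwp_bind();	/* stay on this CPU while holding the psref */
	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib != NULL) {
		/* ... read mib->ifvm_p, mib->ifvm_tag, ... */
		vlan_putref_linkmib(mib, &psref);
	}
	curlwp_bindx(bound);
#endif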
164
165 #define IFVF_PROMISC 0x01 /* promiscuous mode enabled */
166
167 #define ifv_if ifv_ec.ec_if
168
169 #define ifv_msw ifv_mib.ifvm_msw
170 #define ifv_encaplen ifv_mib.ifvm_encaplen
171 #define ifv_mtufudge ifv_mib.ifvm_mtufudge
172 #define ifv_mintu ifv_mib.ifvm_mintu
173 #define ifv_tag ifv_mib.ifvm_tag
174
175 struct vlan_multisw {
176 int (*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
177 int (*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
178 void (*vmsw_purgemulti)(struct ifvlan *);
179 };
180
181 static int vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
182 static int vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
183 static void vlan_ether_purgemulti(struct ifvlan *);
184
185 const struct vlan_multisw vlan_ether_multisw = {
186 .vmsw_addmulti = vlan_ether_addmulti,
187 .vmsw_delmulti = vlan_ether_delmulti,
188 .vmsw_purgemulti = vlan_ether_purgemulti,
189 };
190
191 static int vlan_clone_create(struct if_clone *, int);
192 static int vlan_clone_destroy(struct ifnet *);
193 static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t);
194 static int vlan_ioctl(struct ifnet *, u_long, void *);
195 static void vlan_start(struct ifnet *);
196 static int vlan_transmit(struct ifnet *, struct mbuf *);
197 static void vlan_unconfig(struct ifnet *);
198 static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *);
199 static void vlan_hash_init(void);
200 static int vlan_hash_fini(void);
201 static int vlan_tag_hash(uint16_t, u_long);
202 static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *,
203 struct psref *);
204 static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *);
205 static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *);
206 static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *,
207 uint16_t, struct psref *);
208
209 static struct {
210 kmutex_t lock;
211 LIST_HEAD(vlan_ifvlist, ifvlan) list;
212 } ifv_list __cacheline_aligned;
213
214
215 #if !defined(VLAN_TAG_HASH_SIZE)
216 #define VLAN_TAG_HASH_SIZE 32
217 #endif
218 static struct {
219 kmutex_t lock;
220 struct pslist_head *lists;
221 u_long mask;
222 } ifv_hash __cacheline_aligned = {
223 .lists = NULL,
224 .mask = 0,
225 };
226
227 pserialize_t vlan_psz __read_mostly;
228 static struct psref_class *ifvm_psref_class __read_mostly;
229
230 struct if_clone vlan_cloner =
231 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);
232
233 /* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */
234 static char vlan_zero_pad_buff[ETHER_MIN_LEN];
235
236 static inline int
237 vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
238 {
239 int e;
240
241 KERNEL_LOCK_UNLESS_NET_MPSAFE();
242 e = ifpromisc(ifp, pswitch);
243 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
244
245 return e;
246 }
247
248 static inline int
249 vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
250 {
251 int e;
252
253 KERNEL_LOCK_UNLESS_NET_MPSAFE();
254 e = ifpromisc_locked(ifp, pswitch);
255 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
256
257 return e;
258 }
259
260 void
261 vlanattach(int n)
262 {
263
264 /*
265	 * Nothing to do here; initialization is handled by the
266 * module initialization code in vlaninit() below.
267 */
268 }
269
270 static void
271 vlaninit(void)
272 {
273 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
274 LIST_INIT(&ifv_list.list);
275
276 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
277 vlan_psz = pserialize_create();
278 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
279 if_clone_attach(&vlan_cloner);
280
281 vlan_hash_init();
282 MODULE_HOOK_SET(if_vlan_vlan_input_hook, "vlan_inp", vlan_input);
283 }
284
285 static int
286 vlandetach(void)
287 {
288 bool is_empty;
289 int error;
290
291 mutex_enter(&ifv_list.lock);
292 is_empty = LIST_EMPTY(&ifv_list.list);
293 mutex_exit(&ifv_list.lock);
294
295 if (!is_empty)
296 return EBUSY;
297
298 error = vlan_hash_fini();
299 if (error != 0)
300 return error;
301
302 if_clone_detach(&vlan_cloner);
303 psref_class_destroy(ifvm_psref_class);
304 pserialize_destroy(vlan_psz);
305 mutex_destroy(&ifv_hash.lock);
306 mutex_destroy(&ifv_list.lock);
307
308 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
309 return 0;
310 }
311
312 static void
313 vlan_reset_linkname(struct ifnet *ifp)
314 {
315
316 /*
317	 * We start out with an "802.1Q VLAN" type and zero-length
318 * addresses. When we attach to a parent interface, we
319 * inherit its type, address length, address, and data link
320 * type.
321 */
322
323 ifp->if_type = IFT_L2VLAN;
324 ifp->if_addrlen = 0;
325 ifp->if_dlt = DLT_NULL;
326 if_alloc_sadl(ifp);
327 }
328
329 static int
330 vlan_clone_create(struct if_clone *ifc, int unit)
331 {
332 struct ifvlan *ifv;
333 struct ifnet *ifp;
334 struct ifvlan_linkmib *mib;
335 int rv;
336
337 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO);
338 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
339 ifp = &ifv->ifv_if;
340 LIST_INIT(&ifv->ifv_mc_listhead);
341
342 mib->ifvm_ifvlan = ifv;
343 mib->ifvm_p = NULL;
344 psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
345
346 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
347 ifv->ifv_psz = pserialize_create();
348 ifv->ifv_mib = mib;
349
350 mutex_enter(&ifv_list.lock);
351 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
352 mutex_exit(&ifv_list.lock);
353
354 if_initname(ifp, ifc->ifc_name, unit);
355 ifp->if_softc = ifv;
356 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
357 ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
358 #ifdef NET_MPSAFE
359 ifp->if_extflags |= IFEF_MPSAFE;
360 #endif
361 ifp->if_start = vlan_start;
362 ifp->if_transmit = vlan_transmit;
363 ifp->if_ioctl = vlan_ioctl;
364 IFQ_SET_READY(&ifp->if_snd);
365
366 rv = if_initialize(ifp);
367 if (rv != 0) {
368 aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname,
369 rv);
370 goto fail;
371 }
372
373 vlan_reset_linkname(ifp);
374 if_register(ifp);
375 return 0;
376
377 fail:
378 mutex_enter(&ifv_list.lock);
379 LIST_REMOVE(ifv, ifv_list);
380 mutex_exit(&ifv_list.lock);
381
382 mutex_destroy(&ifv->ifv_lock);
383 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
384 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
385 free(ifv, M_DEVBUF);
386
387 return rv;
388 }
389
390 static int
391 vlan_clone_destroy(struct ifnet *ifp)
392 {
393 struct ifvlan *ifv = ifp->if_softc;
394
395 mutex_enter(&ifv_list.lock);
396 LIST_REMOVE(ifv, ifv_list);
397 mutex_exit(&ifv_list.lock);
398
399 IFNET_LOCK(ifp);
400 vlan_unconfig(ifp);
401 IFNET_UNLOCK(ifp);
402 if_detach(ifp);
403
404 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
405 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
406 pserialize_destroy(ifv->ifv_psz);
407 mutex_destroy(&ifv->ifv_lock);
408 free(ifv, M_DEVBUF);
409
410 return 0;
411 }
412
413 /*
414 * Configure a VLAN interface.
415 */
416 static int
417 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
418 {
419 struct ifnet *ifp = &ifv->ifv_if;
420 struct ifvlan_linkmib *nmib = NULL;
421 struct ifvlan_linkmib *omib = NULL;
422 struct ifvlan_linkmib *checkmib;
423 struct psref_target *nmib_psref = NULL;
424 const uint16_t vid = EVL_VLANOFTAG(tag);
425 int error = 0;
426 int idx;
427 bool omib_cleanup = false;
428 struct psref psref;
429
430 /* VLAN ID 0 and 4095 are reserved in the spec */
431 if ((vid == 0) || (vid == 0xfff))
432 return EINVAL;
433
434 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
435 mutex_enter(&ifv->ifv_lock);
436 omib = ifv->ifv_mib;
437
438 if (omib->ifvm_p != NULL) {
439 error = EBUSY;
440 goto done;
441 }
442
443 /* Duplicate check */
444 checkmib = vlan_lookup_tag_psref(p, vid, &psref);
445 if (checkmib != NULL) {
446 vlan_putref_linkmib(checkmib, &psref);
447 error = EEXIST;
448 goto done;
449 }
450
451 *nmib = *omib;
452 nmib_psref = &nmib->ifvm_psref;
453
454 psref_target_init(nmib_psref, ifvm_psref_class);
455
456 switch (p->if_type) {
457 case IFT_ETHER:
458 {
459 struct ethercom *ec = (void *)p;
460 struct vlanid_list *vidmem;
461
462 nmib->ifvm_msw = &vlan_ether_multisw;
463 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
464 nmib->ifvm_mintu = ETHERMIN;
465
466 if (ec->ec_nvlans++ == 0) {
467 IFNET_LOCK(p);
468 error = ether_enable_vlan_mtu(p);
469 IFNET_UNLOCK(p);
470 if (error >= 0) {
471 if (error) {
472 ec->ec_nvlans--;
473 goto done;
474 }
475 nmib->ifvm_mtufudge = 0;
476 } else {
477 /*
478 * Fudge the MTU by the encapsulation size. This
479 * makes us incompatible with strictly compliant
480 * 802.1Q implementations, but allows us to use
481 * the feature with other NetBSD
482 * implementations, which might still be useful.
483 */
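				/*
				 * e.g. with this fudge a 1500-byte parent
				 * MTU yields a 1496-byte vlan MTU.
				 */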
484 nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
485 }
486 error = 0;
487 }
488 /*
489 * Add a vid to the list even if it's not enabled in case
490 * it's enabled later.
491 */
492 if (ec->ec_capabilities & ETHERCAP_VLAN_HWFILTER) {
493 vidmem = kmem_alloc(sizeof(struct vlanid_list),
494 KM_SLEEP);
495 if (vidmem == NULL){
496 ec->ec_nvlans--;
497 if (ec->ec_nvlans == 0) {
498 IFNET_LOCK(p);
499 (void)ether_disable_vlan_mtu(p);
500 IFNET_UNLOCK(p);
501 }
502 error = ENOMEM;
503 goto done;
504 }
505 vidmem->vid = vid;
506 mutex_enter(ec->ec_lock);
507 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list);
508 mutex_exit(ec->ec_lock);
509 }
510 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
511 if (ec->ec_vlan_cb != NULL) {
512 error = (*ec->ec_vlan_cb)(ec, vid, true);
513 if (error) {
514 ec->ec_nvlans--;
515 if (ec->ec_nvlans == 0) {
516 IFNET_LOCK(p);
517 (void)ether_disable_vlan_mtu(p);
518 IFNET_UNLOCK(p);
519 }
520 goto done;
521 }
522 }
523 }
524 /*
525 * If the parent interface can do hardware-assisted
526 * VLAN encapsulation, then propagate its hardware-
527 * assisted checksumming flags and tcp segmentation
528 * offload.
529 */
530 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
531 ifp->if_capabilities = p->if_capabilities &
532 (IFCAP_TSOv4 | IFCAP_TSOv6 |
533 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
534 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
535 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
536 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
537 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx);
538 }
539
540 /*
541 * We inherit the parent's Ethernet address.
542 */
543 ether_ifattach(ifp, CLLADDR(p->if_sadl));
544 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
545 break;
546 }
547
548 default:
549 error = EPROTONOSUPPORT;
550 goto done;
551 }
552
553 nmib->ifvm_p = p;
554 nmib->ifvm_tag = vid;
555 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
556 ifv->ifv_if.if_flags = p->if_flags &
557 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
558
559 /*
560 * Inherit the if_type from the parent. This allows us
561 * to participate in bridges of that type.
562 */
563 ifv->ifv_if.if_type = p->if_type;
564
565 PSLIST_ENTRY_INIT(ifv, ifv_hash);
566 idx = vlan_tag_hash(vid, ifv_hash.mask);
567
568 mutex_enter(&ifv_hash.lock);
569 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
570 mutex_exit(&ifv_hash.lock);
571
572 vlan_linkmib_update(ifv, nmib);
573 nmib = NULL;
574 nmib_psref = NULL;
575 omib_cleanup = true;
576
577 done:
578 mutex_exit(&ifv->ifv_lock);
579
580 if (nmib_psref)
581 psref_target_destroy(nmib_psref, ifvm_psref_class);
582 if (nmib)
583 kmem_free(nmib, sizeof(*nmib));
584 if (omib_cleanup)
585 kmem_free(omib, sizeof(*omib));
586
587 return error;
588 }
589
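
/*
 * For reference only: a hedged userland sketch of attaching a parent
 * and tag with the SIOCSETVLAN ioctl that vlan_ioctl() accepts.  The
 * interface names and tag are arbitrary; this is not part of the driver
 * and is not compiled.  (It would need <sys/socket.h>, <sys/ioctl.h>,
 * <net/if.h> and <net/if_vlanvar.h>.)
 */
#if 0
	struct ifreq ifr;
	struct vlanreq vlr;
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	memset(&vlr, 0, sizeof(vlr));
	strlcpy(ifr.ifr_name, "vlan0", sizeof(ifr.ifr_name));
	strlcpy(vlr.vlr_parent, "wm0", sizeof(vlr.vlr_parent));
	vlr.vlr_tag = 5;
	ifr.ifr_data = &vlr;
	if (ioctl(s, SIOCSETVLAN, &ifr) == -1)
		err(EXIT_FAILURE, "SIOCSETVLAN");
#endif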
590 /*
591 * Unconfigure a VLAN interface.
592 */
593 static void
594 vlan_unconfig(struct ifnet *ifp)
595 {
596 struct ifvlan *ifv = ifp->if_softc;
597 struct ifvlan_linkmib *nmib = NULL;
598 int error;
599
600 KASSERT(IFNET_LOCKED(ifp));
601
602 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
603
604 mutex_enter(&ifv->ifv_lock);
605 error = vlan_unconfig_locked(ifv, nmib);
606 mutex_exit(&ifv->ifv_lock);
607
608 if (error)
609 kmem_free(nmib, sizeof(*nmib));
610 }
611 static int
612 vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
613 {
614 struct ifnet *p;
615 struct ifnet *ifp = &ifv->ifv_if;
616 struct psref_target *nmib_psref = NULL;
617 struct ifvlan_linkmib *omib;
618 int error = 0;
619
620 KASSERT(IFNET_LOCKED(ifp));
621 KASSERT(mutex_owned(&ifv->ifv_lock));
622
623 ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);
624
625 omib = ifv->ifv_mib;
626 p = omib->ifvm_p;
627
628 if (p == NULL) {
629 error = -1;
630 goto done;
631 }
632
633 *nmib = *omib;
634 nmib_psref = &nmib->ifvm_psref;
635 psref_target_init(nmib_psref, ifvm_psref_class);
636
637 /*
638 * Since the interface is being unconfigured, we need to empty the
639 * list of multicast groups that we may have joined while we were
640 * alive and remove them from the parent's list also.
641 */
642 (*nmib->ifvm_msw->vmsw_purgemulti)(ifv);
643
644 /* Disconnect from parent. */
645 switch (p->if_type) {
646 case IFT_ETHER:
647 {
648 struct ethercom *ec = (void *)p;
649 struct vlanid_list *vlanidp, *tmpp;
650 uint16_t vid = EVL_VLANOFTAG(nmib->ifvm_tag);
651
652 mutex_enter(ec->ec_lock);
653 SIMPLEQ_FOREACH_SAFE(vlanidp, &ec->ec_vids, vid_list, tmpp) {
654 if (vlanidp->vid == vid) {
655 SIMPLEQ_REMOVE(&ec->ec_vids, vlanidp,
656 vlanid_list, vid_list);
657 kmem_free(vlanidp, sizeof(*vlanidp));
658 }
659 }
660 mutex_exit(ec->ec_lock);
661 if (ec->ec_vlan_cb != NULL)
662 (void)(*ec->ec_vlan_cb)(ec, vid, false);
663 if (--ec->ec_nvlans == 0) {
664 IFNET_LOCK(p);
665 (void)ether_disable_vlan_mtu(p);
666 IFNET_UNLOCK(p);
667 }
668
669 /* XXX ether_ifdetach must not be called with IFNET_LOCK */
670 mutex_exit(&ifv->ifv_lock);
671 IFNET_UNLOCK(ifp);
672 ether_ifdetach(ifp);
673 IFNET_LOCK(ifp);
674 mutex_enter(&ifv->ifv_lock);
675
676 /* if_free_sadl must be called with IFNET_LOCK */
677 if_free_sadl(ifp, 1);
678
679 /* Restore vlan_ioctl overwritten by ether_ifdetach */
680 ifp->if_ioctl = vlan_ioctl;
681 vlan_reset_linkname(ifp);
682 break;
683 }
684
685 default:
686 panic("%s: impossible", __func__);
687 }
688
689 nmib->ifvm_p = NULL;
690 ifv->ifv_if.if_mtu = 0;
691 ifv->ifv_flags = 0;
692
693 mutex_enter(&ifv_hash.lock);
694 PSLIST_WRITER_REMOVE(ifv, ifv_hash);
695 pserialize_perform(vlan_psz);
696 mutex_exit(&ifv_hash.lock);
697 PSLIST_ENTRY_DESTROY(ifv, ifv_hash);
698
699 vlan_linkmib_update(ifv, nmib);
700
701 mutex_exit(&ifv->ifv_lock);
702
703 nmib_psref = NULL;
704 kmem_free(omib, sizeof(*omib));
705
706 #ifdef INET6
707 KERNEL_LOCK_UNLESS_NET_MPSAFE();
708 /* To delete v6 link local addresses */
709 if (in6_present)
710 in6_ifdetach(ifp);
711 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
712 #endif
713
714 if ((ifp->if_flags & IFF_PROMISC) != 0)
715 vlan_safe_ifpromisc_locked(ifp, 0);
716 if_down_locked(ifp);
717 ifp->if_capabilities = 0;
718 mutex_enter(&ifv->ifv_lock);
719 done:
720
721 if (nmib_psref)
722 psref_target_destroy(nmib_psref, ifvm_psref_class);
723
724 return error;
725 }
726
727 static void
728 vlan_hash_init(void)
729 {
730
731 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
732 &ifv_hash.mask);
733 }
734
735 static int
736 vlan_hash_fini(void)
737 {
738 int i;
739
740 mutex_enter(&ifv_hash.lock);
741
742 for (i = 0; i < ifv_hash.mask + 1; i++) {
743 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
744 ifv_hash) != NULL) {
745 mutex_exit(&ifv_hash.lock);
746 return EBUSY;
747 }
748 }
749
750 for (i = 0; i < ifv_hash.mask + 1; i++)
751 PSLIST_DESTROY(&ifv_hash.lists[i]);
752
753 mutex_exit(&ifv_hash.lock);
754
755 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);
756
757 ifv_hash.lists = NULL;
758 ifv_hash.mask = 0;
759
760 return 0;
761 }
762
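/*
 * Hash a 12-bit VLAN ID into the ifv_hash bucket array; "mask" is
 * ifv_hash.mask as set up by hashinit() in vlan_hash_init().
 */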
763 static int
764 vlan_tag_hash(uint16_t tag, u_long mask)
765 {
766 uint32_t hash;
767
768 hash = (tag >> 8) ^ tag;
769 hash = (hash >> 2) ^ hash;
770
771 return hash & mask;
772 }
773
774 static struct ifvlan_linkmib *
775 vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
776 {
777 struct ifvlan_linkmib *mib;
778 int s;
779
780 s = pserialize_read_enter();
781 mib = sc->ifv_mib;
782 if (mib == NULL) {
783 pserialize_read_exit(s);
784 return NULL;
785 }
786 membar_datadep_consumer();
787 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
788 pserialize_read_exit(s);
789
790 return mib;
791 }
792
793 static void
794 vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
795 {
796 if (mib == NULL)
797 return;
798 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
799 }
800
801 static struct ifvlan_linkmib *
802 vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
803 {
804 int idx;
805 int s;
806 struct ifvlan *sc;
807
808 idx = vlan_tag_hash(tag, ifv_hash.mask);
809
810 s = pserialize_read_enter();
811 PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
812 ifv_hash) {
813 struct ifvlan_linkmib *mib = sc->ifv_mib;
814 if (mib == NULL)
815 continue;
816 if (mib->ifvm_tag != tag)
817 continue;
818 if (mib->ifvm_p != ifp)
819 continue;
820
821 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
822 pserialize_read_exit(s);
823 return mib;
824 }
825 pserialize_read_exit(s);
826 return NULL;
827 }
828
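/*
 * Publish nmib as the new ifv_mib and retire the old one: the pointer
 * switch is ordered with membar_producer(), pserialize_perform() waits
 * out current pserialize readers, and psref_target_destroy() waits for
 * any remaining psref holders of the old mib.  Called with ifv_lock held.
 */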
829 static void
830 vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
831 {
832 struct ifvlan_linkmib *omib = ifv->ifv_mib;
833
834 KASSERT(mutex_owned(&ifv->ifv_lock));
835
836 membar_producer();
837 ifv->ifv_mib = nmib;
838
839 pserialize_perform(ifv->ifv_psz);
840 psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
841 }
842
843 /*
844 * Called when a parent interface is detaching; destroy any VLAN
845 * configuration for the parent interface.
846 */
847 void
848 vlan_ifdetach(struct ifnet *p)
849 {
850 struct ifvlan *ifv;
851 struct ifvlan_linkmib *mib, **nmibs;
852 struct psref psref;
853 int error;
854 int bound;
855 int i, cnt = 0;
856
857 bound = curlwp_bind();
858
859 mutex_enter(&ifv_list.lock);
860 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
861 mib = vlan_getref_linkmib(ifv, &psref);
862 if (mib == NULL)
863 continue;
864
865 if (mib->ifvm_p == p)
866 cnt++;
867
868 vlan_putref_linkmib(mib, &psref);
869 }
870 mutex_exit(&ifv_list.lock);
871
872 if (cnt == 0) {
873 curlwp_bindx(bound);
874 return;
875 }
876
877 /*
878 * The value of "cnt" does not increase while ifv_list.lock
879 * and ifv->ifv_lock are released here, because the parent
880 * interface is detaching.
881 */
882 nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
883 for (i = 0; i < cnt; i++) {
884 nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
885 }
886
887 mutex_enter(&ifv_list.lock);
888
889 i = 0;
890 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
891 struct ifnet *ifp = &ifv->ifv_if;
892
893 /* IFNET_LOCK must be held before ifv_lock. */
894 IFNET_LOCK(ifp);
895 mutex_enter(&ifv->ifv_lock);
896
897 /* XXX ifv_mib = NULL? */
898 if (ifv->ifv_mib->ifvm_p == p) {
899 KASSERTMSG(i < cnt,
900 "no memory for unconfig, parent=%s", p->if_xname);
901 error = vlan_unconfig_locked(ifv, nmibs[i]);
902 if (!error) {
903 nmibs[i] = NULL;
904 i++;
905 }
906
907 }
908
909 mutex_exit(&ifv->ifv_lock);
910 IFNET_UNLOCK(ifp);
911 }
912
913 mutex_exit(&ifv_list.lock);
914
915 curlwp_bindx(bound);
916
917 for (i = 0; i < cnt; i++) {
918 if (nmibs[i])
919 kmem_free(nmibs[i], sizeof(*nmibs[i]));
920 }
921
922 kmem_free(nmibs, sizeof(*nmibs) * cnt);
923
924 return;
925 }
926
927 static int
928 vlan_set_promisc(struct ifnet *ifp)
929 {
930 struct ifvlan *ifv = ifp->if_softc;
931 struct ifvlan_linkmib *mib;
932 struct psref psref;
933 int error = 0;
934 int bound;
935
936 bound = curlwp_bind();
937 mib = vlan_getref_linkmib(ifv, &psref);
938 if (mib == NULL) {
939 curlwp_bindx(bound);
940 return EBUSY;
941 }
942
943 if ((ifp->if_flags & IFF_PROMISC) != 0) {
944 if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
945 error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
946 if (error == 0)
947 ifv->ifv_flags |= IFVF_PROMISC;
948 }
949 } else {
950 if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
951 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
952 if (error == 0)
953 ifv->ifv_flags &= ~IFVF_PROMISC;
954 }
955 }
956 vlan_putref_linkmib(mib, &psref);
957 curlwp_bindx(bound);
958
959 return error;
960 }
961
962 static int
963 vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
964 {
965 struct lwp *l = curlwp;
966 struct ifvlan *ifv = ifp->if_softc;
967 struct ifaddr *ifa = (struct ifaddr *) data;
968 struct ifreq *ifr = (struct ifreq *) data;
969 struct ifnet *pr;
970 struct ifcapreq *ifcr;
971 struct vlanreq vlr;
972 struct ifvlan_linkmib *mib;
973 struct psref psref;
974 int error = 0;
975 int bound;
976
977 switch (cmd) {
978 case SIOCSIFMTU:
979 bound = curlwp_bind();
980 mib = vlan_getref_linkmib(ifv, &psref);
981 if (mib == NULL) {
982 curlwp_bindx(bound);
983 error = EBUSY;
984 break;
985 }
986
987 if (mib->ifvm_p == NULL) {
988 vlan_putref_linkmib(mib, &psref);
989 curlwp_bindx(bound);
990 error = EINVAL;
991 } else if (
992 ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
993 ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
994 vlan_putref_linkmib(mib, &psref);
995 curlwp_bindx(bound);
996 error = EINVAL;
997 } else {
998 vlan_putref_linkmib(mib, &psref);
999 curlwp_bindx(bound);
1000
1001 error = ifioctl_common(ifp, cmd, data);
1002 if (error == ENETRESET)
1003 error = 0;
1004 }
1005
1006 break;
1007
1008 case SIOCSETVLAN:
1009 if ((error = kauth_authorize_network(l->l_cred,
1010 KAUTH_NETWORK_INTERFACE,
1011 KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
1012 NULL)) != 0)
1013 break;
1014 if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
1015 break;
1016
1017 if (vlr.vlr_parent[0] == '\0') {
1018 bound = curlwp_bind();
1019 mib = vlan_getref_linkmib(ifv, &psref);
1020 if (mib == NULL) {
1021 curlwp_bindx(bound);
1022 error = EBUSY;
1023 break;
1024 }
1025
1026 if (mib->ifvm_p != NULL &&
1027 (ifp->if_flags & IFF_PROMISC) != 0)
1028 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
1029
1030 vlan_putref_linkmib(mib, &psref);
1031 curlwp_bindx(bound);
1032
1033 vlan_unconfig(ifp);
1034 break;
1035 }
1036 if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
1037 error = EINVAL; /* check for valid tag */
1038 break;
1039 }
1040 if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
1041 error = ENOENT;
1042 break;
1043 }
1044
1045 error = vlan_config(ifv, pr, vlr.vlr_tag);
1046 if (error != 0)
1047 break;
1048
1049 /* Update promiscuous mode, if necessary. */
1050 vlan_set_promisc(ifp);
1051
1052 ifp->if_flags |= IFF_RUNNING;
1053 break;
1054
1055 case SIOCGETVLAN:
1056 memset(&vlr, 0, sizeof(vlr));
1057 bound = curlwp_bind();
1058 mib = vlan_getref_linkmib(ifv, &psref);
1059 if (mib == NULL) {
1060 curlwp_bindx(bound);
1061 error = EBUSY;
1062 break;
1063 }
1064 if (mib->ifvm_p != NULL) {
1065 snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
1066 mib->ifvm_p->if_xname);
1067 vlr.vlr_tag = mib->ifvm_tag;
1068 }
1069 vlan_putref_linkmib(mib, &psref);
1070 curlwp_bindx(bound);
1071 error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
1072 break;
1073
1074 case SIOCSIFFLAGS:
1075 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1076 break;
1077 /*
1078		 * We enable promiscuous mode on the parent interface
1079		 * whenever promiscuous mode is needed on the VLAN interface.
1080 */
1081 bound = curlwp_bind();
1082 mib = vlan_getref_linkmib(ifv, &psref);
1083 if (mib == NULL) {
1084 curlwp_bindx(bound);
1085 error = EBUSY;
1086 break;
1087 }
1088
1089 if (mib->ifvm_p != NULL)
1090 error = vlan_set_promisc(ifp);
1091 vlan_putref_linkmib(mib, &psref);
1092 curlwp_bindx(bound);
1093 break;
1094
1095 case SIOCADDMULTI:
1096 mutex_enter(&ifv->ifv_lock);
1097 mib = ifv->ifv_mib;
1098 if (mib == NULL) {
1099 error = EBUSY;
1100 mutex_exit(&ifv->ifv_lock);
1101 break;
1102 }
1103
1104 error = (mib->ifvm_p != NULL) ?
1105 (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
1106 mib = NULL;
1107 mutex_exit(&ifv->ifv_lock);
1108 break;
1109
1110 case SIOCDELMULTI:
1111 mutex_enter(&ifv->ifv_lock);
1112 mib = ifv->ifv_mib;
1113 if (mib == NULL) {
1114 error = EBUSY;
1115 mutex_exit(&ifv->ifv_lock);
1116 break;
1117 }
1118 error = (mib->ifvm_p != NULL) ?
1119 (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
1120 mib = NULL;
1121 mutex_exit(&ifv->ifv_lock);
1122 break;
1123
1124 case SIOCSIFCAP:
1125 ifcr = data;
1126 /* make sure caps are enabled on parent */
1127 bound = curlwp_bind();
1128 mib = vlan_getref_linkmib(ifv, &psref);
1129 if (mib == NULL) {
1130 curlwp_bindx(bound);
1131 error = EBUSY;
1132 break;
1133 }
1134
1135 if (mib->ifvm_p == NULL) {
1136 vlan_putref_linkmib(mib, &psref);
1137 curlwp_bindx(bound);
1138 error = EINVAL;
1139 break;
1140 }
1141 if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
1142 ifcr->ifcr_capenable) {
1143 vlan_putref_linkmib(mib, &psref);
1144 curlwp_bindx(bound);
1145 error = EINVAL;
1146 break;
1147 }
1148
1149 vlan_putref_linkmib(mib, &psref);
1150 curlwp_bindx(bound);
1151
1152 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
1153 error = 0;
1154 break;
1155 case SIOCINITIFADDR:
1156 bound = curlwp_bind();
1157 mib = vlan_getref_linkmib(ifv, &psref);
1158 if (mib == NULL) {
1159 curlwp_bindx(bound);
1160 error = EBUSY;
1161 break;
1162 }
1163
1164 if (mib->ifvm_p == NULL) {
1165 error = EINVAL;
1166 vlan_putref_linkmib(mib, &psref);
1167 curlwp_bindx(bound);
1168 break;
1169 }
1170 vlan_putref_linkmib(mib, &psref);
1171 curlwp_bindx(bound);
1172
1173 ifp->if_flags |= IFF_UP;
1174 #ifdef INET
1175 if (ifa->ifa_addr->sa_family == AF_INET)
1176 arp_ifinit(ifp, ifa);
1177 #endif
1178 break;
1179
1180 default:
1181 error = ether_ioctl(ifp, cmd, data);
1182 }
1183
1184 return error;
1185 }
1186
1187 static int
1188 vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
1189 {
1190 const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1191 struct vlan_mc_entry *mc;
1192 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1193 struct ifvlan_linkmib *mib;
1194 int error;
1195
1196 KASSERT(mutex_owned(&ifv->ifv_lock));
1197
1198 if (sa->sa_len > sizeof(struct sockaddr_storage))
1199 return EINVAL;
1200
1201 error = ether_addmulti(sa, &ifv->ifv_ec);
1202 if (error != ENETRESET)
1203 return error;
1204
1205 /*
1206	 * This is a new multicast address. We have to tell the parent
1207 * about it. Also, remember this multicast address so that
1208 * we can delete it on unconfigure.
1209 */
1210 mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
1211 if (mc == NULL) {
1212 error = ENOMEM;
1213 goto alloc_failed;
1214 }
1215
1216 /*
1217 * Since ether_addmulti() returned ENETRESET, the following two
1218 * statements shouldn't fail. Here ifv_ec is implicitly protected
1219 * by the ifv_lock lock.
1220 */
1221 error = ether_multiaddr(sa, addrlo, addrhi);
1222 KASSERT(error == 0);
1223
1224 ETHER_LOCK(&ifv->ifv_ec);
1225 mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1226 ETHER_UNLOCK(&ifv->ifv_ec);
1227
1228 KASSERT(mc->mc_enm != NULL);
1229
1230 memcpy(&mc->mc_addr, sa, sa->sa_len);
1231 LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);
1232
1233 mib = ifv->ifv_mib;
1234
1235 KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1236 error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
1237 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1238
1239 if (error != 0)
1240 goto ioctl_failed;
1241 return error;
1242
1243 ioctl_failed:
1244 LIST_REMOVE(mc, mc_entries);
1245 free(mc, M_DEVBUF);
1246
1247 alloc_failed:
1248 (void)ether_delmulti(sa, &ifv->ifv_ec);
1249 return error;
1250 }
1251
1252 static int
1253 vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
1254 {
1255 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1256 struct ether_multi *enm;
1257 struct vlan_mc_entry *mc;
1258 struct ifvlan_linkmib *mib;
1259 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1260 int error;
1261
1262 KASSERT(mutex_owned(&ifv->ifv_lock));
1263
1264 /*
1265	 * Find a key to look up the vlan_mc_entry. We have to do this
1266 * before calling ether_delmulti for obvious reasons.
1267 */
1268 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
1269 return error;
1270
1271 ETHER_LOCK(&ifv->ifv_ec);
1272 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1273 ETHER_UNLOCK(&ifv->ifv_ec);
1274 if (enm == NULL)
1275 return EINVAL;
1276
1277 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
1278 if (mc->mc_enm == enm)
1279 break;
1280 }
1281
1282	/* We won't delete entries we didn't add */
1283 if (mc == NULL)
1284 return EINVAL;
1285
1286 error = ether_delmulti(sa, &ifv->ifv_ec);
1287 if (error != ENETRESET)
1288 return error;
1289
1290	/* We no longer use this multicast address. Tell the parent so. */
1291 mib = ifv->ifv_mib;
1292 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);
1293
1294 if (error == 0) {
1295 /* And forget about this address. */
1296 LIST_REMOVE(mc, mc_entries);
1297 free(mc, M_DEVBUF);
1298 } else {
1299 (void)ether_addmulti(sa, &ifv->ifv_ec);
1300 }
1301
1302 return error;
1303 }
1304
1305 /*
1306  * Delete from the parent interface any multicast addresses we have
1307  * asked it to add. Called when the vlan is being unconfigured.
1308 */
1309 static void
1310 vlan_ether_purgemulti(struct ifvlan *ifv)
1311 {
1312 struct vlan_mc_entry *mc;
1313 struct ifvlan_linkmib *mib;
1314
1315 KASSERT(mutex_owned(&ifv->ifv_lock));
1316 mib = ifv->ifv_mib;
1317 if (mib == NULL) {
1318 return;
1319 }
1320
1321 while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
1322 (void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
1323 sstocsa(&mc->mc_addr));
1324 LIST_REMOVE(mc, mc_entries);
1325 free(mc, M_DEVBUF);
1326 }
1327 }
1328
1329 static void
1330 vlan_start(struct ifnet *ifp)
1331 {
1332 struct ifvlan *ifv = ifp->if_softc;
1333 struct ifnet *p;
1334 struct ethercom *ec;
1335 struct mbuf *m;
1336 struct ifvlan_linkmib *mib;
1337 struct psref psref;
1338 int error;
1339
1340 mib = vlan_getref_linkmib(ifv, &psref);
1341 if (mib == NULL)
1342 return;
1343 p = mib->ifvm_p;
1344 ec = (void *)mib->ifvm_p;
1345
1346 ifp->if_flags |= IFF_OACTIVE;
1347
1348 for (;;) {
1349 IFQ_DEQUEUE(&ifp->if_snd, m);
1350 if (m == NULL)
1351 break;
1352
1353 #ifdef ALTQ
1354 /*
1355 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
1356 * defined.
1357 */
1358 KERNEL_LOCK(1, NULL);
1359 /*
1360 * If ALTQ is enabled on the parent interface, do
1361 * classification; the queueing discipline might
1362 * not require classification, but might require
1363 * the address family/header pointer in the pktattr.
1364 */
1365 if (ALTQ_IS_ENABLED(&p->if_snd)) {
1366 switch (p->if_type) {
1367 case IFT_ETHER:
1368 altq_etherclassify(&p->if_snd, m);
1369 break;
1370 default:
1371 panic("%s: impossible (altq)", __func__);
1372 }
1373 }
1374 KERNEL_UNLOCK_ONE(NULL);
1375 #endif /* ALTQ */
1376
1377 bpf_mtap(ifp, m, BPF_D_OUT);
1378 /*
1379 * If the parent can insert the tag itself, just mark
1380 * the tag in the mbuf header.
1381 */
1382 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1383 vlan_set_tag(m, mib->ifvm_tag);
1384 } else {
1385 /*
1386 * insert the tag ourselves
1387 */
1388 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1389 if (m == NULL) {
1390 printf("%s: unable to prepend encap header",
1391 p->if_xname);
1392 ifp->if_oerrors++;
1393 continue;
1394 }
1395
1396 switch (p->if_type) {
1397 case IFT_ETHER:
1398 {
1399 struct ether_vlan_header *evl;
1400
1401 if (m->m_len < sizeof(struct ether_vlan_header))
1402 m = m_pullup(m,
1403 sizeof(struct ether_vlan_header));
1404 if (m == NULL) {
1405 printf("%s: unable to pullup encap "
1406 "header", p->if_xname);
1407 ifp->if_oerrors++;
1408 continue;
1409 }
1410
1411 /*
1412 * Transform the Ethernet header into an
1413 * Ethernet header with 802.1Q encapsulation.
1414 */
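				/*
				 * | dst | src | type | data...        becomes
				 * | dst | src | 0x8100 | tag | type | data...
				 */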
1415 memmove(mtod(m, void *),
1416 mtod(m, char *) + mib->ifvm_encaplen,
1417 sizeof(struct ether_header));
1418 evl = mtod(m, struct ether_vlan_header *);
1419 evl->evl_proto = evl->evl_encap_proto;
1420 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1421 evl->evl_tag = htons(mib->ifvm_tag);
1422
1423 /*
1424 * To cater for VLAN-aware layer 2 ethernet
1425 * switches which may need to strip the tag
1426 * before forwarding the packet, make sure
1427 * the packet+tag is at least 68 bytes long.
1428 * This is necessary because our parent will
1429 * only pad to 64 bytes (ETHER_MIN_LEN) and
1430 * some switches will not pad by themselves
1431 * after deleting a tag.
1432 */
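				/*
				 * With ETHER_MIN_LEN 64, ETHER_CRC_LEN 4 and
				 * ETHER_VLAN_ENCAP_LEN 4, min_data_len below
				 * is 64 bytes excluding the CRC, i.e. the 68
				 * bytes mentioned above once the CRC is added.
				 */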
1433 const size_t min_data_len = ETHER_MIN_LEN -
1434 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1435 if (m->m_pkthdr.len < min_data_len) {
1436 m_copyback(m, m->m_pkthdr.len,
1437 min_data_len - m->m_pkthdr.len,
1438 vlan_zero_pad_buff);
1439 }
1440 break;
1441 }
1442
1443 default:
1444 panic("%s: impossible", __func__);
1445 }
1446 }
1447
1448 if ((p->if_flags & IFF_RUNNING) == 0) {
1449 m_freem(m);
1450 continue;
1451 }
1452
1453 error = if_transmit_lock(p, m);
1454 if (error) {
1455 /* mbuf is already freed */
1456 ifp->if_oerrors++;
1457 continue;
1458 }
1459 ifp->if_opackets++;
1460 }
1461
1462 ifp->if_flags &= ~IFF_OACTIVE;
1463
1464 /* Remove reference to mib before release */
1465 vlan_putref_linkmib(mib, &psref);
1466 }
1467
1468 static int
1469 vlan_transmit(struct ifnet *ifp, struct mbuf *m)
1470 {
1471 struct ifvlan *ifv = ifp->if_softc;
1472 struct ifnet *p;
1473 struct ethercom *ec;
1474 struct ifvlan_linkmib *mib;
1475 struct psref psref;
1476 int error;
1477 size_t pktlen = m->m_pkthdr.len;
1478 bool mcast = (m->m_flags & M_MCAST) != 0;
1479
1480 mib = vlan_getref_linkmib(ifv, &psref);
1481 if (mib == NULL) {
1482 m_freem(m);
1483 return ENETDOWN;
1484 }
1485
1486 p = mib->ifvm_p;
1487 ec = (void *)mib->ifvm_p;
1488
1489 bpf_mtap(ifp, m, BPF_D_OUT);
1490
1491 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
1492 goto out;
1493 if (m == NULL)
1494 goto out;
1495
1496 /*
1497 * If the parent can insert the tag itself, just mark
1498 * the tag in the mbuf header.
1499 */
1500 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1501 vlan_set_tag(m, mib->ifvm_tag);
1502 } else {
1503 /*
1504 * insert the tag ourselves
1505 */
1506 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1507 if (m == NULL) {
1508 printf("%s: unable to prepend encap header",
1509 p->if_xname);
1510 ifp->if_oerrors++;
1511 error = ENOBUFS;
1512 goto out;
1513 }
1514
1515 switch (p->if_type) {
1516 case IFT_ETHER:
1517 {
1518 struct ether_vlan_header *evl;
1519
1520 if (m->m_len < sizeof(struct ether_vlan_header))
1521 m = m_pullup(m,
1522 sizeof(struct ether_vlan_header));
1523 if (m == NULL) {
1524 printf("%s: unable to pullup encap "
1525 "header", p->if_xname);
1526 ifp->if_oerrors++;
1527 error = ENOBUFS;
1528 goto out;
1529 }
1530
1531 /*
1532 * Transform the Ethernet header into an
1533 * Ethernet header with 802.1Q encapsulation.
1534 */
1535 memmove(mtod(m, void *),
1536 mtod(m, char *) + mib->ifvm_encaplen,
1537 sizeof(struct ether_header));
1538 evl = mtod(m, struct ether_vlan_header *);
1539 evl->evl_proto = evl->evl_encap_proto;
1540 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1541 evl->evl_tag = htons(mib->ifvm_tag);
1542
1543 /*
1544 * To cater for VLAN-aware layer 2 ethernet
1545 * switches which may need to strip the tag
1546 * before forwarding the packet, make sure
1547 * the packet+tag is at least 68 bytes long.
1548 * This is necessary because our parent will
1549 * only pad to 64 bytes (ETHER_MIN_LEN) and
1550 * some switches will not pad by themselves
1551 * after deleting a tag.
1552 */
1553 const size_t min_data_len = ETHER_MIN_LEN -
1554 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1555 if (m->m_pkthdr.len < min_data_len) {
1556 m_copyback(m, m->m_pkthdr.len,
1557 min_data_len - m->m_pkthdr.len,
1558 vlan_zero_pad_buff);
1559 }
1560 break;
1561 }
1562
1563 default:
1564 panic("%s: impossible", __func__);
1565 }
1566 }
1567
1568 if ((p->if_flags & IFF_RUNNING) == 0) {
1569 m_freem(m);
1570 error = ENETDOWN;
1571 goto out;
1572 }
1573
1574 error = if_transmit_lock(p, m);
1575 if (error) {
1576 /* mbuf is already freed */
1577 ifp->if_oerrors++;
1578 } else {
1579
1580 ifp->if_opackets++;
1581 ifp->if_obytes += pktlen;
1582 if (mcast)
1583 ifp->if_omcasts++;
1584 }
1585
1586 out:
1587 /* Remove reference to mib before release */
1588 vlan_putref_linkmib(mib, &psref);
1589 return error;
1590 }
1591
1592 /*
1593 * Given an Ethernet frame, find a valid vlan interface corresponding to the
1594  * given source interface and tag, then pass the decapsulated packet
1595  * to that vlan interface's input routine.
1596 */
1597 void
1598 vlan_input(struct ifnet *ifp, struct mbuf *m)
1599 {
1600 struct ifvlan *ifv;
1601 uint16_t vid;
1602 struct ifvlan_linkmib *mib;
1603 struct psref psref;
1604 bool have_vtag;
1605
1606 have_vtag = vlan_has_tag(m);
1607 if (have_vtag) {
1608 vid = EVL_VLANOFTAG(vlan_get_tag(m));
1609 m->m_flags &= ~M_VLANTAG;
1610 } else {
1611 struct ether_vlan_header *evl;
1612
1613 if (ifp->if_type != IFT_ETHER) {
1614 panic("%s: impossible", __func__);
1615 }
1616
1617 if (m->m_len < sizeof(struct ether_vlan_header) &&
1618 (m = m_pullup(m,
1619 sizeof(struct ether_vlan_header))) == NULL) {
1620 printf("%s: no memory for VLAN header, "
1621 "dropping packet.\n", ifp->if_xname);
1622 return;
1623 }
1624 evl = mtod(m, struct ether_vlan_header *);
1625 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1626
1627 vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));
1628
1629 /*
1630 * Restore the original ethertype. We'll remove
1631 * the encapsulation after we've found the vlan
1632 * interface corresponding to the tag.
1633 */
1634 evl->evl_encap_proto = evl->evl_proto;
1635 }
1636
1637 mib = vlan_lookup_tag_psref(ifp, vid, &psref);
1638 if (mib == NULL) {
1639 m_freem(m);
1640 ifp->if_noproto++;
1641 return;
1642 }
1643 KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);
1644
1645 ifv = mib->ifvm_ifvlan;
1646 if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
1647 (IFF_UP | IFF_RUNNING)) {
1648 m_freem(m);
1649 ifp->if_noproto++;
1650 goto out;
1651 }
1652
1653 /*
1654 * Now, remove the encapsulation header. The original
1655 * header has already been fixed up above.
1656 */
1657 if (!have_vtag) {
1658 memmove(mtod(m, char *) + mib->ifvm_encaplen,
1659 mtod(m, void *), sizeof(struct ether_header));
1660 m_adj(m, mib->ifvm_encaplen);
1661 }
1662
1663 m_set_rcvif(m, &ifv->ifv_if);
1664 ifv->ifv_if.if_ipackets++;
1665
1666 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
1667 goto out;
1668 if (m == NULL)
1669 goto out;
1670
1671 m->m_flags &= ~M_PROMISC;
1672 if_input(&ifv->ifv_if, m);
1673 out:
1674 vlan_putref_linkmib(mib, &psref);
1675 }
1676
1677 /*
1678 * Module infrastructure
1679 */
1680 #include "if_module.h"
1681
1682 IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)
1683