/*	$NetBSD: if_vlan.c,v 1.130.2.3 2020/04/13 08:05:15 martin Exp $	*/

/*
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.  Might be
 * extended some day to also handle IEEE 802.1P priority tagging.  This is
 * sort of sneaky in the implementation, since we need to pretend to be
 * enough of an Ethernet implementation to make ARP work.  The way we do
 * this is by telling everyone that we are an Ethernet interface, and then
 * catch the packets that ether_output() left on our output queue when it
 * calls if_start(), rewrite them for use by the real outgoing interface,
 * and ask it to send them.
 *
 * TODO:
 *
 *    - Need some way to notify vlan interfaces when the parent
 *	interface changes MTU.
 */
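
/*
 * Illustrative usage sketch (not part of the driver): a vlan interface is
 * normally configured from userland roughly like this, assuming a parent
 * interface named "wm0" and VLAN ID 5:
 *
 *	ifconfig vlan0 create
 *	ifconfig vlan0 vlan 5 vlanif wm0
 *
 * The second command reaches vlan_ioctl() below as SIOCSETVLAN and ends up
 * in vlan_config(); "ifconfig vlan0 -vlanif" detaches the parent again via
 * vlan_unconfig().
 */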

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.130.2.3 2020/04/13 08:05:15 martin Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/pserialize.h>
#include <sys/psref.h>
#include <sys/pslist.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/module.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_ether.h>
#include <net/if_vlanvar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif
#ifdef INET6
#include <netinet6/in6_ifattach.h>
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>
#endif

#include "ioconf.h"

struct vlan_mc_entry {
	LIST_ENTRY(vlan_mc_entry) mc_entries;
	/*
	 * A key to identify this entry.  The mc_addr below can't be
	 * used, since multiple sockaddrs may be mapped to the same
	 * ether_multi (e.g., AF_UNSPEC).
	 */
	struct ether_multi *mc_enm;
	struct sockaddr_storage mc_addr;
};

struct ifvlan_linkmib {
	struct ifvlan *ifvm_ifvlan;
	const struct vlan_multisw *ifvm_msw;
	int	ifvm_encaplen;		/* encapsulation length */
	int	ifvm_mtufudge;		/* MTU fudged by this much */
	int	ifvm_mintu;		/* min transmission unit */
	uint16_t ifvm_proto;		/* encapsulation ethertype */
	uint16_t ifvm_tag;		/* tag to apply on packets */
	struct ifnet *ifvm_p;		/* parent interface of this vlan */

	struct psref_target ifvm_psref;
};

struct ifvlan {
	struct ethercom ifv_ec;
	struct ifvlan_linkmib *ifv_mib;	/*
					 * reader must use vlan_getref_linkmib()
					 * instead of direct dereference
					 */
	kmutex_t ifv_lock;		/* writer lock for ifv_mib */
	pserialize_t ifv_psz;

	LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
	LIST_ENTRY(ifvlan) ifv_list;
	struct pslist_entry ifv_hash;
	int ifv_flags;
};
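
/*
 * Reader-side access sketch (illustrative, summarizing the helpers below,
 * not a new API): ifv_mib is published with pserialize(9) and reference
 * counted with psref(9), so code running outside ifv_lock takes a
 * reference before dereferencing it, roughly like this:
 *
 *	struct psref psref;
 *	int bound = curlwp_bind();
 *	struct ifvlan_linkmib *mib = vlan_getref_linkmib(ifv, &psref);
 *	if (mib != NULL) {
 *		... read mib->ifvm_p, mib->ifvm_tag, etc. ...
 *		vlan_putref_linkmib(mib, &psref);
 *	}
 *	curlwp_bindx(bound);
 *
 * Writers hold ifv_lock, swap in a new mib and retire the old one via
 * vlan_linkmib_update().
 */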

#define	IFVF_PROMISC	0x01		/* promiscuous mode enabled */

#define	ifv_if		ifv_ec.ec_if

#define	ifv_msw		ifv_mib.ifvm_msw
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu
#define	ifv_tag		ifv_mib.ifvm_tag

struct vlan_multisw {
	int	(*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
	int	(*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
	void	(*vmsw_purgemulti)(struct ifvlan *);
};

static int	vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
static int	vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
static void	vlan_ether_purgemulti(struct ifvlan *);

const struct vlan_multisw vlan_ether_multisw = {
	.vmsw_addmulti = vlan_ether_addmulti,
	.vmsw_delmulti = vlan_ether_delmulti,
	.vmsw_purgemulti = vlan_ether_purgemulti,
};

static int	vlan_clone_create(struct if_clone *, int);
static int	vlan_clone_destroy(struct ifnet *);
static int	vlan_config(struct ifvlan *, struct ifnet *, uint16_t);
static int	vlan_ioctl(struct ifnet *, u_long, void *);
static void	vlan_start(struct ifnet *);
static int	vlan_transmit(struct ifnet *, struct mbuf *);
static void	vlan_unconfig(struct ifnet *);
static int	vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *);
static void	vlan_hash_init(void);
static int	vlan_hash_fini(void);
static int	vlan_tag_hash(uint16_t, u_long);
static struct ifvlan_linkmib *vlan_getref_linkmib(struct ifvlan *,
    struct psref *);
static void	vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *);
static void	vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *);
static struct ifvlan_linkmib *vlan_lookup_tag_psref(struct ifnet *,
    uint16_t, struct psref *);

static struct {
	kmutex_t lock;
	LIST_HEAD(vlan_ifvlist, ifvlan) list;
} ifv_list __cacheline_aligned;


#if !defined(VLAN_TAG_HASH_SIZE)
#define VLAN_TAG_HASH_SIZE 32
#endif
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
	u_long mask;
} ifv_hash __cacheline_aligned = {
	.lists = NULL,
	.mask = 0,
};

pserialize_t vlan_psz __read_mostly;
static struct psref_class *ifvm_psref_class __read_mostly;

struct if_clone vlan_cloner =
    IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);

/* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */
static char vlan_zero_pad_buff[ETHER_MIN_LEN];

static inline int
vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
{
	int e;

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	e = ifpromisc(ifp, pswitch);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return e;
}

static inline int
vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
{
	int e;

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	e = ifpromisc_locked(ifp, pswitch);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return e;
}

void
vlanattach(int n)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in vlaninit() below.
	 */
}

static void
vlaninit(void)
{
	mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&ifv_list.list);

	mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	vlan_psz = pserialize_create();
	ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
	if_clone_attach(&vlan_cloner);

	vlan_hash_init();
	MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input);
}

static int
vlandetach(void)
{
	bool is_empty;
	int error;

	mutex_enter(&ifv_list.lock);
	is_empty = LIST_EMPTY(&ifv_list.list);
	mutex_exit(&ifv_list.lock);

	if (!is_empty)
		return EBUSY;

	error = vlan_hash_fini();
	if (error != 0)
		return error;

	if_clone_detach(&vlan_cloner);
	psref_class_destroy(ifvm_psref_class);
	pserialize_destroy(vlan_psz);
	mutex_destroy(&ifv_hash.lock);
	mutex_destroy(&ifv_list.lock);

	MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
	return 0;
}
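
/*
 * Module note (illustrative): IF_MODULE() at the bottom of this file hooks
 * vlaninit()/vlandetach() into the "if_vlan" kernel module, so from
 * userland loading and unloading is roughly:
 *
 *	modload if_vlan
 *	modunload if_vlan	(fails with EBUSY while vlan interfaces
 *				 exist, per the checks in vlandetach())
 */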

static void
vlan_reset_linkname(struct ifnet *ifp)
{

	/*
	 * We start out with a "802.1Q VLAN" type and zero-length
	 * addresses.  When we attach to a parent interface, we
	 * inherit its type, address length, address, and data link
	 * type.
	 */

	ifp->if_type = IFT_L2VLAN;
	ifp->if_addrlen = 0;
	ifp->if_dlt = DLT_NULL;
	if_alloc_sadl(ifp);
}

static int
vlan_clone_create(struct if_clone *ifc, int unit)
{
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifvlan_linkmib *mib;
	int rv;

	ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO);
	mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
	ifp = &ifv->ifv_if;
	LIST_INIT(&ifv->ifv_mc_listhead);

	mib->ifvm_ifvlan = ifv;
	mib->ifvm_p = NULL;
	psref_target_init(&mib->ifvm_psref, ifvm_psref_class);

	mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
	ifv->ifv_psz = pserialize_create();
	ifv->ifv_mib = mib;

	mutex_enter(&ifv_list.lock);
	LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
	mutex_exit(&ifv_list.lock);

	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_softc = ifv;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
#ifdef NET_MPSAFE
	ifp->if_extflags |= IFEF_MPSAFE;
#endif
	ifp->if_start = vlan_start;
	ifp->if_transmit = vlan_transmit;
	ifp->if_ioctl = vlan_ioctl;
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname,
		    rv);
		goto fail;
	}

	vlan_reset_linkname(ifp);
	if_register(ifp);
	return 0;

fail:
	mutex_enter(&ifv_list.lock);
	LIST_REMOVE(ifv, ifv_list);
	mutex_exit(&ifv_list.lock);

	mutex_destroy(&ifv->ifv_lock);
	psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
	kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
	free(ifv, M_DEVBUF);

	return rv;
}

static int
vlan_clone_destroy(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;

	mutex_enter(&ifv_list.lock);
	LIST_REMOVE(ifv, ifv_list);
	mutex_exit(&ifv_list.lock);

	IFNET_LOCK(ifp);
	vlan_unconfig(ifp);
	IFNET_UNLOCK(ifp);
	if_detach(ifp);

	psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
	kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
	pserialize_destroy(ifv->ifv_psz);
	mutex_destroy(&ifv->ifv_lock);
	free(ifv, M_DEVBUF);

	return 0;
}

/*
 * Configure a VLAN interface.
 */
static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
{
	struct ifnet *ifp = &ifv->ifv_if;
	struct ifvlan_linkmib *nmib = NULL;
	struct ifvlan_linkmib *omib = NULL;
	struct ifvlan_linkmib *checkmib;
	struct psref_target *nmib_psref = NULL;
	const uint16_t vid = EVL_VLANOFTAG(tag);
	int error = 0;
	int idx;
	bool omib_cleanup = false;
	struct psref psref;

	/* VLAN ID 0 and 4095 are reserved in the spec */
	if ((vid == 0) || (vid == 0xfff))
		return EINVAL;

	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
	mutex_enter(&ifv->ifv_lock);
	omib = ifv->ifv_mib;

	if (omib->ifvm_p != NULL) {
		error = EBUSY;
		goto done;
	}

	/* Duplicate check */
	checkmib = vlan_lookup_tag_psref(p, vid, &psref);
	if (checkmib != NULL) {
		vlan_putref_linkmib(checkmib, &psref);
		error = EEXIST;
		goto done;
	}

	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;

	psref_target_init(nmib_psref, ifvm_psref_class);

	switch (p->if_type) {
	case IFT_ETHER:
	{
		struct ethercom *ec = (void *)p;
		struct vlanid_list *vidmem;

		nmib->ifvm_msw = &vlan_ether_multisw;
		nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
		nmib->ifvm_mintu = ETHERMIN;

		if (ec->ec_nvlans++ == 0) {
			IFNET_LOCK(p);
			error = ether_enable_vlan_mtu(p);
			IFNET_UNLOCK(p);
			if (error >= 0) {
				if (error) {
					ec->ec_nvlans--;
					goto done;
				}
				nmib->ifvm_mtufudge = 0;
			} else {
				/*
				 * Fudge the MTU by the encapsulation size.
				 * This makes us incompatible with strictly
				 * compliant 802.1Q implementations, but
				 * allows us to use the feature with other
				 * NetBSD implementations, which might still
				 * be useful.
				 */
				nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
			}
			error = 0;
		}
		/* Add a vid to the list */
		vidmem = kmem_alloc(sizeof(struct vlanid_list), KM_SLEEP);
		vidmem->vid = vid;
		ETHER_LOCK(ec);
		SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list);
		ETHER_UNLOCK(ec);

		if (ec->ec_vlan_cb != NULL) {
			/*
			 * Call ec_vlan_cb() so that the parent can set up
			 * its VLAN HW filter or HW tagging function.
			 */
			error = (*ec->ec_vlan_cb)(ec, vid, true);
			if (error) {
				ec->ec_nvlans--;
				if (ec->ec_nvlans == 0) {
					IFNET_LOCK(p);
					(void)ether_disable_vlan_mtu(p);
					IFNET_UNLOCK(p);
				}
				goto done;
			}
		}
		/*
		 * If the parent interface can do hardware-assisted
		 * VLAN encapsulation, then propagate its hardware-
		 * assisted checksumming flags and tcp segmentation
		 * offload.
		 */
		if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
			ifp->if_capabilities = p->if_capabilities &
			    (IFCAP_TSOv4 | IFCAP_TSOv6 |
			     IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
			     IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
			     IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
			     IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
			     IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx);
		}

		/*
		 * We inherit the parent's Ethernet address.
		 */
		ether_ifattach(ifp, CLLADDR(p->if_sadl));
		ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
		break;
	}

	default:
		error = EPROTONOSUPPORT;
		goto done;
	}

	nmib->ifvm_p = p;
	nmib->ifvm_tag = vid;
	ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	if (in6_present)
		nd6_setmtu(ifp);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif
	ifv->ifv_if.if_flags = p->if_flags &
	    (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	/*
	 * Inherit the if_type from the parent.  This allows us
	 * to participate in bridges of that type.
	 */
	ifv->ifv_if.if_type = p->if_type;

	PSLIST_ENTRY_INIT(ifv, ifv_hash);
	idx = vlan_tag_hash(vid, ifv_hash.mask);

	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
	mutex_exit(&ifv_hash.lock);

	vlan_linkmib_update(ifv, nmib);
	nmib = NULL;
	nmib_psref = NULL;
	omib_cleanup = true;

done:
	mutex_exit(&ifv->ifv_lock);

	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);
	if (nmib)
		kmem_free(nmib, sizeof(*nmib));
	if (omib_cleanup)
		kmem_free(omib, sizeof(*omib));

	return error;
}

/*
 * Unconfigure a VLAN interface.
 */
static void
vlan_unconfig(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifvlan_linkmib *nmib = NULL;
	int error;

	KASSERT(IFNET_LOCKED(ifp));

	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);

	mutex_enter(&ifv->ifv_lock);
	error = vlan_unconfig_locked(ifv, nmib);
	mutex_exit(&ifv->ifv_lock);

	if (error)
		kmem_free(nmib, sizeof(*nmib));
}

static int
vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifnet *p;
	struct ifnet *ifp = &ifv->ifv_if;
	struct psref_target *nmib_psref = NULL;
	struct ifvlan_linkmib *omib;
	int error = 0;

	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(mutex_owned(&ifv->ifv_lock));

	ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);

	omib = ifv->ifv_mib;
	p = omib->ifvm_p;

	if (p == NULL) {
		error = -1;
		goto done;
	}

	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;
	psref_target_init(nmib_psref, ifvm_psref_class);

	/*
	 * Since the interface is being unconfigured, we need to empty the
	 * list of multicast groups that we may have joined while we were
	 * alive and remove them from the parent's list also.
	 */
	(*nmib->ifvm_msw->vmsw_purgemulti)(ifv);

	/* Disconnect from parent. */
	switch (p->if_type) {
	case IFT_ETHER:
	{
		struct ethercom *ec = (void *)p;
		struct vlanid_list *vlanidp;
		uint16_t vid = EVL_VLANOFTAG(nmib->ifvm_tag);

		ETHER_LOCK(ec);
		SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
			if (vlanidp->vid == vid) {
				SIMPLEQ_REMOVE(&ec->ec_vids, vlanidp,
				    vlanid_list, vid_list);
				break;
			}
		}
		ETHER_UNLOCK(ec);
		if (vlanidp != NULL)
			kmem_free(vlanidp, sizeof(*vlanidp));

		if (ec->ec_vlan_cb != NULL) {
			/*
			 * Call ec_vlan_cb() so that the parent can tear
			 * down its VLAN HW filter or HW tagging state for
			 * this vid.
			 */
			(void)(*ec->ec_vlan_cb)(ec, vid, false);
		}
		if (--ec->ec_nvlans == 0) {
			IFNET_LOCK(p);
			(void)ether_disable_vlan_mtu(p);
			IFNET_UNLOCK(p);
		}

		/* XXX ether_ifdetach must not be called with IFNET_LOCK */
		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
		ether_ifdetach(ifp);
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);

		/* if_free_sadl must be called with IFNET_LOCK */
		if_free_sadl(ifp, 1);

		/* Restore vlan_ioctl overwritten by ether_ifdetach */
		ifp->if_ioctl = vlan_ioctl;
		vlan_reset_linkname(ifp);
		break;
	}

	default:
		panic("%s: impossible", __func__);
	}

	nmib->ifvm_p = NULL;
	ifv->ifv_if.if_mtu = 0;
	ifv->ifv_flags = 0;

	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_REMOVE(ifv, ifv_hash);
	pserialize_perform(vlan_psz);
	mutex_exit(&ifv_hash.lock);
	PSLIST_ENTRY_DESTROY(ifv, ifv_hash);

	vlan_linkmib_update(ifv, nmib);

	mutex_exit(&ifv->ifv_lock);

	nmib_psref = NULL;
	kmem_free(omib, sizeof(*omib));

#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	/* To delete v6 link local addresses */
	if (in6_present)
		in6_ifdetach(ifp);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		vlan_safe_ifpromisc_locked(ifp, 0);
	if_down_locked(ifp);
	ifp->if_capabilities = 0;
	mutex_enter(&ifv->ifv_lock);
done:

	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);

	return error;
}

static void
vlan_hash_init(void)
{

	ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
	    &ifv_hash.mask);
}

static int
vlan_hash_fini(void)
{
	int i;

	mutex_enter(&ifv_hash.lock);

	for (i = 0; i < ifv_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
		    ifv_hash) != NULL) {
			mutex_exit(&ifv_hash.lock);
			return EBUSY;
		}
	}

	for (i = 0; i < ifv_hash.mask + 1; i++)
		PSLIST_DESTROY(&ifv_hash.lists[i]);

	mutex_exit(&ifv_hash.lock);

	hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);

	ifv_hash.lists = NULL;
	ifv_hash.mask = 0;

	return 0;
}

static int
vlan_tag_hash(uint16_t tag, u_long mask)
{
	uint32_t hash;

	hash = (tag >> 8) ^ tag;
	hash = (hash >> 2) ^ hash;

	return hash & mask;
}
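
/*
 * Worked example (illustrative): with the default VLAN_TAG_HASH_SIZE of 32,
 * the mask handed back by hashinit() is 0x1f, so e.g. tag 0x123 hashes as
 *
 *	(0x123 >> 8) ^ 0x123 = 0x122
 *	(0x122 >> 2) ^ 0x122 = 0x16a
 *	0x16a & 0x1f         = 0x0a	(bucket 10)
 */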

static struct ifvlan_linkmib *
vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
{
	struct ifvlan_linkmib *mib;
	int s;

	s = pserialize_read_enter();
	mib = atomic_load_consume(&sc->ifv_mib);
	if (mib == NULL) {
		pserialize_read_exit(s);
		return NULL;
	}
	psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
	pserialize_read_exit(s);

	return mib;
}

static void
vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
{
	if (mib == NULL)
		return;
	psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
}

static struct ifvlan_linkmib *
vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
{
	int idx;
	int s;
	struct ifvlan *sc;

	idx = vlan_tag_hash(tag, ifv_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
	    ifv_hash) {
		struct ifvlan_linkmib *mib = atomic_load_consume(&sc->ifv_mib);
		if (mib == NULL)
			continue;
		if (mib->ifvm_tag != tag)
			continue;
		if (mib->ifvm_p != ifp)
			continue;

		psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
		pserialize_read_exit(s);
		return mib;
	}
	pserialize_read_exit(s);
	return NULL;
}

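/*
 * Note on the update protocol below (descriptive, summarizing the calls):
 * the new mib pointer is published first, then pserialize_perform() waits
 * for readers that may still be dereferencing the old pointer inside a
 * pserialize read section, and psref_target_destroy() additionally waits
 * until every psref reference taken on the old mib has been released, so
 * the caller can free it afterwards.
 */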
static void
vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifvlan_linkmib *omib = ifv->ifv_mib;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	atomic_store_release(&ifv->ifv_mib, nmib);

	pserialize_perform(ifv->ifv_psz);
	psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
}

/*
 * Called when a parent interface is detaching; destroy any VLAN
 * configuration for the parent interface.
 */
void
vlan_ifdetach(struct ifnet *p)
{
	struct ifvlan *ifv;
	struct ifvlan_linkmib *mib, **nmibs;
	struct psref psref;
	int error;
	int bound;
	int i, cnt = 0;

	bound = curlwp_bind();

	mutex_enter(&ifv_list.lock);
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL)
			continue;

		if (mib->ifvm_p == p)
			cnt++;

		vlan_putref_linkmib(mib, &psref);
	}
	mutex_exit(&ifv_list.lock);

	if (cnt == 0) {
		curlwp_bindx(bound);
		return;
	}

	/*
	 * The value of "cnt" does not increase while ifv_list.lock
	 * and ifv->ifv_lock are released here, because the parent
	 * interface is detaching.
	 */
	nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
	for (i = 0; i < cnt; i++) {
		nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
	}

	mutex_enter(&ifv_list.lock);

	i = 0;
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		struct ifnet *ifp = &ifv->ifv_if;

		/* IFNET_LOCK must be held before ifv_lock. */
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);

		/* XXX ifv_mib = NULL? */
		if (ifv->ifv_mib->ifvm_p == p) {
			KASSERTMSG(i < cnt,
			    "no memory for unconfig, parent=%s", p->if_xname);
			error = vlan_unconfig_locked(ifv, nmibs[i]);
			if (!error) {
				nmibs[i] = NULL;
				i++;
			}

		}

		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
	}

	mutex_exit(&ifv_list.lock);

	curlwp_bindx(bound);

	for (i = 0; i < cnt; i++) {
		if (nmibs[i])
			kmem_free(nmibs[i], sizeof(*nmibs[i]));
	}

	kmem_free(nmibs, sizeof(*nmibs) * cnt);

	return;
}

static int
vlan_set_promisc(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error = 0;
	int bound;

	bound = curlwp_bind();
	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL) {
		curlwp_bindx(bound);
		return EBUSY;
	}

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
			error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
			if (error == 0)
				ifv->ifv_flags |= IFVF_PROMISC;
		}
	} else {
		if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
			error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
			if (error == 0)
				ifv->ifv_flags &= ~IFVF_PROMISC;
		}
	}
	vlan_putref_linkmib(mib, &psref);
	curlwp_bindx(bound);

	return error;
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct lwp *l = curlwp;
	struct ifvlan *ifv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifnet *pr;
	struct ifcapreq *ifcr;
	struct vlanreq vlr;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error = 0;
	int bound;

	switch (cmd) {
	case SIOCSIFMTU:
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else if (
		    ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
		    ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			error = ifioctl_common(ifp, cmd, data);
			if (error == ENETRESET)
				error = 0;
		}

		break;

	case SIOCSETVLAN:
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_INTERFACE,
		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
		    NULL)) != 0)
			break;
		if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
			break;

		if (vlr.vlr_parent[0] == '\0') {
			bound = curlwp_bind();
			mib = vlan_getref_linkmib(ifv, &psref);
			if (mib == NULL) {
				curlwp_bindx(bound);
				error = EBUSY;
				break;
			}

			if (mib->ifvm_p != NULL &&
			    (ifp->if_flags & IFF_PROMISC) != 0)
				error = vlan_safe_ifpromisc(mib->ifvm_p, 0);

			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			vlan_unconfig(ifp);
			break;
		}
		if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
			error = EINVAL;	/* check for valid tag */
			break;
		}
		if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
			error = ENOENT;
			break;
		}

		error = vlan_config(ifv, pr, vlr.vlr_tag);
		if (error != 0)
			break;

		/* Update promiscuous mode, if necessary. */
		vlan_set_promisc(ifp);

		ifp->if_flags |= IFF_RUNNING;
		break;

	case SIOCGETVLAN:
		memset(&vlr, 0, sizeof(vlr));
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}
		if (mib->ifvm_p != NULL) {
			snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
			    mib->ifvm_p->if_xname);
			vlr.vlr_tag = mib->ifvm_tag;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/*
		 * If promiscuous mode is requested on the VLAN
		 * interface, enable promiscuous mode on the parent
		 * interface as well.
		 */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p != NULL)
			error = vlan_set_promisc(ifp);
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCADDMULTI:
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}

		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCDELMULTI:
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}
		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCSIFCAP:
		ifcr = data;
		/* make sure caps are enabled on parent */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}
		if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
		    ifcr->ifcr_capenable) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}

		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCINITIFADDR:
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			error = EINVAL;
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			break;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return error;
}

static int
vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
{
	const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
	struct vlan_mc_entry *mc;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	struct ifvlan_linkmib *mib;
	int error;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	if (sa->sa_len > sizeof(struct sockaddr_storage))
		return EINVAL;

	error = ether_addmulti(sa, &ifv->ifv_ec);
	if (error != ENETRESET)
		return error;

	/*
	 * This is a new multicast address.  We have to tell parent
	 * about it.  Also, remember this multicast address so that
	 * we can delete it on unconfigure.
	 */
	mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
	if (mc == NULL) {
		error = ENOMEM;
		goto alloc_failed;
	}

	/*
	 * Since ether_addmulti() returned ENETRESET, the following two
	 * statements shouldn't fail.  Here ifv_ec is implicitly
	 * protected by ifv_lock.
	 */
	error = ether_multiaddr(sa, addrlo, addrhi);
	KASSERT(error == 0);

	ETHER_LOCK(&ifv->ifv_ec);
	mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
	ETHER_UNLOCK(&ifv->ifv_ec);

	KASSERT(mc->mc_enm != NULL);

	memcpy(&mc->mc_addr, sa, sa->sa_len);
	LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);

	mib = ifv->ifv_mib;

	KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
	error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
	KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);

	if (error != 0)
		goto ioctl_failed;
	return error;

ioctl_failed:
	LIST_REMOVE(mc, mc_entries);
	free(mc, M_DEVBUF);

alloc_failed:
	(void)ether_delmulti(sa, &ifv->ifv_ec);
	return error;
}

static int
vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
{
	const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
	struct ether_multi *enm;
	struct vlan_mc_entry *mc;
	struct ifvlan_linkmib *mib;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	/*
	 * Find a key to lookup vlan_mc_entry.  We have to do this
	 * before calling ether_delmulti for obvious reasons.
	 */
	if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
		return error;

	ETHER_LOCK(&ifv->ifv_ec);
	enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
	ETHER_UNLOCK(&ifv->ifv_ec);
	if (enm == NULL)
		return EINVAL;

	LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
		if (mc->mc_enm == enm)
			break;
	}

	/* We won't delete entries we didn't add */
	if (mc == NULL)
		return EINVAL;

	error = ether_delmulti(sa, &ifv->ifv_ec);
	if (error != ENETRESET)
		return error;

	/* We no longer use this multicast address.  Tell parent so. */
	mib = ifv->ifv_mib;
	error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);

	if (error == 0) {
		/* And forget about this address. */
		LIST_REMOVE(mc, mc_entries);
		free(mc, M_DEVBUF);
	} else {
		(void)ether_addmulti(sa, &ifv->ifv_ec);
	}

	return error;
}

/*
 * Delete from the parent interface any multicast addresses that we
 * asked it to add.  Called when the vlan is being unconfigured.
 */
static void
vlan_ether_purgemulti(struct ifvlan *ifv)
{
	struct vlan_mc_entry *mc;
	struct ifvlan_linkmib *mib;

	KASSERT(mutex_owned(&ifv->ifv_lock));
	mib = ifv->ifv_mib;
	if (mib == NULL) {
		return;
	}

	while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
		(void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
		    sstocsa(&mc->mc_addr));
		LIST_REMOVE(mc, mc_entries);
		free(mc, M_DEVBUF);
	}
}

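/*
 * Output-path frame layout (illustrative): when the parent cannot tag in
 * hardware, vlan_start() and vlan_transmit() below rewrite the Ethernet
 * header in place roughly as follows:
 *
 *	before:	| dst | src | type |              payload ...
 *	after:	| dst | src | 8100 | tag | type | payload ...
 *
 * i.e. the original ethertype is moved into evl_proto and ETHERTYPE_VLAN
 * (0x8100) plus the 16-bit tag take its place.
 */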
static void
vlan_start(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifnet *p;
	struct ethercom *ec;
	struct mbuf *m;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error;

	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL)
		return;
	p = mib->ifvm_p;
	ec = (void *)mib->ifvm_p;

	ifp->if_flags |= IFF_OACTIVE;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#ifdef ALTQ
		/*
		 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
		 * defined.
		 */
		KERNEL_LOCK(1, NULL);
		/*
		 * If ALTQ is enabled on the parent interface, do
		 * classification; the queueing discipline might
		 * not require classification, but might require
		 * the address family/header pointer in the pktattr.
		 */
		if (ALTQ_IS_ENABLED(&p->if_snd)) {
			switch (p->if_type) {
			case IFT_ETHER:
				altq_etherclassify(&p->if_snd, m);
				break;
			default:
				panic("%s: impossible (altq)", __func__);
			}
		}
		KERNEL_UNLOCK_ONE(NULL);
#endif /* ALTQ */

		bpf_mtap(ifp, m, BPF_D_OUT);
		/*
		 * If the parent can insert the tag itself, just mark
		 * the tag in the mbuf header.
		 */
		if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
			vlan_set_tag(m, mib->ifvm_tag);
		} else {
			/*
			 * insert the tag ourselves
			 */
			M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
			if (m == NULL) {
				printf("%s: unable to prepend encap header",
				    p->if_xname);
				if_statinc(ifp, if_oerrors);
				continue;
			}

			switch (p->if_type) {
			case IFT_ETHER:
			{
				struct ether_vlan_header *evl;

				if (m->m_len < sizeof(struct ether_vlan_header))
					m = m_pullup(m,
					    sizeof(struct ether_vlan_header));
				if (m == NULL) {
					printf("%s: unable to pullup encap "
					    "header", p->if_xname);
					if_statinc(ifp, if_oerrors);
					continue;
				}

				/*
				 * Transform the Ethernet header into an
				 * Ethernet header with 802.1Q encapsulation.
				 */
				memmove(mtod(m, void *),
				    mtod(m, char *) + mib->ifvm_encaplen,
				    sizeof(struct ether_header));
				evl = mtod(m, struct ether_vlan_header *);
				evl->evl_proto = evl->evl_encap_proto;
				evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
				evl->evl_tag = htons(mib->ifvm_tag);

				/*
				 * To cater for VLAN-aware layer 2 ethernet
				 * switches which may need to strip the tag
				 * before forwarding the packet, make sure
				 * the packet+tag is at least 68 bytes long.
				 * This is necessary because our parent will
				 * only pad to 64 bytes (ETHER_MIN_LEN) and
				 * some switches will not pad by themselves
				 * after deleting a tag.
				 */
				const size_t min_data_len = ETHER_MIN_LEN -
				    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
				if (m->m_pkthdr.len < min_data_len) {
					m_copyback(m, m->m_pkthdr.len,
					    min_data_len - m->m_pkthdr.len,
					    vlan_zero_pad_buff);
				}
				break;
			}

			default:
				panic("%s: impossible", __func__);
			}
		}

		if ((p->if_flags & IFF_RUNNING) == 0) {
			m_freem(m);
			continue;
		}

		error = if_transmit_lock(p, m);
		if (error) {
			/* mbuf is already freed */
			if_statinc(ifp, if_oerrors);
			continue;
		}
		if_statinc(ifp, if_opackets);
	}

	ifp->if_flags &= ~IFF_OACTIVE;

	/* Remove reference to mib before release */
	vlan_putref_linkmib(mib, &psref);
}

static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifnet *p;
	struct ethercom *ec;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error;
	size_t pktlen = m->m_pkthdr.len;
	bool mcast = (m->m_flags & M_MCAST) != 0;

	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	p = mib->ifvm_p;
	ec = (void *)mib->ifvm_p;

	bpf_mtap(ifp, m, BPF_D_OUT);

	if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
		goto out;
	if (m == NULL)
		goto out;

	/*
	 * If the parent can insert the tag itself, just mark
	 * the tag in the mbuf header.
	 */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
		vlan_set_tag(m, mib->ifvm_tag);
	} else {
		/*
		 * insert the tag ourselves
		 */
		M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
		if (m == NULL) {
			printf("%s: unable to prepend encap header",
			    p->if_xname);
			if_statinc(ifp, if_oerrors);
			error = ENOBUFS;
			goto out;
		}

		switch (p->if_type) {
		case IFT_ETHER:
		{
			struct ether_vlan_header *evl;

			if (m->m_len < sizeof(struct ether_vlan_header))
				m = m_pullup(m,
				    sizeof(struct ether_vlan_header));
			if (m == NULL) {
				printf("%s: unable to pullup encap "
				    "header", p->if_xname);
				if_statinc(ifp, if_oerrors);
				error = ENOBUFS;
				goto out;
			}

			/*
			 * Transform the Ethernet header into an
			 * Ethernet header with 802.1Q encapsulation.
			 */
			memmove(mtod(m, void *),
			    mtod(m, char *) + mib->ifvm_encaplen,
			    sizeof(struct ether_header));
			evl = mtod(m, struct ether_vlan_header *);
			evl->evl_proto = evl->evl_encap_proto;
			evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
			evl->evl_tag = htons(mib->ifvm_tag);

			/*
			 * To cater for VLAN-aware layer 2 ethernet
			 * switches which may need to strip the tag
			 * before forwarding the packet, make sure
			 * the packet+tag is at least 68 bytes long.
			 * This is necessary because our parent will
			 * only pad to 64 bytes (ETHER_MIN_LEN) and
			 * some switches will not pad by themselves
			 * after deleting a tag.
			 */
			const size_t min_data_len = ETHER_MIN_LEN -
			    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
			if (m->m_pkthdr.len < min_data_len) {
				m_copyback(m, m->m_pkthdr.len,
				    min_data_len - m->m_pkthdr.len,
				    vlan_zero_pad_buff);
			}
			break;
		}

		default:
			panic("%s: impossible", __func__);
		}
	}

	if ((p->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

	error = if_transmit_lock(p, m);
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if (error) {
		/* mbuf is already freed */
		if_statinc_ref(nsr, if_oerrors);
	} else {
		if_statinc_ref(nsr, if_opackets);
		if_statadd_ref(nsr, if_obytes, pktlen);
		if (mcast)
			if_statinc_ref(nsr, if_omcasts);
	}
	IF_STAT_PUTREF(ifp);

out:
	/* Remove reference to mib before release */
	vlan_putref_linkmib(mib, &psref);
	return error;
}

/*
 * Given an Ethernet frame, find the vlan interface corresponding to the
 * receiving interface and the tag, strip the encapsulation, and pass the
 * packet up through the vlan interface's input routine.
 */
void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	uint16_t vid;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	bool have_vtag;

	have_vtag = vlan_has_tag(m);
	if (have_vtag) {
		vid = EVL_VLANOFTAG(vlan_get_tag(m));
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		if (ifp->if_type != IFT_ETHER) {
			panic("%s: impossible", __func__);
		}

		if (m->m_len < sizeof(struct ether_vlan_header) &&
		    (m = m_pullup(m,
		     sizeof(struct ether_vlan_header))) == NULL) {
			printf("%s: no memory for VLAN header, "
			    "dropping packet.\n", ifp->if_xname);
			return;
		}
		evl = mtod(m, struct ether_vlan_header *);
		KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);

		vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));

		/*
		 * Restore the original ethertype.  We'll remove
		 * the encapsulation after we've found the vlan
		 * interface corresponding to the tag.
		 */
		evl->evl_encap_proto = evl->evl_proto;
	}

	mib = vlan_lookup_tag_psref(ifp, vid, &psref);
	if (mib == NULL) {
		m_freem(m);
		if_statinc(ifp, if_noproto);
		return;
	}
	KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);

	ifv = mib->ifvm_ifvlan;
	if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
	    (IFF_UP | IFF_RUNNING)) {
		m_freem(m);
		if_statinc(ifp, if_noproto);
		goto out;
	}

	/*
	 * Now, remove the encapsulation header.  The original
	 * header has already been fixed up above.
	 */
	if (!have_vtag) {
		memmove(mtod(m, char *) + mib->ifvm_encaplen,
		    mtod(m, void *), sizeof(struct ether_header));
		m_adj(m, mib->ifvm_encaplen);
	}

	m_set_rcvif(m, &ifv->ifv_if);

	if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
		goto out;
	if (m == NULL)
		goto out;

	m->m_flags &= ~M_PROMISC;
	if_input(&ifv->ifv_if, m);
out:
	vlan_putref_linkmib(mib, &psref);
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)