1 /*	$NetBSD: if_vlan.c,v 1.152 2020/06/12 11:04:45 roy Exp $	*/
2
3 /*
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright 1998 Massachusetts Institute of Technology
34 *
35 * Permission to use, copy, modify, and distribute this software and
36 * its documentation for any purpose and without fee is hereby
37 * granted, provided that both the above copyright notice and this
38 * permission notice appear in all copies, that both the above
39 * copyright notice and this permission notice appear in all
40 * supporting documentation, and that the name of M.I.T. not be used
41 * in advertising or publicity pertaining to distribution of the
42 * software without specific, written prior permission. M.I.T. makes
43 * no representations about the suitability of this software for any
44 * purpose. It is provided "as is" without express or implied
45 * warranty.
46 *
47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62 */
63
64 /*
65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be
66 * extended some day to also handle IEEE 802.1P priority tagging. This is
67 * sort of sneaky in the implementation, since we need to pretend to be
68 * enough of an Ethernet implementation to make ARP work. The way we do
69 * this is by telling everyone that we are an Ethernet interface, and then
70 * catch the packets that ether_output() left on our output queue when it
71 * calls if_start(), rewrite them for use by the real outgoing interface,
72 * and ask it to send them.
73 *
74 * TODO:
75 *
76 * - Need some way to notify vlan interfaces when the parent
77 * interface changes MTU.
78 */
79
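/*
 * Example configuration from userland (a sketch: the exact ifconfig(8)
 * arguments are an assumption and may vary between releases; "wm0" is
 * only an example parent interface):
 *
 *	ifconfig vlan0 create
 *	ifconfig vlan0 vlan 5 vlanif wm0
 *	ifconfig vlan0 inet 192.0.2.1/24 up
 *
 * The "vlan ... vlanif ..." step reaches this driver through the
 * SIOCSETVLAN ioctl handled in vlan_ioctl() below.
 */
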
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.152 2020/06/12 11:04:45 roy Exp $");
82
83 #ifdef _KERNEL_OPT
84 #include "opt_inet.h"
85 #include "opt_net_mpsafe.h"
86 #endif
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/mbuf.h>
92 #include <sys/queue.h>
93 #include <sys/socket.h>
94 #include <sys/sockio.h>
95 #include <sys/systm.h>
96 #include <sys/proc.h>
97 #include <sys/kauth.h>
98 #include <sys/mutex.h>
99 #include <sys/kmem.h>
100 #include <sys/cpu.h>
101 #include <sys/pserialize.h>
102 #include <sys/psref.h>
103 #include <sys/pslist.h>
104 #include <sys/atomic.h>
105 #include <sys/device.h>
106 #include <sys/module.h>
107
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_ether.h>
113 #include <net/if_vlanvar.h>
114
115 #ifdef INET
116 #include <netinet/in.h>
117 #include <netinet/if_inarp.h>
118 #endif
119 #ifdef INET6
120 #include <netinet6/in6_ifattach.h>
121 #include <netinet6/in6_var.h>
122 #include <netinet6/nd6.h>
123 #endif
124
125 #include "ioconf.h"
126
127 struct vlan_mc_entry {
128 LIST_ENTRY(vlan_mc_entry) mc_entries;
129 /*
130 * A key to identify this entry. The mc_addr below can't be
131 	 * used since multiple sockaddrs may be mapped into the same
132 * ether_multi (e.g., AF_UNSPEC).
133 */
134 struct ether_multi *mc_enm;
135 struct sockaddr_storage mc_addr;
136 };
137
138 struct ifvlan_linkmib {
139 struct ifvlan *ifvm_ifvlan;
140 const struct vlan_multisw *ifvm_msw;
141 int ifvm_encaplen; /* encapsulation length */
142 int ifvm_mtufudge; /* MTU fudged by this much */
143 int ifvm_mintu; /* min transmission unit */
144 uint16_t ifvm_proto; /* encapsulation ethertype */
145 uint16_t ifvm_tag; /* tag to apply on packets */
146 struct ifnet *ifvm_p; /* parent interface of this vlan */
147
148 struct psref_target ifvm_psref;
149 };
150
151 struct ifvlan {
152 struct ethercom ifv_ec;
153 struct ifvlan_linkmib *ifv_mib; /*
154 * reader must use vlan_getref_linkmib()
155 * instead of direct dereference
156 */
157 kmutex_t ifv_lock; /* writer lock for ifv_mib */
158 pserialize_t ifv_psz;
159
160 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
161 LIST_ENTRY(ifvlan) ifv_list;
162 struct pslist_entry ifv_hash;
163 int ifv_flags;
164 };
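
/*
 * Locking summary (derived from the code below):
 *
 *	ifv_lock		serializes writers of ifv_mib
 *	ifv_psz + psref		allow lock-free readers of ifv_mib
 *
 * A reader takes a stable reference roughly like this (see
 * vlan_set_promisc() for a real example):
 *
 *	struct psref psref;
 *	int bound = curlwp_bind();
 *	mib = vlan_getref_linkmib(ifv, &psref);
 *	if (mib != NULL) {
 *		... use mib->ifvm_p, mib->ifvm_tag, ...
 *		vlan_putref_linkmib(mib, &psref);
 *	}
 *	curlwp_bindx(bound);
 */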
165
166 #define IFVF_PROMISC 0x01 /* promiscuous mode enabled */
167
168 #define ifv_if ifv_ec.ec_if
169
170 #define ifv_msw ifv_mib.ifvm_msw
171 #define ifv_encaplen ifv_mib.ifvm_encaplen
172 #define ifv_mtufudge ifv_mib.ifvm_mtufudge
173 #define ifv_mintu ifv_mib.ifvm_mintu
174 #define ifv_tag ifv_mib.ifvm_tag
175
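/*
 * Per-media multicast operations.  Only Ethernet parents are supported
 * (see the if_type switches in vlan_config() and vlan_unconfig_locked()),
 * but this switch keeps the multicast handling pluggable.
 */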
176 struct vlan_multisw {
177 int (*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
178 int (*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
179 void (*vmsw_purgemulti)(struct ifvlan *);
180 };
181
182 static int vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
183 static int vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
184 static void vlan_ether_purgemulti(struct ifvlan *);
185
186 const struct vlan_multisw vlan_ether_multisw = {
187 .vmsw_addmulti = vlan_ether_addmulti,
188 .vmsw_delmulti = vlan_ether_delmulti,
189 .vmsw_purgemulti = vlan_ether_purgemulti,
190 };
191
192 static int vlan_clone_create(struct if_clone *, int);
193 static int vlan_clone_destroy(struct ifnet *);
194 static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t);
195 static int vlan_ioctl(struct ifnet *, u_long, void *);
196 static void vlan_start(struct ifnet *);
197 static int vlan_transmit(struct ifnet *, struct mbuf *);
198 static void vlan_unconfig(struct ifnet *);
199 static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *);
200 static void vlan_hash_init(void);
201 static int vlan_hash_fini(void);
202 static int vlan_tag_hash(uint16_t, u_long);
203 static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *,
204 struct psref *);
205 static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *);
206 static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *);
207 static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *,
208 uint16_t, struct psref *);
209
210 static struct {
211 kmutex_t lock;
212 LIST_HEAD(vlan_ifvlist, ifvlan) list;
213 } ifv_list __cacheline_aligned;
214
215
216 #if !defined(VLAN_TAG_HASH_SIZE)
217 #define VLAN_TAG_HASH_SIZE 32
218 #endif
219 static struct {
220 kmutex_t lock;
221 struct pslist_head *lists;
222 u_long mask;
223 } ifv_hash __cacheline_aligned = {
224 .lists = NULL,
225 .mask = 0,
226 };
227
228 pserialize_t vlan_psz __read_mostly;
229 static struct psref_class *ifvm_psref_class __read_mostly;
230
231 struct if_clone vlan_cloner =
232 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);
233
234 /* Used to pad Ethernet frames shorter than ETHER_MIN_LEN bytes */
235 static char vlan_zero_pad_buff[ETHER_MIN_LEN];
236
237 static inline int
238 vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
239 {
240 int e;
241
242 KERNEL_LOCK_UNLESS_NET_MPSAFE();
243 e = ifpromisc(ifp, pswitch);
244 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
245
246 return e;
247 }
248
249 static inline int
250 vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
251 {
252 int e;
253
254 KERNEL_LOCK_UNLESS_NET_MPSAFE();
255 e = ifpromisc_locked(ifp, pswitch);
256 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
257
258 return e;
259 }
260
261 void
262 vlanattach(int n)
263 {
264
265 /*
266 * Nothing to do here, initialization is handled by the
267 * module initialization code in vlaninit() below.
268 */
269 }
270
271 static void
272 vlaninit(void)
273 {
274 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
275 LIST_INIT(&ifv_list.list);
276
277 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
278 vlan_psz = pserialize_create();
279 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
280 if_clone_attach(&vlan_cloner);
281
282 vlan_hash_init();
283 MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input);
284 }
285
286 static int
287 vlandetach(void)
288 {
289 bool is_empty;
290 int error;
291
292 mutex_enter(&ifv_list.lock);
293 is_empty = LIST_EMPTY(&ifv_list.list);
294 mutex_exit(&ifv_list.lock);
295
296 if (!is_empty)
297 return EBUSY;
298
299 error = vlan_hash_fini();
300 if (error != 0)
301 return error;
302
303 if_clone_detach(&vlan_cloner);
304 psref_class_destroy(ifvm_psref_class);
305 pserialize_destroy(vlan_psz);
306 mutex_destroy(&ifv_hash.lock);
307 mutex_destroy(&ifv_list.lock);
308
309 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
310 return 0;
311 }
312
313 static void
314 vlan_reset_linkname(struct ifnet *ifp)
315 {
316
317 /*
318 	 * We start out with an "802.1Q VLAN" type and zero-length
319 * addresses. When we attach to a parent interface, we
320 * inherit its type, address length, address, and data link
321 * type.
322 */
323
324 ifp->if_type = IFT_L2VLAN;
325 ifp->if_addrlen = 0;
326 ifp->if_dlt = DLT_NULL;
327 if_alloc_sadl(ifp);
328 }
329
330 static int
331 vlan_clone_create(struct if_clone *ifc, int unit)
332 {
333 struct ifvlan *ifv;
334 struct ifnet *ifp;
335 struct ifvlan_linkmib *mib;
336 int rv;
337
338 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO);
339 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
340 ifp = &ifv->ifv_if;
341 LIST_INIT(&ifv->ifv_mc_listhead);
342
343 mib->ifvm_ifvlan = ifv;
344 mib->ifvm_p = NULL;
345 psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
346
347 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
348 ifv->ifv_psz = pserialize_create();
349 ifv->ifv_mib = mib;
350
351 mutex_enter(&ifv_list.lock);
352 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
353 mutex_exit(&ifv_list.lock);
354
355 if_initname(ifp, ifc->ifc_name, unit);
356 ifp->if_softc = ifv;
357 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
358 ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
359 #ifdef NET_MPSAFE
360 ifp->if_extflags |= IFEF_MPSAFE;
361 #endif
362 ifp->if_start = vlan_start;
363 ifp->if_transmit = vlan_transmit;
364 ifp->if_ioctl = vlan_ioctl;
365 IFQ_SET_READY(&ifp->if_snd);
366
367 rv = if_initialize(ifp);
368 if (rv != 0) {
369 aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname,
370 rv);
371 goto fail;
372 }
373
374 vlan_reset_linkname(ifp);
375 if_register(ifp);
376 return 0;
377
378 fail:
379 mutex_enter(&ifv_list.lock);
380 LIST_REMOVE(ifv, ifv_list);
381 mutex_exit(&ifv_list.lock);
382
383 mutex_destroy(&ifv->ifv_lock);
384 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
385 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
386 free(ifv, M_DEVBUF);
387
388 return rv;
389 }
390
391 static int
392 vlan_clone_destroy(struct ifnet *ifp)
393 {
394 struct ifvlan *ifv = ifp->if_softc;
395
396 mutex_enter(&ifv_list.lock);
397 LIST_REMOVE(ifv, ifv_list);
398 mutex_exit(&ifv_list.lock);
399
400 IFNET_LOCK(ifp);
401 vlan_unconfig(ifp);
402 IFNET_UNLOCK(ifp);
403 if_detach(ifp);
404
405 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
406 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
407 pserialize_destroy(ifv->ifv_psz);
408 mutex_destroy(&ifv->ifv_lock);
409 free(ifv, M_DEVBUF);
410
411 return 0;
412 }
413
414 /*
415 * Configure a VLAN interface.
416 */
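/*
 * vlan_config() attaches "ifv" to parent "p" with the given 802.1Q tag.
 * If the parent advertises ETHERCAP_VLAN_HWTAGGING we inherit a subset
 * of its checksum/TSO capabilities, and if it provides ec_vlan_cb we
 * notify it so it can program a hardware VLAN filter.  A minimal
 * parent-side callback might look like this (a sketch; everything
 * except the ec_vlan_cb hook itself is hypothetical):
 *
 *	static int
 *	xx_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
 *	{
 *		... program or clear the hardware filter for "vid" ...
 *		return 0;
 *	}
 *	...
 *	sc->sc_ethercom.ec_vlan_cb = xx_vlan_cb;
 */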
417 static int
418 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
419 {
420 struct ifnet *ifp = &ifv->ifv_if;
421 struct ifvlan_linkmib *nmib = NULL;
422 struct ifvlan_linkmib *omib = NULL;
423 struct ifvlan_linkmib *checkmib;
424 struct psref_target *nmib_psref = NULL;
425 const uint16_t vid = EVL_VLANOFTAG(tag);
426 int error = 0;
427 int idx;
428 bool omib_cleanup = false;
429 struct psref psref;
430
431 	/* VLAN IDs 0 and 4095 are reserved by the spec */
432 if ((vid == 0) || (vid == 0xfff))
433 return EINVAL;
434
435 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
436 mutex_enter(&ifv->ifv_lock);
437 omib = ifv->ifv_mib;
438
439 if (omib->ifvm_p != NULL) {
440 error = EBUSY;
441 goto done;
442 }
443
444 /* Duplicate check */
445 checkmib = vlan_lookup_tag_psref(p, vid, &psref);
446 if (checkmib != NULL) {
447 vlan_putref_linkmib(checkmib, &psref);
448 error = EEXIST;
449 goto done;
450 }
451
452 *nmib = *omib;
453 nmib_psref = &nmib->ifvm_psref;
454
455 psref_target_init(nmib_psref, ifvm_psref_class);
456
457 switch (p->if_type) {
458 case IFT_ETHER:
459 {
460 struct ethercom *ec = (void *)p;
461 struct vlanid_list *vidmem;
462
463 nmib->ifvm_msw = &vlan_ether_multisw;
464 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
465 nmib->ifvm_mintu = ETHERMIN;
466
467 if (ec->ec_nvlans++ == 0) {
468 IFNET_LOCK(p);
469 error = ether_enable_vlan_mtu(p);
470 IFNET_UNLOCK(p);
471 if (error >= 0) {
472 if (error) {
473 ec->ec_nvlans--;
474 goto done;
475 }
476 nmib->ifvm_mtufudge = 0;
477 } else {
478 /*
479 * Fudge the MTU by the encapsulation size. This
480 * makes us incompatible with strictly compliant
481 * 802.1Q implementations, but allows us to use
482 * the feature with other NetBSD
483 * implementations, which might still be useful.
484 */
485 nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
486 }
487 error = 0;
488 }
489 /* Add a vid to the list */
490 vidmem = kmem_alloc(sizeof(struct vlanid_list), KM_SLEEP);
491 vidmem->vid = vid;
492 ETHER_LOCK(ec);
493 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list);
494 ETHER_UNLOCK(ec);
495
496 if (ec->ec_vlan_cb != NULL) {
497 /*
498 			 * Call ec_vlan_cb().  It will set up the VLAN HW filter
499 			 * or HW tagging function.
500 */
501 error = (*ec->ec_vlan_cb)(ec, vid, true);
502 if (error) {
503 ec->ec_nvlans--;
504 if (ec->ec_nvlans == 0) {
505 IFNET_LOCK(p);
506 (void)ether_disable_vlan_mtu(p);
507 IFNET_UNLOCK(p);
508 }
509 goto done;
510 }
511 }
512 /*
513 * If the parent interface can do hardware-assisted
514 * VLAN encapsulation, then propagate its hardware-
515 		 * assisted checksumming flags and TCP segmentation
516 * offload.
517 */
518 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
519 ifp->if_capabilities = p->if_capabilities &
520 (IFCAP_TSOv4 | IFCAP_TSOv6 |
521 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
522 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
523 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
524 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
525 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx);
526 }
527
528 /*
529 * We inherit the parent's Ethernet address.
530 */
531 ether_ifattach(ifp, CLLADDR(p->if_sadl));
532 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
533 break;
534 }
535
536 default:
537 error = EPROTONOSUPPORT;
538 goto done;
539 }
540
541 nmib->ifvm_p = p;
542 nmib->ifvm_tag = vid;
543 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
544 ifv->ifv_if.if_flags = p->if_flags &
545 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
546
547 /*
548 * Inherit the if_type from the parent. This allows us
549 * to participate in bridges of that type.
550 */
551 ifv->ifv_if.if_type = p->if_type;
552
553 PSLIST_ENTRY_INIT(ifv, ifv_hash);
554 idx = vlan_tag_hash(vid, ifv_hash.mask);
555
556 mutex_enter(&ifv_hash.lock);
557 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
558 mutex_exit(&ifv_hash.lock);
559
560 vlan_linkmib_update(ifv, nmib);
561 nmib = NULL;
562 nmib_psref = NULL;
563 omib_cleanup = true;
564
565 done:
566 mutex_exit(&ifv->ifv_lock);
567
568 if (nmib_psref)
569 psref_target_destroy(nmib_psref, ifvm_psref_class);
570 if (nmib)
571 kmem_free(nmib, sizeof(*nmib));
572 if (omib_cleanup)
573 kmem_free(omib, sizeof(*omib));
574
575 return error;
576 }
577
578 /*
579 * Unconfigure a VLAN interface.
580 */
581 static void
582 vlan_unconfig(struct ifnet *ifp)
583 {
584 struct ifvlan *ifv = ifp->if_softc;
585 struct ifvlan_linkmib *nmib = NULL;
586 int error;
587
588 KASSERT(IFNET_LOCKED(ifp));
589
590 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
591
592 mutex_enter(&ifv->ifv_lock);
593 error = vlan_unconfig_locked(ifv, nmib);
594 mutex_exit(&ifv->ifv_lock);
595
596 if (error)
597 kmem_free(nmib, sizeof(*nmib));
598 }
599 static int
600 vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
601 {
602 struct ifnet *p;
603 struct ifnet *ifp = &ifv->ifv_if;
604 struct psref_target *nmib_psref = NULL;
605 struct ifvlan_linkmib *omib;
606 int error = 0;
607
608 KASSERT(IFNET_LOCKED(ifp));
609 KASSERT(mutex_owned(&ifv->ifv_lock));
610
611 ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);
612
613 omib = ifv->ifv_mib;
614 p = omib->ifvm_p;
615
616 if (p == NULL) {
617 error = -1;
618 goto done;
619 }
620
621 *nmib = *omib;
622 nmib_psref = &nmib->ifvm_psref;
623 psref_target_init(nmib_psref, ifvm_psref_class);
624
625 /*
626 * Since the interface is being unconfigured, we need to empty the
627 * list of multicast groups that we may have joined while we were
628 * alive and remove them from the parent's list also.
629 */
630 (*nmib->ifvm_msw->vmsw_purgemulti)(ifv);
631
632 /* Disconnect from parent. */
633 switch (p->if_type) {
634 case IFT_ETHER:
635 {
636 struct ethercom *ec = (void *)p;
637 struct vlanid_list *vlanidp;
638 uint16_t vid = EVL_VLANOFTAG(nmib->ifvm_tag);
639
640 ETHER_LOCK(ec);
641 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
642 if (vlanidp->vid == vid) {
643 SIMPLEQ_REMOVE(&ec->ec_vids, vlanidp,
644 vlanid_list, vid_list);
645 break;
646 }
647 }
648 ETHER_UNLOCK(ec);
649 if (vlanidp != NULL)
650 kmem_free(vlanidp, sizeof(*vlanidp));
651
652 if (ec->ec_vlan_cb != NULL) {
653 /*
654 			 * Call ec_vlan_cb() so the parent can clear its VLAN HW
655 			 * filter or HW tagging state for this VID.
656 */
657 (void)(*ec->ec_vlan_cb)(ec, vid, false);
658 }
659 if (--ec->ec_nvlans == 0) {
660 IFNET_LOCK(p);
661 (void)ether_disable_vlan_mtu(p);
662 IFNET_UNLOCK(p);
663 }
664
665 /* XXX ether_ifdetach must not be called with IFNET_LOCK */
666 mutex_exit(&ifv->ifv_lock);
667 IFNET_UNLOCK(ifp);
668 ether_ifdetach(ifp);
669 IFNET_LOCK(ifp);
670 mutex_enter(&ifv->ifv_lock);
671
672 /* if_free_sadl must be called with IFNET_LOCK */
673 if_free_sadl(ifp, 1);
674
675 /* Restore vlan_ioctl overwritten by ether_ifdetach */
676 ifp->if_ioctl = vlan_ioctl;
677 vlan_reset_linkname(ifp);
678 break;
679 }
680
681 default:
682 panic("%s: impossible", __func__);
683 }
684
685 nmib->ifvm_p = NULL;
686 ifv->ifv_if.if_mtu = 0;
687 ifv->ifv_flags = 0;
688
689 mutex_enter(&ifv_hash.lock);
690 PSLIST_WRITER_REMOVE(ifv, ifv_hash);
691 pserialize_perform(vlan_psz);
692 mutex_exit(&ifv_hash.lock);
693 PSLIST_ENTRY_DESTROY(ifv, ifv_hash);
694
695 vlan_linkmib_update(ifv, nmib);
696
697 mutex_exit(&ifv->ifv_lock);
698
699 nmib_psref = NULL;
700 kmem_free(omib, sizeof(*omib));
701
702 #ifdef INET6
703 KERNEL_LOCK_UNLESS_NET_MPSAFE();
704 /* To delete v6 link local addresses */
705 if (in6_present)
706 in6_ifdetach(ifp);
707 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
708 #endif
709
710 if ((ifp->if_flags & IFF_PROMISC) != 0)
711 vlan_safe_ifpromisc_locked(ifp, 0);
712 if_down_locked(ifp);
713 ifp->if_capabilities = 0;
714 mutex_enter(&ifv->ifv_lock);
715 done:
716
717 if (nmib_psref)
718 psref_target_destroy(nmib_psref, ifvm_psref_class);
719
720 return error;
721 }
722
723 static void
724 vlan_hash_init(void)
725 {
726
727 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
728 &ifv_hash.mask);
729 }
730
731 static int
732 vlan_hash_fini(void)
733 {
734 int i;
735
736 mutex_enter(&ifv_hash.lock);
737
738 for (i = 0; i < ifv_hash.mask + 1; i++) {
739 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
740 ifv_hash) != NULL) {
741 mutex_exit(&ifv_hash.lock);
742 return EBUSY;
743 }
744 }
745
746 for (i = 0; i < ifv_hash.mask + 1; i++)
747 PSLIST_DESTROY(&ifv_hash.lists[i]);
748
749 mutex_exit(&ifv_hash.lock);
750
751 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);
752
753 ifv_hash.lists = NULL;
754 ifv_hash.mask = 0;
755
756 return 0;
757 }
758
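/*
 * Fold the tag bits together and mask the result into the hash table
 * size.  Collisions are resolved by walking the per-bucket pslist in
 * vlan_lookup_tag_psref().
 */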
759 static int
760 vlan_tag_hash(uint16_t tag, u_long mask)
761 {
762 uint32_t hash;
763
764 hash = (tag >> 8) ^ tag;
765 hash = (hash >> 2) ^ hash;
766
767 return hash & mask;
768 }
769
770 static struct ifvlan_linkmib *
771 vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
772 {
773 struct ifvlan_linkmib *mib;
774 int s;
775
776 s = pserialize_read_enter();
777 mib = atomic_load_consume(&sc->ifv_mib);
778 if (mib == NULL) {
779 pserialize_read_exit(s);
780 return NULL;
781 }
782 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
783 pserialize_read_exit(s);
784
785 return mib;
786 }
787
788 static void
789 vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
790 {
791 if (mib == NULL)
792 return;
793 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
794 }
795
796 static struct ifvlan_linkmib *
797 vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
798 {
799 int idx;
800 int s;
801 struct ifvlan *sc;
802
803 idx = vlan_tag_hash(tag, ifv_hash.mask);
804
805 s = pserialize_read_enter();
806 PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
807 ifv_hash) {
808 struct ifvlan_linkmib *mib = atomic_load_consume(&sc->ifv_mib);
809 if (mib == NULL)
810 continue;
811 if (mib->ifvm_tag != tag)
812 continue;
813 if (mib->ifvm_p != ifp)
814 continue;
815
816 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
817 pserialize_read_exit(s);
818 return mib;
819 }
820 pserialize_read_exit(s);
821 return NULL;
822 }
823
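/*
 * Publish a new link MIB and retire the old one: store-release the new
 * pointer so lock-free readers see a fully initialized structure, wait
 * for pserialize readers to drain, then wait for psref holders via
 * psref_target_destroy().  The caller frees the old MIB afterwards.
 */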
824 static void
825 vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
826 {
827 struct ifvlan_linkmib *omib = ifv->ifv_mib;
828
829 KASSERT(mutex_owned(&ifv->ifv_lock));
830
831 atomic_store_release(&ifv->ifv_mib, nmib);
832
833 pserialize_perform(ifv->ifv_psz);
834 psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
835 }
836
837 /*
838 * Called when a parent interface is detaching; destroy any VLAN
839 * configuration for the parent interface.
840 */
841 void
842 vlan_ifdetach(struct ifnet *p)
843 {
844 struct ifvlan *ifv;
845 struct ifvlan_linkmib *mib, **nmibs;
846 struct psref psref;
847 int error;
848 int bound;
849 int i, cnt = 0;
850
851 bound = curlwp_bind();
852
853 mutex_enter(&ifv_list.lock);
854 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
855 mib = vlan_getref_linkmib(ifv, &psref);
856 if (mib == NULL)
857 continue;
858
859 if (mib->ifvm_p == p)
860 cnt++;
861
862 vlan_putref_linkmib(mib, &psref);
863 }
864 mutex_exit(&ifv_list.lock);
865
866 if (cnt == 0) {
867 curlwp_bindx(bound);
868 return;
869 }
870
871 /*
872 * The value of "cnt" does not increase while ifv_list.lock
873 * and ifv->ifv_lock are released here, because the parent
874 * interface is detaching.
875 */
876 nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
877 for (i = 0; i < cnt; i++) {
878 nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
879 }
880
881 mutex_enter(&ifv_list.lock);
882
883 i = 0;
884 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
885 struct ifnet *ifp = &ifv->ifv_if;
886
887 /* IFNET_LOCK must be held before ifv_lock. */
888 IFNET_LOCK(ifp);
889 mutex_enter(&ifv->ifv_lock);
890
891 /* XXX ifv_mib = NULL? */
892 if (ifv->ifv_mib->ifvm_p == p) {
893 KASSERTMSG(i < cnt,
894 "no memory for unconfig, parent=%s", p->if_xname);
895 error = vlan_unconfig_locked(ifv, nmibs[i]);
896 if (!error) {
897 nmibs[i] = NULL;
898 i++;
899 }
900
901 }
902
903 mutex_exit(&ifv->ifv_lock);
904 IFNET_UNLOCK(ifp);
905 }
906
907 mutex_exit(&ifv_list.lock);
908
909 curlwp_bindx(bound);
910
911 for (i = 0; i < cnt; i++) {
912 if (nmibs[i])
913 kmem_free(nmibs[i], sizeof(*nmibs[i]));
914 }
915
916 kmem_free(nmibs, sizeof(*nmibs) * cnt);
917
918 return;
919 }
920
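/*
 * Synchronize the parent's promiscuous state with IFF_PROMISC on the
 * vlan interface.  IFVF_PROMISC records whether we have already asked
 * the parent for promiscuous mode, so it is only toggled once.
 */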
921 static int
922 vlan_set_promisc(struct ifnet *ifp)
923 {
924 struct ifvlan *ifv = ifp->if_softc;
925 struct ifvlan_linkmib *mib;
926 struct psref psref;
927 int error = 0;
928 int bound;
929
930 bound = curlwp_bind();
931 mib = vlan_getref_linkmib(ifv, &psref);
932 if (mib == NULL) {
933 curlwp_bindx(bound);
934 return EBUSY;
935 }
936
937 if ((ifp->if_flags & IFF_PROMISC) != 0) {
938 if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
939 error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
940 if (error == 0)
941 ifv->ifv_flags |= IFVF_PROMISC;
942 }
943 } else {
944 if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
945 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
946 if (error == 0)
947 ifv->ifv_flags &= ~IFVF_PROMISC;
948 }
949 }
950 vlan_putref_linkmib(mib, &psref);
951 curlwp_bindx(bound);
952
953 return error;
954 }
955
956 static int
957 vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
958 {
959 struct lwp *l = curlwp;
960 struct ifvlan *ifv = ifp->if_softc;
961 struct ifaddr *ifa = (struct ifaddr *) data;
962 struct ifreq *ifr = (struct ifreq *) data;
963 struct ifnet *pr;
964 struct ifcapreq *ifcr;
965 struct vlanreq vlr;
966 struct ifvlan_linkmib *mib;
967 struct psref psref;
968 int error = 0;
969 int bound;
970
971 switch (cmd) {
972 case SIOCSIFMTU:
973 bound = curlwp_bind();
974 mib = vlan_getref_linkmib(ifv, &psref);
975 if (mib == NULL) {
976 curlwp_bindx(bound);
977 error = EBUSY;
978 break;
979 }
980
981 if (mib->ifvm_p == NULL) {
982 vlan_putref_linkmib(mib, &psref);
983 curlwp_bindx(bound);
984 error = EINVAL;
985 } else if (
986 ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
987 ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
988 vlan_putref_linkmib(mib, &psref);
989 curlwp_bindx(bound);
990 error = EINVAL;
991 } else {
992 vlan_putref_linkmib(mib, &psref);
993 curlwp_bindx(bound);
994
995 error = ifioctl_common(ifp, cmd, data);
996 if (error == ENETRESET)
997 error = 0;
998 }
999
1000 break;
1001
1002 case SIOCSETVLAN:
1003 if ((error = kauth_authorize_network(l->l_cred,
1004 KAUTH_NETWORK_INTERFACE,
1005 KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
1006 NULL)) != 0)
1007 break;
1008 if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
1009 break;
1010
1011 if (vlr.vlr_parent[0] == '\0') {
1012 bound = curlwp_bind();
1013 mib = vlan_getref_linkmib(ifv, &psref);
1014 if (mib == NULL) {
1015 curlwp_bindx(bound);
1016 error = EBUSY;
1017 break;
1018 }
1019
1020 if (mib->ifvm_p != NULL &&
1021 (ifp->if_flags & IFF_PROMISC) != 0)
1022 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
1023
1024 vlan_putref_linkmib(mib, &psref);
1025 curlwp_bindx(bound);
1026
1027 vlan_unconfig(ifp);
1028 break;
1029 }
1030 if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
1031 error = EINVAL; /* check for valid tag */
1032 break;
1033 }
1034 if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
1035 error = ENOENT;
1036 break;
1037 }
1038
1039 error = vlan_config(ifv, pr, vlr.vlr_tag);
1040 if (error != 0)
1041 break;
1042
1043 /* Update promiscuous mode, if necessary. */
1044 vlan_set_promisc(ifp);
1045
1046 ifp->if_flags |= IFF_RUNNING;
1047 break;
1048
1049 case SIOCGETVLAN:
1050 memset(&vlr, 0, sizeof(vlr));
1051 bound = curlwp_bind();
1052 mib = vlan_getref_linkmib(ifv, &psref);
1053 if (mib == NULL) {
1054 curlwp_bindx(bound);
1055 error = EBUSY;
1056 break;
1057 }
1058 if (mib->ifvm_p != NULL) {
1059 snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
1060 mib->ifvm_p->if_xname);
1061 vlr.vlr_tag = mib->ifvm_tag;
1062 }
1063 vlan_putref_linkmib(mib, &psref);
1064 curlwp_bindx(bound);
1065 error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
1066 break;
1067
1068 case SIOCSIFFLAGS:
1069 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1070 break;
1071 /*
1072 		 * If promiscuous mode is needed on the VLAN interface, enable
1073 		 * it on the parent interface as well.
1074 */
1075 bound = curlwp_bind();
1076 mib = vlan_getref_linkmib(ifv, &psref);
1077 if (mib == NULL) {
1078 curlwp_bindx(bound);
1079 error = EBUSY;
1080 break;
1081 }
1082
1083 if (mib->ifvm_p != NULL)
1084 error = vlan_set_promisc(ifp);
1085 vlan_putref_linkmib(mib, &psref);
1086 curlwp_bindx(bound);
1087 break;
1088
1089 case SIOCADDMULTI:
1090 mutex_enter(&ifv->ifv_lock);
1091 mib = ifv->ifv_mib;
1092 if (mib == NULL) {
1093 error = EBUSY;
1094 mutex_exit(&ifv->ifv_lock);
1095 break;
1096 }
1097
1098 error = (mib->ifvm_p != NULL) ?
1099 (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
1100 mib = NULL;
1101 mutex_exit(&ifv->ifv_lock);
1102 break;
1103
1104 case SIOCDELMULTI:
1105 mutex_enter(&ifv->ifv_lock);
1106 mib = ifv->ifv_mib;
1107 if (mib == NULL) {
1108 error = EBUSY;
1109 mutex_exit(&ifv->ifv_lock);
1110 break;
1111 }
1112 error = (mib->ifvm_p != NULL) ?
1113 (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
1114 mib = NULL;
1115 mutex_exit(&ifv->ifv_lock);
1116 break;
1117
1118 case SIOCSIFCAP:
1119 ifcr = data;
1120 /* make sure caps are enabled on parent */
1121 bound = curlwp_bind();
1122 mib = vlan_getref_linkmib(ifv, &psref);
1123 if (mib == NULL) {
1124 curlwp_bindx(bound);
1125 error = EBUSY;
1126 break;
1127 }
1128
1129 if (mib->ifvm_p == NULL) {
1130 vlan_putref_linkmib(mib, &psref);
1131 curlwp_bindx(bound);
1132 error = EINVAL;
1133 break;
1134 }
1135 if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
1136 ifcr->ifcr_capenable) {
1137 vlan_putref_linkmib(mib, &psref);
1138 curlwp_bindx(bound);
1139 error = EINVAL;
1140 break;
1141 }
1142
1143 vlan_putref_linkmib(mib, &psref);
1144 curlwp_bindx(bound);
1145
1146 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
1147 error = 0;
1148 break;
1149 case SIOCINITIFADDR:
1150 bound = curlwp_bind();
1151 mib = vlan_getref_linkmib(ifv, &psref);
1152 if (mib == NULL) {
1153 curlwp_bindx(bound);
1154 error = EBUSY;
1155 break;
1156 }
1157
1158 if (mib->ifvm_p == NULL) {
1159 error = EINVAL;
1160 vlan_putref_linkmib(mib, &psref);
1161 curlwp_bindx(bound);
1162 break;
1163 }
1164 vlan_putref_linkmib(mib, &psref);
1165 curlwp_bindx(bound);
1166
1167 ifp->if_flags |= IFF_UP;
1168 #ifdef INET
1169 if (ifa->ifa_addr->sa_family == AF_INET)
1170 arp_ifinit(ifp, ifa);
1171 #endif
1172 break;
1173
1174 default:
1175 error = ether_ioctl(ifp, cmd, data);
1176 }
1177
1178 return error;
1179 }
1180
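/*
 * Multicast addresses joined on the vlan interface are mirrored onto
 * the parent and recorded in ifv_mc_listhead, so they can be removed
 * again by vlan_ether_delmulti() or vlan_ether_purgemulti().
 */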
1181 static int
1182 vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
1183 {
1184 const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1185 struct vlan_mc_entry *mc;
1186 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1187 struct ifvlan_linkmib *mib;
1188 int error;
1189
1190 KASSERT(mutex_owned(&ifv->ifv_lock));
1191
1192 if (sa->sa_len > sizeof(struct sockaddr_storage))
1193 return EINVAL;
1194
1195 error = ether_addmulti(sa, &ifv->ifv_ec);
1196 if (error != ENETRESET)
1197 return error;
1198
1199 /*
1200 	 * This is a new multicast address.  We have to tell the parent
1201 * about it. Also, remember this multicast address so that
1202 * we can delete it on unconfigure.
1203 */
1204 mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
1205 if (mc == NULL) {
1206 error = ENOMEM;
1207 goto alloc_failed;
1208 }
1209
1210 /*
1211 * Since ether_addmulti() returned ENETRESET, the following two
1212 * statements shouldn't fail. Here ifv_ec is implicitly protected
1213 	 * by ifv_lock.
1214 */
1215 error = ether_multiaddr(sa, addrlo, addrhi);
1216 KASSERT(error == 0);
1217
1218 ETHER_LOCK(&ifv->ifv_ec);
1219 mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1220 ETHER_UNLOCK(&ifv->ifv_ec);
1221
1222 KASSERT(mc->mc_enm != NULL);
1223
1224 memcpy(&mc->mc_addr, sa, sa->sa_len);
1225 LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);
1226
1227 mib = ifv->ifv_mib;
1228
1229 KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1230 error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
1231 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1232
1233 if (error != 0)
1234 goto ioctl_failed;
1235 return error;
1236
1237 ioctl_failed:
1238 LIST_REMOVE(mc, mc_entries);
1239 free(mc, M_DEVBUF);
1240
1241 alloc_failed:
1242 (void)ether_delmulti(sa, &ifv->ifv_ec);
1243 return error;
1244 }
1245
1246 static int
1247 vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
1248 {
1249 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1250 struct ether_multi *enm;
1251 struct vlan_mc_entry *mc;
1252 struct ifvlan_linkmib *mib;
1253 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1254 int error;
1255
1256 KASSERT(mutex_owned(&ifv->ifv_lock));
1257
1258 /*
1259 	 * Find the key used to look up the vlan_mc_entry.  We have to do
1260 	 * this before ether_delmulti() possibly frees the ether_multi.
1261 */
1262 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
1263 return error;
1264
1265 ETHER_LOCK(&ifv->ifv_ec);
1266 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1267 ETHER_UNLOCK(&ifv->ifv_ec);
1268 if (enm == NULL)
1269 return EINVAL;
1270
1271 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
1272 if (mc->mc_enm == enm)
1273 break;
1274 }
1275
1276 	/* We won't delete entries we didn't add. */
1277 if (mc == NULL)
1278 return EINVAL;
1279
1280 error = ether_delmulti(sa, &ifv->ifv_ec);
1281 if (error != ENETRESET)
1282 return error;
1283
1284 	/* We no longer use this multicast address.  Tell the parent so. */
1285 mib = ifv->ifv_mib;
1286 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);
1287
1288 if (error == 0) {
1289 /* And forget about this address. */
1290 LIST_REMOVE(mc, mc_entries);
1291 free(mc, M_DEVBUF);
1292 } else {
1293 (void)ether_addmulti(sa, &ifv->ifv_ec);
1294 }
1295
1296 return error;
1297 }
1298
1299 /*
1300 * Delete any multicast address we have asked to add from parent
1301 * interface. Called when the vlan is being unconfigured.
1302 */
1303 static void
1304 vlan_ether_purgemulti(struct ifvlan *ifv)
1305 {
1306 struct vlan_mc_entry *mc;
1307 struct ifvlan_linkmib *mib;
1308
1309 KASSERT(mutex_owned(&ifv->ifv_lock));
1310 mib = ifv->ifv_mib;
1311 if (mib == NULL) {
1312 return;
1313 }
1314
1315 while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
1316 (void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
1317 sstocsa(&mc->mc_addr));
1318 LIST_REMOVE(mc, mc_entries);
1319 free(mc, M_DEVBUF);
1320 }
1321 }
1322
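/*
 * if_start handler: drain our send queue, add the 802.1Q tag (in the
 * mbuf header if the parent does hardware tagging, otherwise by
 * rewriting the Ethernet header in software) and hand each frame to
 * the parent interface.
 */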
1323 static void
1324 vlan_start(struct ifnet *ifp)
1325 {
1326 struct ifvlan *ifv = ifp->if_softc;
1327 struct ifnet *p;
1328 struct ethercom *ec;
1329 struct mbuf *m;
1330 struct ifvlan_linkmib *mib;
1331 struct psref psref;
1332 int error;
1333
1334 mib = vlan_getref_linkmib(ifv, &psref);
1335 if (mib == NULL)
1336 return;
1337 p = mib->ifvm_p;
1338 ec = (void *)mib->ifvm_p;
1339
1340 ifp->if_flags |= IFF_OACTIVE;
1341
1342 for (;;) {
1343 IFQ_DEQUEUE(&ifp->if_snd, m);
1344 if (m == NULL)
1345 break;
1346
1347 #ifdef ALTQ
1348 /*
1349 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
1350 * defined.
1351 */
1352 KERNEL_LOCK(1, NULL);
1353 /*
1354 * If ALTQ is enabled on the parent interface, do
1355 * classification; the queueing discipline might
1356 * not require classification, but might require
1357 * the address family/header pointer in the pktattr.
1358 */
1359 if (ALTQ_IS_ENABLED(&p->if_snd)) {
1360 switch (p->if_type) {
1361 case IFT_ETHER:
1362 altq_etherclassify(&p->if_snd, m);
1363 break;
1364 default:
1365 panic("%s: impossible (altq)", __func__);
1366 }
1367 }
1368 KERNEL_UNLOCK_ONE(NULL);
1369 #endif /* ALTQ */
1370
1371 bpf_mtap(ifp, m, BPF_D_OUT);
1372 /*
1373 * If the parent can insert the tag itself, just mark
1374 * the tag in the mbuf header.
1375 */
1376 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1377 vlan_set_tag(m, mib->ifvm_tag);
1378 } else {
1379 /*
1380 			 * Insert the tag ourselves.
1381 */
1382 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1383 if (m == NULL) {
1384 				printf("%s: unable to prepend encap header\n",
1385 p->if_xname);
1386 if_statinc(ifp, if_oerrors);
1387 continue;
1388 }
1389
1390 switch (p->if_type) {
1391 case IFT_ETHER:
1392 {
1393 struct ether_vlan_header *evl;
1394
1395 if (m->m_len < sizeof(struct ether_vlan_header))
1396 m = m_pullup(m,
1397 sizeof(struct ether_vlan_header));
1398 if (m == NULL) {
1399 printf("%s: unable to pullup encap "
1400 					    "header\n", p->if_xname);
1401 if_statinc(ifp, if_oerrors);
1402 continue;
1403 }
1404
1405 /*
1406 * Transform the Ethernet header into an
1407 * Ethernet header with 802.1Q encapsulation.
1408 */
1409 memmove(mtod(m, void *),
1410 mtod(m, char *) + mib->ifvm_encaplen,
1411 sizeof(struct ether_header));
1412 evl = mtod(m, struct ether_vlan_header *);
1413 evl->evl_proto = evl->evl_encap_proto;
1414 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1415 evl->evl_tag = htons(mib->ifvm_tag);
1416
1417 /*
1418 * To cater for VLAN-aware layer 2 ethernet
1419 * switches which may need to strip the tag
1420 * before forwarding the packet, make sure
1421 * the packet+tag is at least 68 bytes long.
1422 * This is necessary because our parent will
1423 * only pad to 64 bytes (ETHER_MIN_LEN) and
1424 * some switches will not pad by themselves
1425 * after deleting a tag.
1426 */
1427 const size_t min_data_len = ETHER_MIN_LEN -
1428 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1429 if (m->m_pkthdr.len < min_data_len) {
1430 m_copyback(m, m->m_pkthdr.len,
1431 min_data_len - m->m_pkthdr.len,
1432 vlan_zero_pad_buff);
1433 }
1434 break;
1435 }
1436
1437 default:
1438 panic("%s: impossible", __func__);
1439 }
1440 }
1441
1442 if ((p->if_flags & IFF_RUNNING) == 0) {
1443 m_freem(m);
1444 continue;
1445 }
1446
1447 error = if_transmit_lock(p, m);
1448 if (error) {
1449 /* mbuf is already freed */
1450 if_statinc(ifp, if_oerrors);
1451 continue;
1452 }
1453 if_statinc(ifp, if_opackets);
1454 }
1455
1456 ifp->if_flags &= ~IFF_OACTIVE;
1457
1458 /* Remove reference to mib before release */
1459 vlan_putref_linkmib(mib, &psref);
1460 }
1461
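/*
 * if_transmit handler: same tagging logic as vlan_start(), but for a
 * single mbuf handed to us directly, with pfil(9) output hooks run and
 * packet/byte statistics updated on the vlan interface.
 */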
1462 static int
1463 vlan_transmit(struct ifnet *ifp, struct mbuf *m)
1464 {
1465 struct ifvlan *ifv = ifp->if_softc;
1466 struct ifnet *p;
1467 struct ethercom *ec;
1468 struct ifvlan_linkmib *mib;
1469 struct psref psref;
1470 int error;
1471 size_t pktlen = m->m_pkthdr.len;
1472 bool mcast = (m->m_flags & M_MCAST) != 0;
1473
1474 mib = vlan_getref_linkmib(ifv, &psref);
1475 if (mib == NULL) {
1476 m_freem(m);
1477 return ENETDOWN;
1478 }
1479
1480 p = mib->ifvm_p;
1481 ec = (void *)mib->ifvm_p;
1482
1483 bpf_mtap(ifp, m, BPF_D_OUT);
1484
1485 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
1486 goto out;
1487 if (m == NULL)
1488 goto out;
1489
1490 /*
1491 * If the parent can insert the tag itself, just mark
1492 * the tag in the mbuf header.
1493 */
1494 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1495 vlan_set_tag(m, mib->ifvm_tag);
1496 } else {
1497 /*
1498 		 * Insert the tag ourselves.
1499 */
1500 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1501 if (m == NULL) {
1502 			printf("%s: unable to prepend encap header\n",
1503 p->if_xname);
1504 if_statinc(ifp, if_oerrors);
1505 error = ENOBUFS;
1506 goto out;
1507 }
1508
1509 switch (p->if_type) {
1510 case IFT_ETHER:
1511 {
1512 struct ether_vlan_header *evl;
1513
1514 if (m->m_len < sizeof(struct ether_vlan_header))
1515 m = m_pullup(m,
1516 sizeof(struct ether_vlan_header));
1517 if (m == NULL) {
1518 printf("%s: unable to pullup encap "
1519 				    "header\n", p->if_xname);
1520 if_statinc(ifp, if_oerrors);
1521 error = ENOBUFS;
1522 goto out;
1523 }
1524
1525 /*
1526 * Transform the Ethernet header into an
1527 * Ethernet header with 802.1Q encapsulation.
1528 */
1529 memmove(mtod(m, void *),
1530 mtod(m, char *) + mib->ifvm_encaplen,
1531 sizeof(struct ether_header));
1532 evl = mtod(m, struct ether_vlan_header *);
1533 evl->evl_proto = evl->evl_encap_proto;
1534 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1535 evl->evl_tag = htons(mib->ifvm_tag);
1536
1537 /*
1538 * To cater for VLAN-aware layer 2 ethernet
1539 * switches which may need to strip the tag
1540 * before forwarding the packet, make sure
1541 * the packet+tag is at least 68 bytes long.
1542 * This is necessary because our parent will
1543 * only pad to 64 bytes (ETHER_MIN_LEN) and
1544 * some switches will not pad by themselves
1545 * after deleting a tag.
1546 */
1547 const size_t min_data_len = ETHER_MIN_LEN -
1548 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1549 if (m->m_pkthdr.len < min_data_len) {
1550 m_copyback(m, m->m_pkthdr.len,
1551 min_data_len - m->m_pkthdr.len,
1552 vlan_zero_pad_buff);
1553 }
1554 break;
1555 }
1556
1557 default:
1558 panic("%s: impossible", __func__);
1559 }
1560 }
1561
1562 if ((p->if_flags & IFF_RUNNING) == 0) {
1563 m_freem(m);
1564 error = ENETDOWN;
1565 goto out;
1566 }
1567
1568 error = if_transmit_lock(p, m);
1569 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1570 if (error) {
1571 /* mbuf is already freed */
1572 if_statinc_ref(nsr, if_oerrors);
1573 } else {
1574 if_statinc_ref(nsr, if_opackets);
1575 if_statadd_ref(nsr, if_obytes, pktlen);
1576 if (mcast)
1577 if_statinc_ref(nsr, if_omcasts);
1578 }
1579 IF_STAT_PUTREF(ifp);
1580
1581 out:
1582 /* Remove reference to mib before release */
1583 vlan_putref_linkmib(mib, &psref);
1584 return error;
1585 }
1586
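/*
 * On-the-wire layout assumed by the encapsulation code above and the
 * decapsulation code in vlan_input() below (standard 802.1Q, matching
 * struct ether_vlan_header):
 *
 *	dst(6) src(6) | TPID 0x8100 (2) | TCI: PCP/DEI/VID (2) | proto(2)
 *
 * Only the VID part of the TCI is used here; EVL_VLANOFTAG() extracts it.
 */
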
1587 /*
1588 * Given an Ethernet frame, find a valid vlan interface corresponding to the
1589  * given source interface and tag, strip the encapsulation, and feed
1590  * the packet to that vlan interface's input routine.
1591 */
1592 void
1593 vlan_input(struct ifnet *ifp, struct mbuf *m)
1594 {
1595 struct ifvlan *ifv;
1596 uint16_t vid;
1597 struct ifvlan_linkmib *mib;
1598 struct psref psref;
1599 bool have_vtag;
1600
1601 have_vtag = vlan_has_tag(m);
1602 if (have_vtag) {
1603 vid = EVL_VLANOFTAG(vlan_get_tag(m));
1604 m->m_flags &= ~M_VLANTAG;
1605 } else {
1606 struct ether_vlan_header *evl;
1607
1608 if (ifp->if_type != IFT_ETHER) {
1609 panic("%s: impossible", __func__);
1610 }
1611
1612 if (m->m_len < sizeof(struct ether_vlan_header) &&
1613 (m = m_pullup(m,
1614 sizeof(struct ether_vlan_header))) == NULL) {
1615 printf("%s: no memory for VLAN header, "
1616 "dropping packet.\n", ifp->if_xname);
1617 return;
1618 }
1619 evl = mtod(m, struct ether_vlan_header *);
1620 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1621
1622 vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));
1623
1624 /*
1625 * Restore the original ethertype. We'll remove
1626 * the encapsulation after we've found the vlan
1627 * interface corresponding to the tag.
1628 */
1629 evl->evl_encap_proto = evl->evl_proto;
1630 }
1631
1632 mib = vlan_lookup_tag_psref(ifp, vid, &psref);
1633 if (mib == NULL) {
1634 m_freem(m);
1635 if_statinc(ifp, if_noproto);
1636 return;
1637 }
1638 KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);
1639
1640 ifv = mib->ifvm_ifvlan;
1641 if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
1642 (IFF_UP | IFF_RUNNING)) {
1643 m_freem(m);
1644 if_statinc(ifp, if_noproto);
1645 goto out;
1646 }
1647
1648 /*
1649 * Now, remove the encapsulation header. The original
1650 * header has already been fixed up above.
1651 */
1652 if (!have_vtag) {
1653 memmove(mtod(m, char *) + mib->ifvm_encaplen,
1654 mtod(m, void *), sizeof(struct ether_header));
1655 m_adj(m, mib->ifvm_encaplen);
1656 }
1657
1658 m_set_rcvif(m, &ifv->ifv_if);
1659
1660 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
1661 goto out;
1662 if (m == NULL)
1663 goto out;
1664
1665 m->m_flags &= ~M_PROMISC;
1666 if_input(&ifv->ifv_if, m);
1667 out:
1668 vlan_putref_linkmib(mib, &psref);
1669 }
1670
1671 /*
1672 * Module infrastructure
1673 */
1674 #include "if_module.h"
1675
1676 IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)
1677