1 /*	$NetBSD: if_vlan.c,v 1.153.6.2 2021/08/01 22:42:41 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright 1998 Massachusetts Institute of Technology
34 *
35 * Permission to use, copy, modify, and distribute this software and
36 * its documentation for any purpose and without fee is hereby
37 * granted, provided that both the above copyright notice and this
38 * permission notice appear in all copies, that both the above
39 * copyright notice and this permission notice appear in all
40 * supporting documentation, and that the name of M.I.T. not be used
41 * in advertising or publicity pertaining to distribution of the
42 * software without specific, written prior permission. M.I.T. makes
43 * no representations about the suitability of this software for any
44 * purpose. It is provided "as is" without express or implied
45 * warranty.
46 *
47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62 */
63
64 /*
65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be
66 * extended some day to also handle IEEE 802.1P priority tagging. This is
67 * sort of sneaky in the implementation, since we need to pretend to be
68 * enough of an Ethernet implementation to make ARP work. The way we do
69 * this is by telling everyone that we are an Ethernet interface, and then
70 * catch the packets that ether_output() left on our output queue when it
71 * calls if_start(), rewrite them for use by the real outgoing interface,
72 * and ask it to send them.
73 *
74 * TODO:
75 *
76 * - Need some way to notify vlan interfaces when the parent
77 * interface changes MTU.
78 */
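/*
 * Illustrative usage from userland (interface and address names are
 * examples only):
 *
 *	ifconfig vlan0 create
 *	ifconfig vlan0 vlan 42 vlanif wm0
 *	ifconfig vlan0 inet 192.0.2.1/24 up
 *
 * ifconfig's "vlan"/"vlanif" options are serviced by the SIOCSETVLAN
 * ioctl handled in vlan_ioctl() below.
 */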
79
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.153.6.2 2021/08/01 22:42:41 thorpej Exp $");
82
83 #ifdef _KERNEL_OPT
84 #include "opt_inet.h"
85 #include "opt_net_mpsafe.h"
86 #endif
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/mbuf.h>
92 #include <sys/queue.h>
93 #include <sys/socket.h>
94 #include <sys/sockio.h>
95 #include <sys/systm.h>
96 #include <sys/proc.h>
97 #include <sys/kauth.h>
98 #include <sys/mutex.h>
99 #include <sys/kmem.h>
100 #include <sys/cpu.h>
101 #include <sys/pserialize.h>
102 #include <sys/psref.h>
103 #include <sys/pslist.h>
104 #include <sys/atomic.h>
105 #include <sys/device.h>
106 #include <sys/module.h>
107
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_ether.h>
113 #include <net/if_vlanvar.h>
114
115 #ifdef INET
116 #include <netinet/in.h>
117 #include <netinet/if_inarp.h>
118 #endif
119 #ifdef INET6
120 #include <netinet6/in6_ifattach.h>
121 #include <netinet6/in6_var.h>
122 #include <netinet6/nd6.h>
123 #endif
124
125 #include "ioconf.h"
126
127 struct vlan_mc_entry {
128 LIST_ENTRY(vlan_mc_entry) mc_entries;
129 /*
130 * A key to identify this entry. The mc_addr below can't be
 131 	 * used since multiple sockaddr may be mapped into the same
132 * ether_multi (e.g., AF_UNSPEC).
133 */
134 struct ether_multi *mc_enm;
135 struct sockaddr_storage mc_addr;
136 };
137
138 struct ifvlan_linkmib {
139 struct ifvlan *ifvm_ifvlan;
140 const struct vlan_multisw *ifvm_msw;
141 int ifvm_encaplen; /* encapsulation length */
142 int ifvm_mtufudge; /* MTU fudged by this much */
143 int ifvm_mintu; /* min transmission unit */
144 uint16_t ifvm_proto; /* encapsulation ethertype */
145 uint16_t ifvm_tag; /* tag to apply on packets */
146 struct ifnet *ifvm_p; /* parent interface of this vlan */
147
148 struct psref_target ifvm_psref;
149 };
150
151 struct ifvlan {
152 struct ethercom ifv_ec;
153 struct ifvlan_linkmib *ifv_mib; /*
154 * reader must use vlan_getref_linkmib()
155 * instead of direct dereference
156 */
157 kmutex_t ifv_lock; /* writer lock for ifv_mib */
158 pserialize_t ifv_psz;
159
160 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
161 LIST_ENTRY(ifvlan) ifv_list;
162 struct pslist_entry ifv_hash;
163 int ifv_flags;
164 };
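/*
 * Synchronization summary (as implemented below):
 *
 * - Writers replace ifv_mib while holding ifv_lock and publish the new
 *   mib with vlan_linkmib_update(), which waits out readers via
 *   pserialize_perform()/psref_target_destroy() before the old mib is
 *   freed.
 * - Data-path readers must take a reference with vlan_getref_linkmib()
 *   and drop it with vlan_putref_linkmib().
 * - When both locks are needed, IFNET_LOCK is acquired before ifv_lock.
 */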
165
166 #define IFVF_PROMISC 0x01 /* promiscuous mode enabled */
167
168 #define ifv_if ifv_ec.ec_if
169
170 #define ifv_msw ifv_mib.ifvm_msw
171 #define ifv_encaplen ifv_mib.ifvm_encaplen
172 #define ifv_mtufudge ifv_mib.ifvm_mtufudge
173 #define ifv_mintu ifv_mib.ifvm_mintu
174 #define ifv_tag ifv_mib.ifvm_tag
175
176 struct vlan_multisw {
177 int (*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
178 int (*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
179 void (*vmsw_purgemulti)(struct ifvlan *);
180 };
181
182 static int vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
183 static int vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
184 static void vlan_ether_purgemulti(struct ifvlan *);
185
186 const struct vlan_multisw vlan_ether_multisw = {
187 .vmsw_addmulti = vlan_ether_addmulti,
188 .vmsw_delmulti = vlan_ether_delmulti,
189 .vmsw_purgemulti = vlan_ether_purgemulti,
190 };
191
192 static int vlan_clone_create(struct if_clone *, int);
193 static int vlan_clone_destroy(struct ifnet *);
194 static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t);
195 static int vlan_ioctl(struct ifnet *, u_long, void *);
196 static void vlan_start(struct ifnet *);
197 static int vlan_transmit(struct ifnet *, struct mbuf *);
198 static void vlan_unconfig(struct ifnet *);
199 static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *);
200 static void vlan_hash_init(void);
201 static int vlan_hash_fini(void);
202 static int vlan_tag_hash(uint16_t, u_long);
203 static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *,
204 struct psref *);
205 static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *);
206 static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *);
207 static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *,
208 uint16_t, struct psref *);
209
210 static struct {
211 kmutex_t lock;
212 LIST_HEAD(vlan_ifvlist, ifvlan) list;
213 } ifv_list __cacheline_aligned;
214
215
216 #if !defined(VLAN_TAG_HASH_SIZE)
217 #define VLAN_TAG_HASH_SIZE 32
218 #endif
219 static struct {
220 kmutex_t lock;
221 struct pslist_head *lists;
222 u_long mask;
223 } ifv_hash __cacheline_aligned = {
224 .lists = NULL,
225 .mask = 0,
226 };
227
228 pserialize_t vlan_psz __read_mostly;
229 static struct psref_class *ifvm_psref_class __read_mostly;
230
231 struct if_clone vlan_cloner =
232 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);
233
 234 /* Used to pad Ethernet frames shorter than ETHER_MIN_LEN bytes */
235 static char vlan_zero_pad_buff[ETHER_MIN_LEN];
236
237 static inline int
238 vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
239 {
240 int e;
241
242 KERNEL_LOCK_UNLESS_NET_MPSAFE();
243 e = ifpromisc(ifp, pswitch);
244 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
245
246 return e;
247 }
248
249 __unused static inline int
250 vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
251 {
252 int e;
253
254 KERNEL_LOCK_UNLESS_NET_MPSAFE();
255 e = ifpromisc_locked(ifp, pswitch);
256 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
257
258 return e;
259 }
260
261 void
262 vlanattach(int n)
263 {
264
265 /*
266 * Nothing to do here, initialization is handled by the
267 * module initialization code in vlaninit() below.
268 */
269 }
270
271 static void
272 vlaninit(void)
273 {
274 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
275 LIST_INIT(&ifv_list.list);
276
277 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
278 vlan_psz = pserialize_create();
279 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
280 if_clone_attach(&vlan_cloner);
281
282 vlan_hash_init();
283 MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input);
284 }
285
286 static int
287 vlandetach(void)
288 {
289 bool is_empty;
290 int error;
291
292 mutex_enter(&ifv_list.lock);
293 is_empty = LIST_EMPTY(&ifv_list.list);
294 mutex_exit(&ifv_list.lock);
295
296 if (!is_empty)
297 return EBUSY;
298
299 error = vlan_hash_fini();
300 if (error != 0)
301 return error;
302
303 if_clone_detach(&vlan_cloner);
304 psref_class_destroy(ifvm_psref_class);
305 pserialize_destroy(vlan_psz);
306 mutex_destroy(&ifv_hash.lock);
307 mutex_destroy(&ifv_list.lock);
308
309 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
310 return 0;
311 }
312
313 static void
314 vlan_reset_linkname(struct ifnet *ifp)
315 {
316
317 /*
318 * We start out with a "802.1Q VLAN" type and zero-length
319 * addresses. When we attach to a parent interface, we
320 * inherit its type, address length, address, and data link
321 * type.
322 */
323
324 ifp->if_type = IFT_L2VLAN;
325 ifp->if_addrlen = 0;
326 ifp->if_dlt = DLT_NULL;
327 if_alloc_sadl(ifp);
328 }
329
330 static int
331 vlan_clone_create(struct if_clone *ifc, int unit)
332 {
333 struct ifvlan *ifv;
334 struct ifnet *ifp;
335 struct ifvlan_linkmib *mib;
336
337 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO);
338 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
339 ifp = &ifv->ifv_if;
340 LIST_INIT(&ifv->ifv_mc_listhead);
341
342 mib->ifvm_ifvlan = ifv;
343 mib->ifvm_p = NULL;
344 psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
345
346 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
347 ifv->ifv_psz = pserialize_create();
348 ifv->ifv_mib = mib;
349
350 mutex_enter(&ifv_list.lock);
351 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
352 mutex_exit(&ifv_list.lock);
353
354 if_initname(ifp, ifc->ifc_name, unit);
355 ifp->if_softc = ifv;
356 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
357 #ifdef NET_MPSAFE
358 ifp->if_extflags = IFEF_MPSAFE;
359 #endif
360 ifp->if_start = vlan_start;
361 ifp->if_transmit = vlan_transmit;
362 ifp->if_ioctl = vlan_ioctl;
363 IFQ_SET_READY(&ifp->if_snd);
364 if_initialize(ifp);
365 /*
366 * Set the link state to down.
367 * When the parent interface attaches we will use that link state.
368 * When the parent interface link state changes, so will ours.
369 * When the parent interface detaches, set the link state to down.
370 */
371 ifp->if_link_state = LINK_STATE_DOWN;
372
373 vlan_reset_linkname(ifp);
374 if_register(ifp);
375 return 0;
376 }
377
378 static int
379 vlan_clone_destroy(struct ifnet *ifp)
380 {
381 struct ifvlan *ifv = ifp->if_softc;
382
383 mutex_enter(&ifv_list.lock);
384 LIST_REMOVE(ifv, ifv_list);
385 mutex_exit(&ifv_list.lock);
386
387 IFNET_LOCK(ifp);
388 vlan_unconfig(ifp);
389 IFNET_UNLOCK(ifp);
390 if_detach(ifp);
391
392 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
393 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
394 pserialize_destroy(ifv->ifv_psz);
395 mutex_destroy(&ifv->ifv_lock);
396 free(ifv, M_DEVBUF);
397
398 return 0;
399 }
400
401 /*
402 * Configure a VLAN interface.
403 */
404 static int
405 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
406 {
407 struct ifnet *ifp = &ifv->ifv_if;
408 struct ifvlan_linkmib *nmib = NULL;
409 struct ifvlan_linkmib *omib = NULL;
410 struct ifvlan_linkmib *checkmib;
411 struct psref_target *nmib_psref = NULL;
412 const uint16_t vid = EVL_VLANOFTAG(tag);
413 int error = 0;
414 int idx;
415 bool omib_cleanup = false;
416 struct psref psref;
417
 418 	/* VLAN IDs 0 and 4095 are reserved in the spec */
419 if ((vid == 0) || (vid == 0xfff))
420 return EINVAL;
421
422 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
423 mutex_enter(&ifv->ifv_lock);
424 omib = ifv->ifv_mib;
425
426 if (omib->ifvm_p != NULL) {
427 error = EBUSY;
428 goto done;
429 }
430
431 /* Duplicate check */
432 checkmib = vlan_lookup_tag_psref(p, vid, &psref);
433 if (checkmib != NULL) {
434 vlan_putref_linkmib(checkmib, &psref);
435 error = EEXIST;
436 goto done;
437 }
438
439 *nmib = *omib;
440 nmib_psref = &nmib->ifvm_psref;
441
442 psref_target_init(nmib_psref, ifvm_psref_class);
443
444 switch (p->if_type) {
445 case IFT_ETHER:
446 {
447 struct ethercom *ec = (void *)p;
448 struct vlanid_list *vidmem;
449
450 nmib->ifvm_msw = &vlan_ether_multisw;
451 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
452 nmib->ifvm_mintu = ETHERMIN;
453
454 if (ec->ec_nvlans++ == 0) {
455 IFNET_LOCK(p);
456 error = ether_enable_vlan_mtu(p);
457 IFNET_UNLOCK(p);
458 if (error >= 0) {
459 if (error) {
460 ec->ec_nvlans--;
461 goto done;
462 }
463 nmib->ifvm_mtufudge = 0;
464 } else {
465 /*
466 * Fudge the MTU by the encapsulation size. This
467 * makes us incompatible with strictly compliant
468 * 802.1Q implementations, but allows us to use
469 * the feature with other NetBSD
470 * implementations, which might still be useful.
471 */
472 nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
473 }
474 error = 0;
475 }
476 /* Add a vid to the list */
477 vidmem = kmem_alloc(sizeof(struct vlanid_list), KM_SLEEP);
478 vidmem->vid = vid;
479 ETHER_LOCK(ec);
480 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list);
481 ETHER_UNLOCK(ec);
482
483 if (ec->ec_vlan_cb != NULL) {
484 /*
 485 			 * Call ec_vlan_cb(). It will set up the VLAN HW filter or
486 * HW tagging function.
487 */
488 error = (*ec->ec_vlan_cb)(ec, vid, true);
489 if (error) {
490 ec->ec_nvlans--;
491 if (ec->ec_nvlans == 0) {
492 IFNET_LOCK(p);
493 (void)ether_disable_vlan_mtu(p);
494 IFNET_UNLOCK(p);
495 }
496 goto done;
497 }
498 }
499 /*
500 * If the parent interface can do hardware-assisted
501 * VLAN encapsulation, then propagate its hardware-
 502 		 * assisted checksumming flags and TCP segmentation
503 * offload.
504 */
505 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
506 ifp->if_capabilities = p->if_capabilities &
507 (IFCAP_TSOv4 | IFCAP_TSOv6 |
508 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
509 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
510 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
511 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
512 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx);
513 }
514
515 /*
516 * We inherit the parent's Ethernet address.
517 */
518 ether_ifattach(ifp, CLLADDR(p->if_sadl));
519 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
520 break;
521 }
522
523 default:
524 error = EPROTONOSUPPORT;
525 goto done;
526 }
527
528 nmib->ifvm_p = p;
529 nmib->ifvm_tag = vid;
530 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
531 ifv->ifv_if.if_flags = p->if_flags &
532 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
533
534 /*
535 * Inherit the if_type from the parent. This allows us
536 * to participate in bridges of that type.
537 */
538 ifv->ifv_if.if_type = p->if_type;
539
540 PSLIST_ENTRY_INIT(ifv, ifv_hash);
541 idx = vlan_tag_hash(vid, ifv_hash.mask);
542
543 mutex_enter(&ifv_hash.lock);
544 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
545 mutex_exit(&ifv_hash.lock);
546
547 vlan_linkmib_update(ifv, nmib);
548 nmib = NULL;
549 nmib_psref = NULL;
550 omib_cleanup = true;
551
552
553 /*
 554 	 * We inherit the parent's link state.
555 */
556 if_link_state_change(&ifv->ifv_if, p->if_link_state);
557
558 done:
559 mutex_exit(&ifv->ifv_lock);
560
561 if (nmib_psref)
562 psref_target_destroy(nmib_psref, ifvm_psref_class);
563 if (nmib)
564 kmem_free(nmib, sizeof(*nmib));
565 if (omib_cleanup)
566 kmem_free(omib, sizeof(*omib));
567
568 return error;
569 }
570
571 /*
572 * Unconfigure a VLAN interface.
573 */
574 static void
575 vlan_unconfig(struct ifnet *ifp)
576 {
577 struct ifvlan *ifv = ifp->if_softc;
578 struct ifvlan_linkmib *nmib = NULL;
579 int error;
580
581 KASSERT(IFNET_LOCKED(ifp));
582
583 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
584
585 mutex_enter(&ifv->ifv_lock);
586 error = vlan_unconfig_locked(ifv, nmib);
587 mutex_exit(&ifv->ifv_lock);
588
589 if (error)
590 kmem_free(nmib, sizeof(*nmib));
591 }
592 static int
593 vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
594 {
595 struct ifnet *p;
596 struct ifnet *ifp = &ifv->ifv_if;
597 struct psref_target *nmib_psref = NULL;
598 struct ifvlan_linkmib *omib;
599 int error = 0;
600
601 KASSERT(IFNET_LOCKED(ifp));
602 KASSERT(mutex_owned(&ifv->ifv_lock));
603
604 ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);
605
606 omib = ifv->ifv_mib;
607 p = omib->ifvm_p;
608
609 if (p == NULL) {
610 error = -1;
611 goto done;
612 }
613
614 *nmib = *omib;
615 nmib_psref = &nmib->ifvm_psref;
616 psref_target_init(nmib_psref, ifvm_psref_class);
617
618 /*
619 * Since the interface is being unconfigured, we need to empty the
620 * list of multicast groups that we may have joined while we were
621 * alive and remove them from the parent's list also.
622 */
623 (*nmib->ifvm_msw->vmsw_purgemulti)(ifv);
624
625 /* Disconnect from parent. */
626 switch (p->if_type) {
627 case IFT_ETHER:
628 {
629 struct ethercom *ec = (void *)p;
630 struct vlanid_list *vlanidp;
631 uint16_t vid = EVL_VLANOFTAG(nmib->ifvm_tag);
632
633 ETHER_LOCK(ec);
634 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
635 if (vlanidp->vid == vid) {
636 SIMPLEQ_REMOVE(&ec->ec_vids, vlanidp,
637 vlanid_list, vid_list);
638 break;
639 }
640 }
641 ETHER_UNLOCK(ec);
642 if (vlanidp != NULL)
643 kmem_free(vlanidp, sizeof(*vlanidp));
644
645 if (ec->ec_vlan_cb != NULL) {
646 /*
 647 			 * Call ec_vlan_cb(). It will set up the VLAN HW filter or
648 * HW tagging function.
649 */
650 (void)(*ec->ec_vlan_cb)(ec, vid, false);
651 }
652 if (--ec->ec_nvlans == 0) {
653 IFNET_LOCK(p);
654 (void)ether_disable_vlan_mtu(p);
655 IFNET_UNLOCK(p);
656 }
657
658 /* XXX ether_ifdetach must not be called with IFNET_LOCK */
659 mutex_exit(&ifv->ifv_lock);
660 IFNET_UNLOCK(ifp);
661 ether_ifdetach(ifp);
662 IFNET_LOCK(ifp);
663 mutex_enter(&ifv->ifv_lock);
664
665 /* if_free_sadl must be called with IFNET_LOCK */
666 if_free_sadl(ifp, 1);
667
668 /* Restore vlan_ioctl overwritten by ether_ifdetach */
669 ifp->if_ioctl = vlan_ioctl;
670 vlan_reset_linkname(ifp);
671 break;
672 }
673
674 default:
675 panic("%s: impossible", __func__);
676 }
677
678 nmib->ifvm_p = NULL;
679 ifv->ifv_if.if_mtu = 0;
680 ifv->ifv_flags = 0;
681
682 mutex_enter(&ifv_hash.lock);
683 PSLIST_WRITER_REMOVE(ifv, ifv_hash);
684 pserialize_perform(vlan_psz);
685 mutex_exit(&ifv_hash.lock);
686 PSLIST_ENTRY_DESTROY(ifv, ifv_hash);
687
688 vlan_linkmib_update(ifv, nmib);
689 if_link_state_change(ifp, LINK_STATE_DOWN);
690
691 mutex_exit(&ifv->ifv_lock);
692
693 nmib_psref = NULL;
694 kmem_free(omib, sizeof(*omib));
695
696 #ifdef INET6
697 KERNEL_LOCK_UNLESS_NET_MPSAFE();
698 /* To delete v6 link local addresses */
699 if (in6_present)
700 in6_ifdetach(ifp);
701 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
702 #endif
703
704 if_down_locked(ifp);
705 ifp->if_capabilities = 0;
706 mutex_enter(&ifv->ifv_lock);
707 done:
708
709 if (nmib_psref)
710 psref_target_destroy(nmib_psref, ifvm_psref_class);
711
712 return error;
713 }
714
715 static void
716 vlan_hash_init(void)
717 {
718
719 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
720 &ifv_hash.mask);
721 }
722
723 static int
724 vlan_hash_fini(void)
725 {
726 int i;
727
728 mutex_enter(&ifv_hash.lock);
729
730 for (i = 0; i < ifv_hash.mask + 1; i++) {
731 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
732 ifv_hash) != NULL) {
733 mutex_exit(&ifv_hash.lock);
734 return EBUSY;
735 }
736 }
737
738 for (i = 0; i < ifv_hash.mask + 1; i++)
739 PSLIST_DESTROY(&ifv_hash.lists[i]);
740
741 mutex_exit(&ifv_hash.lock);
742
743 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);
744
745 ifv_hash.lists = NULL;
746 ifv_hash.mask = 0;
747
748 return 0;
749 }
750
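/*
 * Worked example, assuming the default VLAN_TAG_HASH_SIZE of 32
 * (mask 0x1f): tag 0x123 -> (0x123 >> 8) ^ 0x123 = 0x122,
 * (0x122 >> 2) ^ 0x122 = 0x16a, 0x16a & 0x1f = 0xa.
 */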
751 static int
752 vlan_tag_hash(uint16_t tag, u_long mask)
753 {
754 uint32_t hash;
755
756 hash = (tag >> 8) ^ tag;
757 hash = (hash >> 2) ^ hash;
758
759 return hash & mask;
760 }
761
762 static struct ifvlan_linkmib *
763 vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
764 {
765 struct ifvlan_linkmib *mib;
766 int s;
767
768 s = pserialize_read_enter();
769 mib = atomic_load_consume(&sc->ifv_mib);
770 if (mib == NULL) {
771 pserialize_read_exit(s);
772 return NULL;
773 }
774 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
775 pserialize_read_exit(s);
776
777 return mib;
778 }
779
780 static void
781 vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
782 {
783 if (mib == NULL)
784 return;
785 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
786 }
787
788 static struct ifvlan_linkmib *
789 vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
790 {
791 int idx;
792 int s;
793 struct ifvlan *sc;
794
795 idx = vlan_tag_hash(tag, ifv_hash.mask);
796
797 s = pserialize_read_enter();
798 PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
799 ifv_hash) {
800 struct ifvlan_linkmib *mib = atomic_load_consume(&sc->ifv_mib);
801 if (mib == NULL)
802 continue;
803 if (mib->ifvm_tag != tag)
804 continue;
805 if (mib->ifvm_p != ifp)
806 continue;
807
808 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
809 pserialize_read_exit(s);
810 return mib;
811 }
812 pserialize_read_exit(s);
813 return NULL;
814 }
815
816 static void
817 vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
818 {
819 struct ifvlan_linkmib *omib = ifv->ifv_mib;
820
821 KASSERT(mutex_owned(&ifv->ifv_lock));
822
823 atomic_store_release(&ifv->ifv_mib, nmib);
824
825 pserialize_perform(ifv->ifv_psz);
826 psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
827 }
828
829 /*
830 * Called when a parent interface is detaching; destroy any VLAN
831 * configuration for the parent interface.
832 */
833 void
834 vlan_ifdetach(struct ifnet *p)
835 {
836 struct ifvlan *ifv;
837 struct ifvlan_linkmib *mib, **nmibs;
838 struct psref psref;
839 int error;
840 int bound;
841 int i, cnt = 0;
842
843 bound = curlwp_bind();
844
845 mutex_enter(&ifv_list.lock);
846 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
847 mib = vlan_getref_linkmib(ifv, &psref);
848 if (mib == NULL)
849 continue;
850
851 if (mib->ifvm_p == p)
852 cnt++;
853
854 vlan_putref_linkmib(mib, &psref);
855 }
856 mutex_exit(&ifv_list.lock);
857
858 if (cnt == 0) {
859 curlwp_bindx(bound);
860 return;
861 }
862
863 /*
864 * The value of "cnt" does not increase while ifv_list.lock
865 * and ifv->ifv_lock are released here, because the parent
866 * interface is detaching.
867 */
868 nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
869 for (i = 0; i < cnt; i++) {
870 nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
871 }
872
873 mutex_enter(&ifv_list.lock);
874
875 i = 0;
876 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
877 struct ifnet *ifp = &ifv->ifv_if;
878
879 /* IFNET_LOCK must be held before ifv_lock. */
880 IFNET_LOCK(ifp);
881 mutex_enter(&ifv->ifv_lock);
882
883 /* XXX ifv_mib = NULL? */
884 if (ifv->ifv_mib->ifvm_p == p) {
885 KASSERTMSG(i < cnt,
886 "no memory for unconfig, parent=%s", p->if_xname);
887 error = vlan_unconfig_locked(ifv, nmibs[i]);
888 if (!error) {
889 nmibs[i] = NULL;
890 i++;
891 }
892
893 }
894
895 mutex_exit(&ifv->ifv_lock);
896 IFNET_UNLOCK(ifp);
897 }
898
899 mutex_exit(&ifv_list.lock);
900
901 curlwp_bindx(bound);
902
903 for (i = 0; i < cnt; i++) {
904 if (nmibs[i])
905 kmem_free(nmibs[i], sizeof(*nmibs[i]));
906 }
907
908 kmem_free(nmibs, sizeof(*nmibs) * cnt);
909
910 return;
911 }
912
913 static int
914 vlan_set_promisc(struct ifnet *ifp)
915 {
916 struct ifvlan *ifv = ifp->if_softc;
917 struct ifvlan_linkmib *mib;
918 struct psref psref;
919 int error = 0;
920 int bound;
921
922 bound = curlwp_bind();
923 mib = vlan_getref_linkmib(ifv, &psref);
924 if (mib == NULL) {
925 curlwp_bindx(bound);
926 return EBUSY;
927 }
928
929 if ((ifp->if_flags & IFF_PROMISC) != 0) {
930 if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
931 error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
932 if (error == 0)
933 ifv->ifv_flags |= IFVF_PROMISC;
934 }
935 } else {
936 if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
937 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
938 if (error == 0)
939 ifv->ifv_flags &= ~IFVF_PROMISC;
940 }
941 }
942 vlan_putref_linkmib(mib, &psref);
943 curlwp_bindx(bound);
944
945 return error;
946 }
947
948 static int
949 vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
950 {
951 struct lwp *l = curlwp;
952 struct ifvlan *ifv = ifp->if_softc;
953 struct ifaddr *ifa = (struct ifaddr *) data;
954 struct ifreq *ifr = (struct ifreq *) data;
955 struct ifnet *pr;
956 struct ifcapreq *ifcr;
957 struct vlanreq vlr;
958 struct ifvlan_linkmib *mib;
959 struct psref psref;
960 int error = 0;
961 int bound;
962
963 switch (cmd) {
964 case SIOCSIFMTU:
965 bound = curlwp_bind();
966 mib = vlan_getref_linkmib(ifv, &psref);
967 if (mib == NULL) {
968 curlwp_bindx(bound);
969 error = EBUSY;
970 break;
971 }
972
973 if (mib->ifvm_p == NULL) {
974 vlan_putref_linkmib(mib, &psref);
975 curlwp_bindx(bound);
976 error = EINVAL;
977 } else if (
978 ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
979 ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
980 vlan_putref_linkmib(mib, &psref);
981 curlwp_bindx(bound);
982 error = EINVAL;
983 } else {
984 vlan_putref_linkmib(mib, &psref);
985 curlwp_bindx(bound);
986
987 error = ifioctl_common(ifp, cmd, data);
988 if (error == ENETRESET)
989 error = 0;
990 }
991
992 break;
993
994 case SIOCSETVLAN:
995 if ((error = kauth_authorize_network(l->l_cred,
996 KAUTH_NETWORK_INTERFACE,
997 KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
998 NULL)) != 0)
999 break;
1000 if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
1001 break;
1002
1003 if (vlr.vlr_parent[0] == '\0') {
1004 bound = curlwp_bind();
1005 mib = vlan_getref_linkmib(ifv, &psref);
1006 if (mib == NULL) {
1007 curlwp_bindx(bound);
1008 error = EBUSY;
1009 break;
1010 }
1011
1012 if (mib->ifvm_p != NULL &&
1013 (ifp->if_flags & IFF_PROMISC) != 0)
1014 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
1015
1016 vlan_putref_linkmib(mib, &psref);
1017 curlwp_bindx(bound);
1018
1019 vlan_unconfig(ifp);
1020 break;
1021 }
1022 if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
1023 error = EINVAL; /* check for valid tag */
1024 break;
1025 }
1026 if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
1027 error = ENOENT;
1028 break;
1029 }
1030
1031 error = vlan_config(ifv, pr, vlr.vlr_tag);
1032 if (error != 0)
1033 break;
1034
1035 /* Update promiscuous mode, if necessary. */
1036 vlan_set_promisc(ifp);
1037
1038 ifp->if_flags |= IFF_RUNNING;
1039 break;
1040
1041 case SIOCGETVLAN:
1042 memset(&vlr, 0, sizeof(vlr));
1043 bound = curlwp_bind();
1044 mib = vlan_getref_linkmib(ifv, &psref);
1045 if (mib == NULL) {
1046 curlwp_bindx(bound);
1047 error = EBUSY;
1048 break;
1049 }
1050 if (mib->ifvm_p != NULL) {
1051 snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
1052 mib->ifvm_p->if_xname);
1053 vlr.vlr_tag = mib->ifvm_tag;
1054 }
1055 vlan_putref_linkmib(mib, &psref);
1056 curlwp_bindx(bound);
1057 error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
1058 break;
1059
1060 case SIOCSIFFLAGS:
1061 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1062 break;
1063 /*
1064 * For promiscuous mode, we enable promiscuous mode on
1065 * the parent if we need promiscuous on the VLAN interface.
1066 */
1067 bound = curlwp_bind();
1068 mib = vlan_getref_linkmib(ifv, &psref);
1069 if (mib == NULL) {
1070 curlwp_bindx(bound);
1071 error = EBUSY;
1072 break;
1073 }
1074
1075 if (mib->ifvm_p != NULL)
1076 error = vlan_set_promisc(ifp);
1077 vlan_putref_linkmib(mib, &psref);
1078 curlwp_bindx(bound);
1079 break;
1080
1081 case SIOCADDMULTI:
1082 mutex_enter(&ifv->ifv_lock);
1083 mib = ifv->ifv_mib;
1084 if (mib == NULL) {
1085 error = EBUSY;
1086 mutex_exit(&ifv->ifv_lock);
1087 break;
1088 }
1089
1090 error = (mib->ifvm_p != NULL) ?
1091 (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
1092 mib = NULL;
1093 mutex_exit(&ifv->ifv_lock);
1094 break;
1095
1096 case SIOCDELMULTI:
1097 mutex_enter(&ifv->ifv_lock);
1098 mib = ifv->ifv_mib;
1099 if (mib == NULL) {
1100 error = EBUSY;
1101 mutex_exit(&ifv->ifv_lock);
1102 break;
1103 }
1104 error = (mib->ifvm_p != NULL) ?
1105 (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
1106 mib = NULL;
1107 mutex_exit(&ifv->ifv_lock);
1108 break;
1109
1110 case SIOCSIFCAP:
1111 ifcr = data;
1112 /* make sure caps are enabled on parent */
1113 bound = curlwp_bind();
1114 mib = vlan_getref_linkmib(ifv, &psref);
1115 if (mib == NULL) {
1116 curlwp_bindx(bound);
1117 error = EBUSY;
1118 break;
1119 }
1120
1121 if (mib->ifvm_p == NULL) {
1122 vlan_putref_linkmib(mib, &psref);
1123 curlwp_bindx(bound);
1124 error = EINVAL;
1125 break;
1126 }
1127 if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
1128 ifcr->ifcr_capenable) {
1129 vlan_putref_linkmib(mib, &psref);
1130 curlwp_bindx(bound);
1131 error = EINVAL;
1132 break;
1133 }
1134
1135 vlan_putref_linkmib(mib, &psref);
1136 curlwp_bindx(bound);
1137
1138 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
1139 error = 0;
1140 break;
1141 case SIOCINITIFADDR:
1142 bound = curlwp_bind();
1143 mib = vlan_getref_linkmib(ifv, &psref);
1144 if (mib == NULL) {
1145 curlwp_bindx(bound);
1146 error = EBUSY;
1147 break;
1148 }
1149
1150 if (mib->ifvm_p == NULL) {
1151 error = EINVAL;
1152 vlan_putref_linkmib(mib, &psref);
1153 curlwp_bindx(bound);
1154 break;
1155 }
1156 vlan_putref_linkmib(mib, &psref);
1157 curlwp_bindx(bound);
1158
1159 ifp->if_flags |= IFF_UP;
1160 #ifdef INET
1161 if (ifa->ifa_addr->sa_family == AF_INET)
1162 arp_ifinit(ifp, ifa);
1163 #endif
1164 break;
1165
1166 default:
1167 error = ether_ioctl(ifp, cmd, data);
1168 }
1169
1170 return error;
1171 }
1172
1173 static int
1174 vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
1175 {
1176 const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1177 struct vlan_mc_entry *mc;
1178 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1179 struct ifvlan_linkmib *mib;
1180 int error;
1181
1182 KASSERT(mutex_owned(&ifv->ifv_lock));
1183
1184 if (sa->sa_len > sizeof(struct sockaddr_storage))
1185 return EINVAL;
1186
1187 error = ether_addmulti(sa, &ifv->ifv_ec);
1188 if (error != ENETRESET)
1189 return error;
1190
1191 /*
 1192 	 * This is a new multicast address. We have to tell the parent
1193 * about it. Also, remember this multicast address so that
1194 * we can delete it on unconfigure.
1195 */
1196 mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
1197 if (mc == NULL) {
1198 error = ENOMEM;
1199 goto alloc_failed;
1200 }
1201
1202 /*
1203 * Since ether_addmulti() returned ENETRESET, the following two
1204 * statements shouldn't fail. Here ifv_ec is implicitly protected
 1205 	 * by ifv_lock.
1206 */
1207 error = ether_multiaddr(sa, addrlo, addrhi);
1208 KASSERT(error == 0);
1209
1210 ETHER_LOCK(&ifv->ifv_ec);
1211 mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1212 ETHER_UNLOCK(&ifv->ifv_ec);
1213
1214 KASSERT(mc->mc_enm != NULL);
1215
1216 memcpy(&mc->mc_addr, sa, sa->sa_len);
1217 LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);
1218
1219 mib = ifv->ifv_mib;
1220
1221 KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1222 error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
1223 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1224
1225 if (error != 0)
1226 goto ioctl_failed;
1227 return error;
1228
1229 ioctl_failed:
1230 LIST_REMOVE(mc, mc_entries);
1231 free(mc, M_DEVBUF);
1232
1233 alloc_failed:
1234 (void)ether_delmulti(sa, &ifv->ifv_ec);
1235 return error;
1236 }
1237
1238 static int
1239 vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
1240 {
1241 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1242 struct ether_multi *enm;
1243 struct vlan_mc_entry *mc;
1244 struct ifvlan_linkmib *mib;
1245 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1246 int error;
1247
1248 KASSERT(mutex_owned(&ifv->ifv_lock));
1249
1250 /*
 1251 	 * Find a key to look up the vlan_mc_entry. We have to do this
1252 * before calling ether_delmulti for obvious reasons.
1253 */
1254 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
1255 return error;
1256
1257 ETHER_LOCK(&ifv->ifv_ec);
1258 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1259 ETHER_UNLOCK(&ifv->ifv_ec);
1260 if (enm == NULL)
1261 return EINVAL;
1262
1263 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
1264 if (mc->mc_enm == enm)
1265 break;
1266 }
1267
 1268 	/* We won't delete entries we didn't add */
1269 if (mc == NULL)
1270 return EINVAL;
1271
1272 error = ether_delmulti(sa, &ifv->ifv_ec);
1273 if (error != ENETRESET)
1274 return error;
1275
 1276 	/* We no longer use this multicast address. Tell the parent so. */
1277 mib = ifv->ifv_mib;
1278 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);
1279
1280 if (error == 0) {
1281 /* And forget about this address. */
1282 LIST_REMOVE(mc, mc_entries);
1283 free(mc, M_DEVBUF);
1284 } else {
1285 (void)ether_addmulti(sa, &ifv->ifv_ec);
1286 }
1287
1288 return error;
1289 }
1290
1291 /*
1292 * Delete any multicast address we have asked to add from parent
1293 * interface. Called when the vlan is being unconfigured.
1294 */
1295 static void
1296 vlan_ether_purgemulti(struct ifvlan *ifv)
1297 {
1298 struct vlan_mc_entry *mc;
1299 struct ifvlan_linkmib *mib;
1300
1301 KASSERT(mutex_owned(&ifv->ifv_lock));
1302 mib = ifv->ifv_mib;
1303 if (mib == NULL) {
1304 return;
1305 }
1306
1307 while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
1308 (void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
1309 sstocsa(&mc->mc_addr));
1310 LIST_REMOVE(mc, mc_entries);
1311 free(mc, M_DEVBUF);
1312 }
1313 }
1314
1315 static void
1316 vlan_start(struct ifnet *ifp)
1317 {
1318 struct ifvlan *ifv = ifp->if_softc;
1319 struct ifnet *p;
1320 struct ethercom *ec;
1321 struct mbuf *m;
1322 struct ifvlan_linkmib *mib;
1323 struct psref psref;
1324 struct ether_header *eh;
1325 int error;
1326
1327 mib = vlan_getref_linkmib(ifv, &psref);
1328 if (mib == NULL)
1329 return;
1330
1331 if (__predict_false(mib->ifvm_p == NULL)) {
1332 vlan_putref_linkmib(mib, &psref);
1333 return;
1334 }
1335
1336 p = mib->ifvm_p;
1337 ec = (void *)mib->ifvm_p;
1338
1339 ifp->if_flags |= IFF_OACTIVE;
1340
1341 for (;;) {
1342 IFQ_DEQUEUE(&ifp->if_snd, m);
1343 if (m == NULL)
1344 break;
1345
1346 if (m->m_len < sizeof(*eh)) {
1347 m = m_pullup(m, sizeof(*eh));
1348 if (m == NULL) {
1349 if_statinc(ifp, if_oerrors);
1350 continue;
1351 }
1352 }
1353
1354 eh = mtod(m, struct ether_header *);
1355 if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) {
1356 m_freem(m);
1357 if_statinc(ifp, if_noproto);
1358 continue;
1359 }
1360
1361 #ifdef ALTQ
1362 /*
1363 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
1364 * defined.
1365 */
1366 KERNEL_LOCK(1, NULL);
1367 /*
1368 * If ALTQ is enabled on the parent interface, do
1369 * classification; the queueing discipline might
1370 * not require classification, but might require
1371 * the address family/header pointer in the pktattr.
1372 */
1373 if (ALTQ_IS_ENABLED(&p->if_snd)) {
1374 switch (p->if_type) {
1375 case IFT_ETHER:
1376 altq_etherclassify(&p->if_snd, m);
1377 break;
1378 default:
1379 panic("%s: impossible (altq)", __func__);
1380 }
1381 }
1382 KERNEL_UNLOCK_ONE(NULL);
1383 #endif /* ALTQ */
1384
1385 bpf_mtap(ifp, m, BPF_D_OUT);
1386 /*
1387 * If the parent can insert the tag itself, just mark
1388 * the tag in the mbuf header.
1389 */
1390 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1391 vlan_set_tag(m, mib->ifvm_tag);
1392 } else {
1393 /*
1394 * insert the tag ourselves
1395 */
1396 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1397 if (m == NULL) {
1398 printf("%s: unable to prepend encap header",
1399 p->if_xname);
1400 if_statinc(ifp, if_oerrors);
1401 continue;
1402 }
1403
1404 switch (p->if_type) {
1405 case IFT_ETHER:
1406 {
1407 struct ether_vlan_header *evl;
1408
1409 if (m->m_len < sizeof(struct ether_vlan_header))
1410 m = m_pullup(m,
1411 sizeof(struct ether_vlan_header));
1412 if (m == NULL) {
1413 printf("%s: unable to pullup encap "
1414 "header", p->if_xname);
1415 if_statinc(ifp, if_oerrors);
1416 continue;
1417 }
1418
1419 /*
1420 * Transform the Ethernet header into an
1421 * Ethernet header with 802.1Q encapsulation.
1422 */
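			/*
			 * Illustrative layout change:
			 *   before: | dst | src | type | payload ...
			 *   after:  | dst | src | 0x8100 | tag | type | payload ...
			 */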
1423 memmove(mtod(m, void *),
1424 mtod(m, char *) + mib->ifvm_encaplen,
1425 sizeof(struct ether_header));
1426 evl = mtod(m, struct ether_vlan_header *);
1427 evl->evl_proto = evl->evl_encap_proto;
1428 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1429 evl->evl_tag = htons(mib->ifvm_tag);
1430
1431 /*
1432 * To cater for VLAN-aware layer 2 ethernet
1433 * switches which may need to strip the tag
1434 * before forwarding the packet, make sure
1435 * the packet+tag is at least 68 bytes long.
1436 * This is necessary because our parent will
1437 * only pad to 64 bytes (ETHER_MIN_LEN) and
1438 * some switches will not pad by themselves
1439 * after deleting a tag.
1440 */
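				/*
				 * With the standard constants this works out to
				 * ETHER_MIN_LEN (64) - ETHER_CRC_LEN (4) +
				 * ETHER_VLAN_ENCAP_LEN (4) = 64 bytes of data,
				 * i.e. 68 bytes on the wire once the FCS is added.
				 */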
1441 const size_t min_data_len = ETHER_MIN_LEN -
1442 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1443 if (m->m_pkthdr.len < min_data_len) {
1444 m_copyback(m, m->m_pkthdr.len,
1445 min_data_len - m->m_pkthdr.len,
1446 vlan_zero_pad_buff);
1447 }
1448 break;
1449 }
1450
1451 default:
1452 panic("%s: impossible", __func__);
1453 }
1454 }
1455
1456 if ((p->if_flags & IFF_RUNNING) == 0) {
1457 m_freem(m);
1458 continue;
1459 }
1460
1461 error = if_transmit_lock(p, m);
1462 if (error) {
1463 /* mbuf is already freed */
1464 if_statinc(ifp, if_oerrors);
1465 continue;
1466 }
1467 if_statinc(ifp, if_opackets);
1468 }
1469
1470 ifp->if_flags &= ~IFF_OACTIVE;
1471
1472 /* Remove reference to mib before release */
1473 vlan_putref_linkmib(mib, &psref);
1474 }
1475
1476 static int
1477 vlan_transmit(struct ifnet *ifp, struct mbuf *m)
1478 {
1479 struct ifvlan *ifv = ifp->if_softc;
1480 struct ifnet *p;
1481 struct ethercom *ec;
1482 struct ifvlan_linkmib *mib;
1483 struct psref psref;
1484 struct ether_header *eh;
1485 int error;
1486 size_t pktlen = m->m_pkthdr.len;
1487 bool mcast = (m->m_flags & M_MCAST) != 0;
1488
1489 if (m->m_len < sizeof(*eh)) {
1490 m = m_pullup(m, sizeof(*eh));
1491 if (m == NULL) {
1492 if_statinc(ifp, if_oerrors);
1493 return ENOBUFS;
1494 }
1495 }
1496
1497 eh = mtod(m, struct ether_header *);
1498 if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) {
1499 m_freem(m);
1500 if_statinc(ifp, if_noproto);
1501 return EPROTONOSUPPORT;
1502 }
1503
1504 mib = vlan_getref_linkmib(ifv, &psref);
1505 if (mib == NULL) {
1506 m_freem(m);
1507 return ENETDOWN;
1508 }
1509
1510 if (__predict_false(mib->ifvm_p == NULL)) {
1511 vlan_putref_linkmib(mib, &psref);
1512 m_freem(m);
1513 return ENETDOWN;
1514 }
1515
1516 p = mib->ifvm_p;
1517 ec = (void *)mib->ifvm_p;
1518
1519 bpf_mtap(ifp, m, BPF_D_OUT);
1520
1521 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
1522 goto out;
1523 if (m == NULL)
1524 goto out;
1525
1526 /*
1527 * If the parent can insert the tag itself, just mark
1528 * the tag in the mbuf header.
1529 */
1530 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1531 vlan_set_tag(m, mib->ifvm_tag);
1532 } else {
1533 /*
1534 * insert the tag ourselves
1535 */
1536 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1537 if (m == NULL) {
1538 printf("%s: unable to prepend encap header",
1539 p->if_xname);
1540 if_statinc(ifp, if_oerrors);
1541 error = ENOBUFS;
1542 goto out;
1543 }
1544
1545 switch (p->if_type) {
1546 case IFT_ETHER:
1547 {
1548 struct ether_vlan_header *evl;
1549
1550 if (m->m_len < sizeof(struct ether_vlan_header))
1551 m = m_pullup(m,
1552 sizeof(struct ether_vlan_header));
1553 if (m == NULL) {
1554 printf("%s: unable to pullup encap "
1555 "header", p->if_xname);
1556 if_statinc(ifp, if_oerrors);
1557 error = ENOBUFS;
1558 goto out;
1559 }
1560
1561 /*
1562 * Transform the Ethernet header into an
1563 * Ethernet header with 802.1Q encapsulation.
1564 */
1565 memmove(mtod(m, void *),
1566 mtod(m, char *) + mib->ifvm_encaplen,
1567 sizeof(struct ether_header));
1568 evl = mtod(m, struct ether_vlan_header *);
1569 evl->evl_proto = evl->evl_encap_proto;
1570 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1571 evl->evl_tag = htons(mib->ifvm_tag);
1572
1573 /*
1574 * To cater for VLAN-aware layer 2 ethernet
1575 * switches which may need to strip the tag
1576 * before forwarding the packet, make sure
1577 * the packet+tag is at least 68 bytes long.
1578 * This is necessary because our parent will
1579 * only pad to 64 bytes (ETHER_MIN_LEN) and
1580 * some switches will not pad by themselves
1581 * after deleting a tag.
1582 */
1583 const size_t min_data_len = ETHER_MIN_LEN -
1584 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1585 if (m->m_pkthdr.len < min_data_len) {
1586 m_copyback(m, m->m_pkthdr.len,
1587 min_data_len - m->m_pkthdr.len,
1588 vlan_zero_pad_buff);
1589 }
1590 break;
1591 }
1592
1593 default:
1594 panic("%s: impossible", __func__);
1595 }
1596 }
1597
1598 if ((p->if_flags & IFF_RUNNING) == 0) {
1599 m_freem(m);
1600 error = ENETDOWN;
1601 goto out;
1602 }
1603
1604 error = if_transmit_lock(p, m);
1605 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1606 if (error) {
1607 /* mbuf is already freed */
1608 if_statinc_ref(nsr, if_oerrors);
1609 } else {
1610 if_statinc_ref(nsr, if_opackets);
1611 if_statadd_ref(nsr, if_obytes, pktlen);
1612 if (mcast)
1613 if_statinc_ref(nsr, if_omcasts);
1614 }
1615 IF_STAT_PUTREF(ifp);
1616
1617 out:
1618 /* Remove reference to mib before release */
1619 vlan_putref_linkmib(mib, &psref);
1620 return error;
1621 }
1622
1623 /*
1624 * Given an Ethernet frame, find a valid vlan interface corresponding to the
1625 * given source interface and tag, then run the real packet through the
1626 * parent's input routine.
1627 */
1628 void
1629 vlan_input(struct ifnet *ifp, struct mbuf *m)
1630 {
1631 struct ifvlan *ifv;
1632 uint16_t vid;
1633 struct ifvlan_linkmib *mib;
1634 struct psref psref;
1635 bool have_vtag;
1636
1637 have_vtag = vlan_has_tag(m);
1638 if (have_vtag) {
1639 vid = EVL_VLANOFTAG(vlan_get_tag(m));
1640 m->m_flags &= ~M_VLANTAG;
1641 } else {
1642 struct ether_vlan_header *evl;
1643
1644 if (ifp->if_type != IFT_ETHER) {
1645 panic("%s: impossible", __func__);
1646 }
1647
1648 if (m->m_len < sizeof(struct ether_vlan_header) &&
1649 (m = m_pullup(m,
1650 sizeof(struct ether_vlan_header))) == NULL) {
1651 printf("%s: no memory for VLAN header, "
1652 "dropping packet.\n", ifp->if_xname);
1653 return;
1654 }
1655
1656 if (m_makewritable(&m, 0,
1657 sizeof(struct ether_vlan_header), M_DONTWAIT)) {
1658 m_freem(m);
1659 if_statinc(ifp, if_ierrors);
1660 return;
1661 }
1662
1663 evl = mtod(m, struct ether_vlan_header *);
1664 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1665
1666 vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));
1667
1668 /*
1669 * Restore the original ethertype. We'll remove
1670 * the encapsulation after we've found the vlan
1671 * interface corresponding to the tag.
1672 */
1673 evl->evl_encap_proto = evl->evl_proto;
1674 }
1675
1676 mib = vlan_lookup_tag_psref(ifp, vid, &psref);
1677 if (mib == NULL) {
1678 m_freem(m);
1679 if_statinc(ifp, if_noproto);
1680 return;
1681 }
1682 KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);
1683
1684 ifv = mib->ifvm_ifvlan;
1685 if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
1686 (IFF_UP | IFF_RUNNING)) {
1687 m_freem(m);
1688 if_statinc(ifp, if_noproto);
1689 goto out;
1690 }
1691
1692 /*
1693 * Now, remove the encapsulation header. The original
1694 * header has already been fixed up above.
1695 */
1696 if (!have_vtag) {
1697 memmove(mtod(m, char *) + mib->ifvm_encaplen,
1698 mtod(m, void *), sizeof(struct ether_header));
1699 m_adj(m, mib->ifvm_encaplen);
1700 }
1701
1702 /*
1703 * Drop promiscuously received packets if we are not in
1704 * promiscuous mode
1705 */
1706 if ((m->m_flags & (M_BCAST | M_MCAST)) == 0 &&
1707 (ifp->if_flags & IFF_PROMISC) &&
1708 (ifv->ifv_if.if_flags & IFF_PROMISC) == 0) {
1709 struct ether_header *eh;
1710
1711 eh = mtod(m, struct ether_header *);
1712 if (memcmp(CLLADDR(ifv->ifv_if.if_sadl),
1713 eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
1714 m_freem(m);
1715 if_statinc(&ifv->ifv_if, if_ierrors);
1716 goto out;
1717 }
1718 }
1719
1720 m_set_rcvif(m, &ifv->ifv_if);
1721
1722 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
1723 goto out;
1724 if (m == NULL)
1725 goto out;
1726
1727 m->m_flags &= ~M_PROMISC;
1728 if_input(&ifv->ifv_if, m);
1729 out:
1730 vlan_putref_linkmib(mib, &psref);
1731 }
1732
1733 /*
1734 * If the parent link state changed, the vlan link state should change also.
1735 */
1736 void
1737 vlan_link_state_changed(struct ifnet *p, int link_state)
1738 {
1739 struct ifvlan *ifv;
1740 struct ifvlan_linkmib *mib;
1741 struct psref psref;
1742 struct ifnet *ifp;
1743
1744 mutex_enter(&ifv_list.lock);
1745
1746 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
1747 mib = vlan_getref_linkmib(ifv, &psref);
1748 if (mib == NULL)
1749 continue;
1750
1751 if (mib->ifvm_p == p) {
1752 ifp = &mib->ifvm_ifvlan->ifv_if;
1753 if_link_state_change(ifp, link_state);
1754 }
1755
1756 vlan_putref_linkmib(mib, &psref);
1757 }
1758
1759 mutex_exit(&ifv_list.lock);
1760 }
1761
1762 /*
1763 * Module infrastructure
1764 */
1765 #include "if_module.h"
1766
1767 IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)
1768