1 /* $NetBSD: if_vlan.c,v 1.158 2021/07/14 06:23:06 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright 1998 Massachusetts Institute of Technology
34 *
35 * Permission to use, copy, modify, and distribute this software and
36 * its documentation for any purpose and without fee is hereby
37 * granted, provided that both the above copyright notice and this
38 * permission notice appear in all copies, that both the above
39 * copyright notice and this permission notice appear in all
40 * supporting documentation, and that the name of M.I.T. not be used
41 * in advertising or publicity pertaining to distribution of the
42 * software without specific, written prior permission. M.I.T. makes
43 * no representations about the suitability of this software for any
44 * purpose. It is provided "as is" without express or implied
45 * warranty.
46 *
47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62 */
63
64 /*
65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be
66 * extended some day to also handle IEEE 802.1P priority tagging. This is
67 * sort of sneaky in the implementation, since we need to pretend to be
68 * enough of an Ethernet implementation to make ARP work. The way we do
69 * this is by telling everyone that we are an Ethernet interface, and then
70 * catch the packets that ether_output() left on our output queue when it
71 * calls if_start(), rewrite them for use by the real outgoing interface,
72 * and ask it to send them.
73 *
74 * TODO:
75 *
76 * - Need some way to notify vlan interfaces when the parent
77 * interface changes MTU.
78 */
79
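/*
 * Example configuration from userland (illustrative only; assumes the
 * parent interface is wm0 and VLAN ID 5):
 *
 *	ifconfig vlan0 create
 *	ifconfig vlan0 vlan 5 vlanif wm0
 */
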
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.158 2021/07/14 06:23:06 yamaguchi Exp $");
82
83 #ifdef _KERNEL_OPT
84 #include "opt_inet.h"
85 #include "opt_net_mpsafe.h"
86 #endif
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/mbuf.h>
92 #include <sys/queue.h>
93 #include <sys/socket.h>
94 #include <sys/sockio.h>
96 #include <sys/proc.h>
97 #include <sys/kauth.h>
98 #include <sys/mutex.h>
99 #include <sys/kmem.h>
100 #include <sys/cpu.h>
101 #include <sys/pserialize.h>
102 #include <sys/psref.h>
103 #include <sys/pslist.h>
104 #include <sys/atomic.h>
105 #include <sys/device.h>
106 #include <sys/module.h>
107
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_ether.h>
113 #include <net/if_vlanvar.h>
114
115 #ifdef INET
116 #include <netinet/in.h>
117 #include <netinet/if_inarp.h>
118 #endif
119 #ifdef INET6
120 #include <netinet6/in6_ifattach.h>
121 #include <netinet6/in6_var.h>
122 #include <netinet6/nd6.h>
123 #endif
124
125 #include "ioconf.h"
126
127 struct vlan_mc_entry {
128 LIST_ENTRY(vlan_mc_entry) mc_entries;
129 /*
130 * A key to identify this entry. The mc_addr below can't be
131 * used, since multiple sockaddrs may be mapped to the same
132 * ether_multi (e.g., AF_UNSPEC).
133 */
134 struct ether_multi *mc_enm;
135 struct sockaddr_storage mc_addr;
136 };
137
138 struct ifvlan_linkmib {
139 struct ifvlan *ifvm_ifvlan;
140 const struct vlan_multisw *ifvm_msw;
141 int ifvm_encaplen; /* encapsulation length */
142 int ifvm_mtufudge; /* MTU fudged by this much */
143 int ifvm_mintu; /* min transmission unit */
144 uint16_t ifvm_proto; /* encapsulation ethertype */
145 uint16_t ifvm_tag; /* tag to apply on packets */
146 struct ifnet *ifvm_p; /* parent interface of this vlan */
147
148 struct psref_target ifvm_psref;
149 };
150
151 struct ifvlan {
152 struct ethercom ifv_ec;
153 struct ifvlan_linkmib *ifv_mib; /*
154 * readers must use vlan_getref_linkmib()
155 * instead of dereferencing this directly
156 */
157 kmutex_t ifv_lock; /* writer lock for ifv_mib */
158 pserialize_t ifv_psz;
159
160 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
161 LIST_ENTRY(ifvlan) ifv_list;
162 struct pslist_entry ifv_hash;
163 int ifv_flags;
164 };
165
166 #define IFVF_PROMISC 0x01 /* promiscuous mode enabled */
167
168 #define ifv_if ifv_ec.ec_if
169
170 #define ifv_msw ifv_mib.ifvm_msw
171 #define ifv_encaplen ifv_mib.ifvm_encaplen
172 #define ifv_mtufudge ifv_mib.ifvm_mtufudge
173 #define ifv_mintu ifv_mib.ifvm_mintu
174 #define ifv_tag ifv_mib.ifvm_tag
175
176 struct vlan_multisw {
177 int (*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
178 int (*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
179 void (*vmsw_purgemulti)(struct ifvlan *);
180 };
181
182 static int vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
183 static int vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
184 static void vlan_ether_purgemulti(struct ifvlan *);
185
186 const struct vlan_multisw vlan_ether_multisw = {
187 .vmsw_addmulti = vlan_ether_addmulti,
188 .vmsw_delmulti = vlan_ether_delmulti,
189 .vmsw_purgemulti = vlan_ether_purgemulti,
190 };
191
192 static int vlan_clone_create(struct if_clone *, int);
193 static int vlan_clone_destroy(struct ifnet *);
194 static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t);
195 static int vlan_ioctl(struct ifnet *, u_long, void *);
196 static void vlan_start(struct ifnet *);
197 static int vlan_transmit(struct ifnet *, struct mbuf *);
198 static void vlan_unconfig(struct ifnet *);
199 static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *);
200 static void vlan_hash_init(void);
201 static int vlan_hash_fini(void);
202 static int vlan_tag_hash(uint16_t, u_long);
203 static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *,
204 struct psref *);
205 static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *);
206 static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *);
207 static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *,
208 uint16_t, struct psref *);
209
210 static struct {
211 kmutex_t lock;
212 LIST_HEAD(vlan_ifvlist, ifvlan) list;
213 } ifv_list __cacheline_aligned;
214
215
216 #if !defined(VLAN_TAG_HASH_SIZE)
217 #define VLAN_TAG_HASH_SIZE 32
218 #endif
219 static struct {
220 kmutex_t lock;
221 struct pslist_head *lists;
222 u_long mask;
223 } ifv_hash __cacheline_aligned = {
224 .lists = NULL,
225 .mask = 0,
226 };
227
228 pserialize_t vlan_psz __read_mostly;
229 static struct psref_class *ifvm_psref_class __read_mostly;
230
231 struct if_clone vlan_cloner =
232 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);
233
234 /* Used to pad Ethernet frames shorter than ETHER_MIN_LEN bytes */
235 static char vlan_zero_pad_buff[ETHER_MIN_LEN];
236
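/*
 * Wrappers around ifpromisc()/ifpromisc_locked() that take the big
 * kernel lock when the stack is not built with NET_MPSAFE, so the
 * parent's ioctl path runs under the locking it expects.
 */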
237 static inline int
238 vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
239 {
240 int e;
241
242 KERNEL_LOCK_UNLESS_NET_MPSAFE();
243 e = ifpromisc(ifp, pswitch);
244 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
245
246 return e;
247 }
248
249 static inline int
250 vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
251 {
252 int e;
253
254 KERNEL_LOCK_UNLESS_NET_MPSAFE();
255 e = ifpromisc_locked(ifp, pswitch);
256 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
257
258 return e;
259 }
260
261 void
262 vlanattach(int n)
263 {
264
265 /*
266 * Nothing to do here; initialization is handled by the
267 * module initialization code in vlaninit() below.
268 */
269 }
270
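/*
 * Module initialization: set up the global interface list and tag
 * hash locks, the pserialize/psref machinery that protects link MIBs,
 * attach the cloner, and register vlan_input() through the
 * if_vlan_vlan_input_hook.
 */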
271 static void
272 vlaninit(void)
273 {
274 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
275 LIST_INIT(&ifv_list.list);
276
277 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
278 vlan_psz = pserialize_create();
279 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
280 if_clone_attach(&vlan_cloner);
281
282 vlan_hash_init();
283 MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input);
284 }
285
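/*
 * Module detach: refuse with EBUSY while any vlan interface still
 * exists, then tear down the hash table, cloner, psref class and
 * locks created in vlaninit().
 */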
286 static int
287 vlandetach(void)
288 {
289 bool is_empty;
290 int error;
291
292 mutex_enter(&ifv_list.lock);
293 is_empty = LIST_EMPTY(&ifv_list.list);
294 mutex_exit(&ifv_list.lock);
295
296 if (!is_empty)
297 return EBUSY;
298
299 error = vlan_hash_fini();
300 if (error != 0)
301 return error;
302
303 if_clone_detach(&vlan_cloner);
304 psref_class_destroy(ifvm_psref_class);
305 pserialize_destroy(vlan_psz);
306 mutex_destroy(&ifv_hash.lock);
307 mutex_destroy(&ifv_list.lock);
308
309 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
310 return 0;
311 }
312
313 static void
314 vlan_reset_linkname(struct ifnet *ifp)
315 {
316
317 /*
318 * We start out with a "802.1Q VLAN" type and zero-length
319 * addresses. When we attach to a parent interface, we
320 * inherit its type, address length, address, and data link
321 * type.
322 */
323
324 ifp->if_type = IFT_L2VLAN;
325 ifp->if_addrlen = 0;
326 ifp->if_dlt = DLT_NULL;
327 if_alloc_sadl(ifp);
328 }
329
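/*
 * Create a vlan(4) instance: allocate the softc and its initial
 * (unconfigured) link MIB, register the interface, and leave the
 * link state down until a parent is attached.
 */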
330 static int
331 vlan_clone_create(struct if_clone *ifc, int unit)
332 {
333 struct ifvlan *ifv;
334 struct ifnet *ifp;
335 struct ifvlan_linkmib *mib;
336
337 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO);
338 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
339 ifp = &ifv->ifv_if;
340 LIST_INIT(&ifv->ifv_mc_listhead);
341
342 mib->ifvm_ifvlan = ifv;
343 mib->ifvm_p = NULL;
344 psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
345
346 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
347 ifv->ifv_psz = pserialize_create();
348 ifv->ifv_mib = mib;
349
350 mutex_enter(&ifv_list.lock);
351 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
352 mutex_exit(&ifv_list.lock);
353
354 if_initname(ifp, ifc->ifc_name, unit);
355 ifp->if_softc = ifv;
356 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
357 #ifdef NET_MPSAFE
358 ifp->if_extflags = IFEF_MPSAFE;
359 #endif
360 ifp->if_start = vlan_start;
361 ifp->if_transmit = vlan_transmit;
362 ifp->if_ioctl = vlan_ioctl;
363 IFQ_SET_READY(&ifp->if_snd);
364 if_initialize(ifp);
365 /*
366 * Set the link state to down.
367 * When the parent interface attaches we will use that link state.
368 * When the parent interface link state changes, so will ours.
369 * When the parent interface detaches, set the link state to down.
370 */
371 ifp->if_link_state = LINK_STATE_DOWN;
372
373 vlan_reset_linkname(ifp);
374 if_register(ifp);
375 return 0;
376 }
377
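/*
 * Destroy a vlan(4) instance: detach it from its parent (if any),
 * detach the interface, and free the link MIB and softc.
 */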
378 static int
379 vlan_clone_destroy(struct ifnet *ifp)
380 {
381 struct ifvlan *ifv = ifp->if_softc;
382
383 mutex_enter(&ifv_list.lock);
384 LIST_REMOVE(ifv, ifv_list);
385 mutex_exit(&ifv_list.lock);
386
387 IFNET_LOCK(ifp);
388 vlan_unconfig(ifp);
389 IFNET_UNLOCK(ifp);
390 if_detach(ifp);
391
392 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
393 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
394 pserialize_destroy(ifv->ifv_psz);
395 mutex_destroy(&ifv->ifv_lock);
396 free(ifv, M_DEVBUF);
397
398 return 0;
399 }
400
401 /*
402 * Configure a VLAN interface.
403 */
404 static int
405 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
406 {
407 struct ifnet *ifp = &ifv->ifv_if;
408 struct ifvlan_linkmib *nmib = NULL;
409 struct ifvlan_linkmib *omib = NULL;
410 struct ifvlan_linkmib *checkmib;
411 struct psref_target *nmib_psref = NULL;
412 const uint16_t vid = EVL_VLANOFTAG(tag);
413 int error = 0;
414 int idx;
415 bool omib_cleanup = false;
416 struct psref psref;
417
418 /* VLAN IDs 0 and 4095 are reserved in the spec */
419 if ((vid == 0) || (vid == 0xfff))
420 return EINVAL;
421
422 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
423 mutex_enter(&ifv->ifv_lock);
424 omib = ifv->ifv_mib;
425
426 if (omib->ifvm_p != NULL) {
427 error = EBUSY;
428 goto done;
429 }
430
431 /* Duplicate check */
432 checkmib = vlan_lookup_tag_psref(p, vid, &psref);
433 if (checkmib != NULL) {
434 vlan_putref_linkmib(checkmib, &psref);
435 error = EEXIST;
436 goto done;
437 }
438
439 *nmib = *omib;
440 nmib_psref = &nmib->ifvm_psref;
441
442 psref_target_init(nmib_psref, ifvm_psref_class);
443
444 switch (p->if_type) {
445 case IFT_ETHER:
446 {
447 struct ethercom *ec = (void *)p;
448 struct vlanid_list *vidmem;
449
450 nmib->ifvm_msw = &vlan_ether_multisw;
451 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
452 nmib->ifvm_mintu = ETHERMIN;
453
454 if (ec->ec_nvlans++ == 0) {
455 IFNET_LOCK(p);
456 error = ether_enable_vlan_mtu(p);
457 IFNET_UNLOCK(p);
458 if (error >= 0) {
459 if (error) {
460 ec->ec_nvlans--;
461 goto done;
462 }
463 nmib->ifvm_mtufudge = 0;
464 } else {
465 /*
466 * Fudge the MTU by the encapsulation size. This
467 * makes us incompatible with strictly compliant
468 * 802.1Q implementations, but allows us to use
469 * the feature with other NetBSD
470 * implementations, which might still be useful.
471 */
472 nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
473 }
474 error = 0;
475 }
476 /* Add a vid to the list */
477 vidmem = kmem_alloc(sizeof(struct vlanid_list), KM_SLEEP);
478 vidmem->vid = vid;
479 ETHER_LOCK(ec);
480 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list);
481 ETHER_UNLOCK(ec);
482
483 if (ec->ec_vlan_cb != NULL) {
484 /*
485 * Call ec_vlan_cb().  It will set up the VLAN HW filter or
486 * the HW tagging function.
487 */
488 error = (*ec->ec_vlan_cb)(ec, vid, true);
489 if (error) {
490 ec->ec_nvlans--;
491 if (ec->ec_nvlans == 0) {
492 IFNET_LOCK(p);
493 (void)ether_disable_vlan_mtu(p);
494 IFNET_UNLOCK(p);
495 }
496 goto done;
497 }
498 }
499 /*
500 * If the parent interface can do hardware-assisted
501 * VLAN encapsulation, then propagate its hardware-
502 * assisted checksumming flags and TCP segmentation
503 * offload capabilities.
504 */
505 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
506 ifp->if_capabilities = p->if_capabilities &
507 (IFCAP_TSOv4 | IFCAP_TSOv6 |
508 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
509 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
510 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
511 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
512 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx);
513 }
514
515 /*
516 * We inherit the parent's Ethernet address.
517 */
518 ether_ifattach(ifp, CLLADDR(p->if_sadl));
519 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
520 break;
521 }
522
523 default:
524 error = EPROTONOSUPPORT;
525 goto done;
526 }
527
528 nmib->ifvm_p = p;
529 nmib->ifvm_tag = vid;
530 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
531 ifv->ifv_if.if_flags = p->if_flags &
532 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
533
534 /*
535 * Inherit the if_type from the parent. This allows us
536 * to participate in bridges of that type.
537 */
538 ifv->ifv_if.if_type = p->if_type;
539
540 PSLIST_ENTRY_INIT(ifv, ifv_hash);
541 idx = vlan_tag_hash(vid, ifv_hash.mask);
542
543 mutex_enter(&ifv_hash.lock);
544 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
545 mutex_exit(&ifv_hash.lock);
546
547 vlan_linkmib_update(ifv, nmib);
548 nmib = NULL;
549 nmib_psref = NULL;
550 omib_cleanup = true;
551
552
553 /*
554 * We inherit the parent's link state.
555 */
556 if_link_state_change(&ifv->ifv_if, p->if_link_state);
557
558 done:
559 mutex_exit(&ifv->ifv_lock);
560
561 if (nmib_psref)
562 psref_target_destroy(nmib_psref, ifvm_psref_class);
563 if (nmib)
564 kmem_free(nmib, sizeof(*nmib));
565 if (omib_cleanup)
566 kmem_free(omib, sizeof(*omib));
567
568 return error;
569 }
570
571 /*
572 * Unconfigure a VLAN interface.
573 */
574 static void
575 vlan_unconfig(struct ifnet *ifp)
576 {
577 struct ifvlan *ifv = ifp->if_softc;
578 struct ifvlan_linkmib *nmib = NULL;
579 int error;
580
581 KASSERT(IFNET_LOCKED(ifp));
582
583 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
584
585 mutex_enter(&ifv->ifv_lock);
586 error = vlan_unconfig_locked(ifv, nmib);
587 mutex_exit(&ifv->ifv_lock);
588
589 if (error)
590 kmem_free(nmib, sizeof(*nmib));
591 }
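
/*
 * Unconfigure the interface with IFNET_LOCK and ifv_lock held.  The
 * caller passes in a preallocated MIB that replaces the current one;
 * the old MIB is retired with vlan_linkmib_update() and then freed.
 */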
592 static int
593 vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
594 {
595 struct ifnet *p;
596 struct ifnet *ifp = &ifv->ifv_if;
597 struct psref_target *nmib_psref = NULL;
598 struct ifvlan_linkmib *omib;
599 int error = 0;
600
601 KASSERT(IFNET_LOCKED(ifp));
602 KASSERT(mutex_owned(&ifv->ifv_lock));
603
604 ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);
605
606 omib = ifv->ifv_mib;
607 p = omib->ifvm_p;
608
609 if (p == NULL) {
610 error = -1;
611 goto done;
612 }
613
614 *nmib = *omib;
615 nmib_psref = &nmib->ifvm_psref;
616 psref_target_init(nmib_psref, ifvm_psref_class);
617
618 /*
619 * Since the interface is being unconfigured, we need to empty the
620 * list of multicast groups that we may have joined while we were
621 * alive and remove them from the parent's list also.
622 */
623 (*nmib->ifvm_msw->vmsw_purgemulti)(ifv);
624
625 /* Disconnect from parent. */
626 switch (p->if_type) {
627 case IFT_ETHER:
628 {
629 struct ethercom *ec = (void *)p;
630 struct vlanid_list *vlanidp;
631 uint16_t vid = EVL_VLANOFTAG(nmib->ifvm_tag);
632
633 ETHER_LOCK(ec);
634 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
635 if (vlanidp->vid == vid) {
636 SIMPLEQ_REMOVE(&ec->ec_vids, vlanidp,
637 vlanid_list, vid_list);
638 break;
639 }
640 }
641 ETHER_UNLOCK(ec);
642 if (vlanidp != NULL)
643 kmem_free(vlanidp, sizeof(*vlanidp));
644
645 if (ec->ec_vlan_cb != NULL) {
646 /*
647 * Call ec_vlan_cb().  It will clear the VLAN HW filter or
648 * HW tagging for this VID.
649 */
650 (void)(*ec->ec_vlan_cb)(ec, vid, false);
651 }
652 if (--ec->ec_nvlans == 0) {
653 IFNET_LOCK(p);
654 (void)ether_disable_vlan_mtu(p);
655 IFNET_UNLOCK(p);
656 }
657
658 /* XXX ether_ifdetach must not be called with IFNET_LOCK */
659 mutex_exit(&ifv->ifv_lock);
660 IFNET_UNLOCK(ifp);
661 ether_ifdetach(ifp);
662 IFNET_LOCK(ifp);
663 mutex_enter(&ifv->ifv_lock);
664
665 /* if_free_sadl must be called with IFNET_LOCK */
666 if_free_sadl(ifp, 1);
667
668 /* Restore vlan_ioctl overwritten by ether_ifdetach */
669 ifp->if_ioctl = vlan_ioctl;
670 vlan_reset_linkname(ifp);
671 break;
672 }
673
674 default:
675 panic("%s: impossible", __func__);
676 }
677
678 nmib->ifvm_p = NULL;
679 ifv->ifv_if.if_mtu = 0;
680 ifv->ifv_flags = 0;
681
682 mutex_enter(&ifv_hash.lock);
683 PSLIST_WRITER_REMOVE(ifv, ifv_hash);
684 pserialize_perform(vlan_psz);
685 mutex_exit(&ifv_hash.lock);
686 PSLIST_ENTRY_DESTROY(ifv, ifv_hash);
687
688 vlan_linkmib_update(ifv, nmib);
689 if_link_state_change(ifp, LINK_STATE_DOWN);
690
691 mutex_exit(&ifv->ifv_lock);
692
693 nmib_psref = NULL;
694 kmem_free(omib, sizeof(*omib));
695
696 #ifdef INET6
697 KERNEL_LOCK_UNLESS_NET_MPSAFE();
698 /* Delete IPv6 link-local addresses */
699 if (in6_present)
700 in6_ifdetach(ifp);
701 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
702 #endif
703
704 if ((ifp->if_flags & IFF_PROMISC) != 0)
705 vlan_safe_ifpromisc_locked(ifp, 0);
706 if_down_locked(ifp);
707 ifp->if_capabilities = 0;
708 mutex_enter(&ifv->ifv_lock);
709 done:
710
711 if (nmib_psref)
712 psref_target_destroy(nmib_psref, ifvm_psref_class);
713
714 return error;
715 }
716
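/*
 * Set up and tear down the pslist hash table that maps (parent, VLAN
 * ID) pairs to configured vlan interfaces; vlan_hash_fini() fails
 * with EBUSY if any entry is still present.
 */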
717 static void
718 vlan_hash_init(void)
719 {
720
721 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
722 &ifv_hash.mask);
723 }
724
725 static int
726 vlan_hash_fini(void)
727 {
728 int i;
729
730 mutex_enter(&ifv_hash.lock);
731
732 for (i = 0; i < ifv_hash.mask + 1; i++) {
733 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
734 ifv_hash) != NULL) {
735 mutex_exit(&ifv_hash.lock);
736 return EBUSY;
737 }
738 }
739
740 for (i = 0; i < ifv_hash.mask + 1; i++)
741 PSLIST_DESTROY(&ifv_hash.lists[i]);
742
743 mutex_exit(&ifv_hash.lock);
744
745 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);
746
747 ifv_hash.lists = NULL;
748 ifv_hash.mask = 0;
749
750 return 0;
751 }
752
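/*
 * Fold a 12-bit VLAN ID down and mask it into an index for the tag
 * hash table.
 */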
753 static int
754 vlan_tag_hash(uint16_t tag, u_long mask)
755 {
756 uint32_t hash;
757
758 hash = (tag >> 8) ^ tag;
759 hash = (hash >> 2) ^ hash;
760
761 return hash & mask;
762 }
763
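/*
 * Get a psref-protected reference to the current link MIB of a vlan
 * interface.  Readers must use this instead of dereferencing ifv_mib
 * directly; release the reference with vlan_putref_linkmib().
 */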
764 static struct ifvlan_linkmib *
765 vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
766 {
767 struct ifvlan_linkmib *mib;
768 int s;
769
770 s = pserialize_read_enter();
771 mib = atomic_load_consume(&sc->ifv_mib);
772 if (mib == NULL) {
773 pserialize_read_exit(s);
774 return NULL;
775 }
776 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
777 pserialize_read_exit(s);
778
779 return mib;
780 }
781
782 static void
783 vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
784 {
785 if (mib == NULL)
786 return;
787 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
788 }
789
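/*
 * Find the configured vlan interface with the given parent and VLAN
 * ID, and return its link MIB with a psref reference held, or NULL.
 */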
790 static struct ifvlan_linkmib *
791 vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
792 {
793 int idx;
794 int s;
795 struct ifvlan *sc;
796
797 idx = vlan_tag_hash(tag, ifv_hash.mask);
798
799 s = pserialize_read_enter();
800 PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
801 ifv_hash) {
802 struct ifvlan_linkmib *mib = atomic_load_consume(&sc->ifv_mib);
803 if (mib == NULL)
804 continue;
805 if (mib->ifvm_tag != tag)
806 continue;
807 if (mib->ifvm_p != ifp)
808 continue;
809
810 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
811 pserialize_read_exit(s);
812 return mib;
813 }
814 pserialize_read_exit(s);
815 return NULL;
816 }
817
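/*
 * Publish a new link MIB for the interface, wait for all pserialize
 * readers to drain, and retire the psref target of the old MIB.  The
 * caller still owns (and frees) the old MIB's memory.
 */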
818 static void
819 vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
820 {
821 struct ifvlan_linkmib *omib = ifv->ifv_mib;
822
823 KASSERT(mutex_owned(&ifv->ifv_lock));
824
825 atomic_store_release(&ifv->ifv_mib, nmib);
826
827 pserialize_perform(ifv->ifv_psz);
828 psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
829 }
830
831 /*
832 * Called when a parent interface is detaching; destroy any VLAN
833 * configuration for the parent interface.
834 */
835 void
836 vlan_ifdetach(struct ifnet *p)
837 {
838 struct ifvlan *ifv;
839 struct ifvlan_linkmib *mib, **nmibs;
840 struct psref psref;
841 int error;
842 int bound;
843 int i, cnt = 0;
844
845 bound = curlwp_bind();
846
847 mutex_enter(&ifv_list.lock);
848 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
849 mib = vlan_getref_linkmib(ifv, &psref);
850 if (mib == NULL)
851 continue;
852
853 if (mib->ifvm_p == p)
854 cnt++;
855
856 vlan_putref_linkmib(mib, &psref);
857 }
858 mutex_exit(&ifv_list.lock);
859
860 if (cnt == 0) {
861 curlwp_bindx(bound);
862 return;
863 }
864
865 /*
866 * The value of "cnt" does not increase while ifv_list.lock
867 * and ifv->ifv_lock are released here, because the parent
868 * interface is detaching.
869 */
870 nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
871 for (i = 0; i < cnt; i++) {
872 nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
873 }
874
875 mutex_enter(&ifv_list.lock);
876
877 i = 0;
878 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
879 struct ifnet *ifp = &ifv->ifv_if;
880
881 /* IFNET_LOCK must be held before ifv_lock. */
882 IFNET_LOCK(ifp);
883 mutex_enter(&ifv->ifv_lock);
884
885 /* XXX ifv_mib = NULL? */
886 if (ifv->ifv_mib->ifvm_p == p) {
887 KASSERTMSG(i < cnt,
888 "no memory for unconfig, parent=%s", p->if_xname);
889 error = vlan_unconfig_locked(ifv, nmibs[i]);
890 if (!error) {
891 nmibs[i] = NULL;
892 i++;
893 }
894
895 }
896
897 mutex_exit(&ifv->ifv_lock);
898 IFNET_UNLOCK(ifp);
899 }
900
901 mutex_exit(&ifv_list.lock);
902
903 curlwp_bindx(bound);
904
905 for (i = 0; i < cnt; i++) {
906 if (nmibs[i])
907 kmem_free(nmibs[i], sizeof(*nmibs[i]));
908 }
909
910 kmem_free(nmibs, sizeof(*nmibs) * cnt);
911
912 return;
913 }
914
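/*
 * Synchronize the parent's promiscuous mode with the IFF_PROMISC
 * setting of the vlan interface, tracking our own contribution with
 * the IFVF_PROMISC flag.
 */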
915 static int
916 vlan_set_promisc(struct ifnet *ifp)
917 {
918 struct ifvlan *ifv = ifp->if_softc;
919 struct ifvlan_linkmib *mib;
920 struct psref psref;
921 int error = 0;
922 int bound;
923
924 bound = curlwp_bind();
925 mib = vlan_getref_linkmib(ifv, &psref);
926 if (mib == NULL) {
927 curlwp_bindx(bound);
928 return EBUSY;
929 }
930
931 if ((ifp->if_flags & IFF_PROMISC) != 0) {
932 if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
933 error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
934 if (error == 0)
935 ifv->ifv_flags |= IFVF_PROMISC;
936 }
937 } else {
938 if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
939 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
940 if (error == 0)
941 ifv->ifv_flags &= ~IFVF_PROMISC;
942 }
943 }
944 vlan_putref_linkmib(mib, &psref);
945 curlwp_bindx(bound);
946
947 return error;
948 }
949
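/*
 * Handle ioctl requests on a vlan interface.  SIOCSETVLAN attaches
 * the interface to (or, with an empty parent name, detaches it from)
 * a parent and tag; most other requests validate the current link MIB
 * before falling through to the common handlers.
 */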
950 static int
951 vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
952 {
953 struct lwp *l = curlwp;
954 struct ifvlan *ifv = ifp->if_softc;
955 struct ifaddr *ifa = (struct ifaddr *) data;
956 struct ifreq *ifr = (struct ifreq *) data;
957 struct ifnet *pr;
958 struct ifcapreq *ifcr;
959 struct vlanreq vlr;
960 struct ifvlan_linkmib *mib;
961 struct psref psref;
962 int error = 0;
963 int bound;
964
965 switch (cmd) {
966 case SIOCSIFMTU:
967 bound = curlwp_bind();
968 mib = vlan_getref_linkmib(ifv, &psref);
969 if (mib == NULL) {
970 curlwp_bindx(bound);
971 error = EBUSY;
972 break;
973 }
974
975 if (mib->ifvm_p == NULL) {
976 vlan_putref_linkmib(mib, &psref);
977 curlwp_bindx(bound);
978 error = EINVAL;
979 } else if (
980 ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
981 ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
982 vlan_putref_linkmib(mib, &psref);
983 curlwp_bindx(bound);
984 error = EINVAL;
985 } else {
986 vlan_putref_linkmib(mib, &psref);
987 curlwp_bindx(bound);
988
989 error = ifioctl_common(ifp, cmd, data);
990 if (error == ENETRESET)
991 error = 0;
992 }
993
994 break;
995
996 case SIOCSETVLAN:
997 if ((error = kauth_authorize_network(l->l_cred,
998 KAUTH_NETWORK_INTERFACE,
999 KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
1000 NULL)) != 0)
1001 break;
1002 if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
1003 break;
1004
1005 if (vlr.vlr_parent[0] == '\0') {
1006 bound = curlwp_bind();
1007 mib = vlan_getref_linkmib(ifv, &psref);
1008 if (mib == NULL) {
1009 curlwp_bindx(bound);
1010 error = EBUSY;
1011 break;
1012 }
1013
1014 if (mib->ifvm_p != NULL &&
1015 (ifp->if_flags & IFF_PROMISC) != 0)
1016 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
1017
1018 vlan_putref_linkmib(mib, &psref);
1019 curlwp_bindx(bound);
1020
1021 vlan_unconfig(ifp);
1022 break;
1023 }
1024 if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
1025 error = EINVAL; /* check for valid tag */
1026 break;
1027 }
1028 if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
1029 error = ENOENT;
1030 break;
1031 }
1032
1033 error = vlan_config(ifv, pr, vlr.vlr_tag);
1034 if (error != 0)
1035 break;
1036
1037 /* Update promiscuous mode, if necessary. */
1038 vlan_set_promisc(ifp);
1039
1040 ifp->if_flags |= IFF_RUNNING;
1041 break;
1042
1043 case SIOCGETVLAN:
1044 memset(&vlr, 0, sizeof(vlr));
1045 bound = curlwp_bind();
1046 mib = vlan_getref_linkmib(ifv, &psref);
1047 if (mib == NULL) {
1048 curlwp_bindx(bound);
1049 error = EBUSY;
1050 break;
1051 }
1052 if (mib->ifvm_p != NULL) {
1053 snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
1054 mib->ifvm_p->if_xname);
1055 vlr.vlr_tag = mib->ifvm_tag;
1056 }
1057 vlan_putref_linkmib(mib, &psref);
1058 curlwp_bindx(bound);
1059 error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
1060 break;
1061
1062 case SIOCSIFFLAGS:
1063 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1064 break;
1065 /*
1066 * If promiscuous mode is needed on the VLAN interface,
1067 * enable promiscuous mode on the parent as well.
1068 */
1069 bound = curlwp_bind();
1070 mib = vlan_getref_linkmib(ifv, &psref);
1071 if (mib == NULL) {
1072 curlwp_bindx(bound);
1073 error = EBUSY;
1074 break;
1075 }
1076
1077 if (mib->ifvm_p != NULL)
1078 error = vlan_set_promisc(ifp);
1079 vlan_putref_linkmib(mib, &psref);
1080 curlwp_bindx(bound);
1081 break;
1082
1083 case SIOCADDMULTI:
1084 mutex_enter(&ifv->ifv_lock);
1085 mib = ifv->ifv_mib;
1086 if (mib == NULL) {
1087 error = EBUSY;
1088 mutex_exit(&ifv->ifv_lock);
1089 break;
1090 }
1091
1092 error = (mib->ifvm_p != NULL) ?
1093 (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
1094 mib = NULL;
1095 mutex_exit(&ifv->ifv_lock);
1096 break;
1097
1098 case SIOCDELMULTI:
1099 mutex_enter(&ifv->ifv_lock);
1100 mib = ifv->ifv_mib;
1101 if (mib == NULL) {
1102 error = EBUSY;
1103 mutex_exit(&ifv->ifv_lock);
1104 break;
1105 }
1106 error = (mib->ifvm_p != NULL) ?
1107 (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
1108 mib = NULL;
1109 mutex_exit(&ifv->ifv_lock);
1110 break;
1111
1112 case SIOCSIFCAP:
1113 ifcr = data;
1114 /* Make sure the requested capabilities are enabled on the parent */
1115 bound = curlwp_bind();
1116 mib = vlan_getref_linkmib(ifv, &psref);
1117 if (mib == NULL) {
1118 curlwp_bindx(bound);
1119 error = EBUSY;
1120 break;
1121 }
1122
1123 if (mib->ifvm_p == NULL) {
1124 vlan_putref_linkmib(mib, &psref);
1125 curlwp_bindx(bound);
1126 error = EINVAL;
1127 break;
1128 }
1129 if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
1130 ifcr->ifcr_capenable) {
1131 vlan_putref_linkmib(mib, &psref);
1132 curlwp_bindx(bound);
1133 error = EINVAL;
1134 break;
1135 }
1136
1137 vlan_putref_linkmib(mib, &psref);
1138 curlwp_bindx(bound);
1139
1140 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
1141 error = 0;
1142 break;
1143 case SIOCINITIFADDR:
1144 bound = curlwp_bind();
1145 mib = vlan_getref_linkmib(ifv, &psref);
1146 if (mib == NULL) {
1147 curlwp_bindx(bound);
1148 error = EBUSY;
1149 break;
1150 }
1151
1152 if (mib->ifvm_p == NULL) {
1153 error = EINVAL;
1154 vlan_putref_linkmib(mib, &psref);
1155 curlwp_bindx(bound);
1156 break;
1157 }
1158 vlan_putref_linkmib(mib, &psref);
1159 curlwp_bindx(bound);
1160
1161 ifp->if_flags |= IFF_UP;
1162 #ifdef INET
1163 if (ifa->ifa_addr->sa_family == AF_INET)
1164 arp_ifinit(ifp, ifa);
1165 #endif
1166 break;
1167
1168 default:
1169 error = ether_ioctl(ifp, cmd, data);
1170 }
1171
1172 return error;
1173 }
1174
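/*
 * Join an Ethernet multicast group on behalf of the vlan interface:
 * record the membership locally so it can be undone at unconfigure
 * time, and propagate the request to the parent interface.
 */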
1175 static int
1176 vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
1177 {
1178 const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1179 struct vlan_mc_entry *mc;
1180 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1181 struct ifvlan_linkmib *mib;
1182 int error;
1183
1184 KASSERT(mutex_owned(&ifv->ifv_lock));
1185
1186 if (sa->sa_len > sizeof(struct sockaddr_storage))
1187 return EINVAL;
1188
1189 error = ether_addmulti(sa, &ifv->ifv_ec);
1190 if (error != ENETRESET)
1191 return error;
1192
1193 /*
1194 * This is a new multicast address.  We have to tell the parent
1195 * about it. Also, remember this multicast address so that
1196 * we can delete it on unconfigure.
1197 */
1198 mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
1199 if (mc == NULL) {
1200 error = ENOMEM;
1201 goto alloc_failed;
1202 }
1203
1204 /*
1205 * Since ether_addmulti() returned ENETRESET, the following two
1206 * statements shouldn't fail.  Here ifv_ec is implicitly protected
1207 * by ifv_lock.
1208 */
1209 error = ether_multiaddr(sa, addrlo, addrhi);
1210 KASSERT(error == 0);
1211
1212 ETHER_LOCK(&ifv->ifv_ec);
1213 mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1214 ETHER_UNLOCK(&ifv->ifv_ec);
1215
1216 KASSERT(mc->mc_enm != NULL);
1217
1218 memcpy(&mc->mc_addr, sa, sa->sa_len);
1219 LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);
1220
1221 mib = ifv->ifv_mib;
1222
1223 KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1224 error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
1225 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1226
1227 if (error != 0)
1228 goto ioctl_failed;
1229 return error;
1230
1231 ioctl_failed:
1232 LIST_REMOVE(mc, mc_entries);
1233 free(mc, M_DEVBUF);
1234
1235 alloc_failed:
1236 (void)ether_delmulti(sa, &ifv->ifv_ec);
1237 return error;
1238 }
1239
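/*
 * Leave an Ethernet multicast group previously joined through
 * vlan_ether_addmulti() and tell the parent interface about it.
 */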
1240 static int
1241 vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
1242 {
1243 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1244 struct ether_multi *enm;
1245 struct vlan_mc_entry *mc;
1246 struct ifvlan_linkmib *mib;
1247 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1248 int error;
1249
1250 KASSERT(mutex_owned(&ifv->ifv_lock));
1251
1252 /*
1253 * Find the key (the ether_multi) used to look up our vlan_mc_entry;
1254 * we must do this before ether_delmulti() may free it.
1255 */
1256 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
1257 return error;
1258
1259 ETHER_LOCK(&ifv->ifv_ec);
1260 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1261 ETHER_UNLOCK(&ifv->ifv_ec);
1262 if (enm == NULL)
1263 return EINVAL;
1264
1265 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
1266 if (mc->mc_enm == enm)
1267 break;
1268 }
1269
1270 /* We won't delete entries we didn't add */
1271 if (mc == NULL)
1272 return EINVAL;
1273
1274 error = ether_delmulti(sa, &ifv->ifv_ec);
1275 if (error != ENETRESET)
1276 return error;
1277
1278 /* We no longer use this multicast address.  Tell the parent so. */
1279 mib = ifv->ifv_mib;
1280 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);
1281
1282 if (error == 0) {
1283 /* And forget about this address. */
1284 LIST_REMOVE(mc, mc_entries);
1285 free(mc, M_DEVBUF);
1286 } else {
1287 (void)ether_addmulti(sa, &ifv->ifv_ec);
1288 }
1289
1290 return error;
1291 }
1292
1293 /*
1294 * Delete from the parent interface every multicast address we have
1295 * asked it to add.  Called when the vlan is being unconfigured.
1296 */
1297 static void
1298 vlan_ether_purgemulti(struct ifvlan *ifv)
1299 {
1300 struct vlan_mc_entry *mc;
1301 struct ifvlan_linkmib *mib;
1302
1303 KASSERT(mutex_owned(&ifv->ifv_lock));
1304 mib = ifv->ifv_mib;
1305 if (mib == NULL) {
1306 return;
1307 }
1308
1309 while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
1310 (void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
1311 sstocsa(&mc->mc_addr));
1312 LIST_REMOVE(mc, mc_entries);
1313 free(mc, M_DEVBUF);
1314 }
1315 }
1316
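/*
 * if_start handler: drain the send queue, add the 802.1Q tag (in the
 * mbuf header if the parent does hardware tagging, otherwise by
 * rewriting the Ethernet header in place) and hand the frames to the
 * parent interface.
 */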
1317 static void
1318 vlan_start(struct ifnet *ifp)
1319 {
1320 struct ifvlan *ifv = ifp->if_softc;
1321 struct ifnet *p;
1322 struct ethercom *ec;
1323 struct mbuf *m;
1324 struct ifvlan_linkmib *mib;
1325 struct psref psref;
1326 int error;
1327
1328 mib = vlan_getref_linkmib(ifv, &psref);
1329 if (mib == NULL)
1330 return;
1331
1332 if (__predict_false(mib->ifvm_p == NULL)) {
1333 vlan_putref_linkmib(mib, &psref);
1334 return;
1335 }
1336
1337 p = mib->ifvm_p;
1338 ec = (void *)mib->ifvm_p;
1339
1340 ifp->if_flags |= IFF_OACTIVE;
1341
1342 for (;;) {
1343 IFQ_DEQUEUE(&ifp->if_snd, m);
1344 if (m == NULL)
1345 break;
1346
1347 #ifdef ALTQ
1348 /*
1349 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
1350 * defined.
1351 */
1352 KERNEL_LOCK(1, NULL);
1353 /*
1354 * If ALTQ is enabled on the parent interface, do
1355 * classification; the queueing discipline might
1356 * not require classification, but might require
1357 * the address family/header pointer in the pktattr.
1358 */
1359 if (ALTQ_IS_ENABLED(&p->if_snd)) {
1360 switch (p->if_type) {
1361 case IFT_ETHER:
1362 altq_etherclassify(&p->if_snd, m);
1363 break;
1364 default:
1365 panic("%s: impossible (altq)", __func__);
1366 }
1367 }
1368 KERNEL_UNLOCK_ONE(NULL);
1369 #endif /* ALTQ */
1370
1371 bpf_mtap(ifp, m, BPF_D_OUT);
1372 /*
1373 * If the parent can insert the tag itself, just mark
1374 * the tag in the mbuf header.
1375 */
1376 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1377 vlan_set_tag(m, mib->ifvm_tag);
1378 } else {
1379 /*
1380 * insert the tag ourselves
1381 */
1382 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1383 if (m == NULL) {
1384 printf("%s: unable to prepend encap header\n",
1385 p->if_xname);
1386 if_statinc(ifp, if_oerrors);
1387 continue;
1388 }
1389
1390 switch (p->if_type) {
1391 case IFT_ETHER:
1392 {
1393 struct ether_vlan_header *evl;
1394
1395 if (m->m_len < sizeof(struct ether_vlan_header))
1396 m = m_pullup(m,
1397 sizeof(struct ether_vlan_header));
1398 if (m == NULL) {
1399 printf("%s: unable to pullup encap "
1400 "header", p->if_xname);
1401 if_statinc(ifp, if_oerrors);
1402 continue;
1403 }
1404
1405 /*
1406 * Transform the Ethernet header into an
1407 * Ethernet header with 802.1Q encapsulation.
1408 */
1409 memmove(mtod(m, void *),
1410 mtod(m, char *) + mib->ifvm_encaplen,
1411 sizeof(struct ether_header));
1412 evl = mtod(m, struct ether_vlan_header *);
1413 evl->evl_proto = evl->evl_encap_proto;
1414 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1415 evl->evl_tag = htons(mib->ifvm_tag);
1416
1417 /*
1418 * To cater for VLAN-aware layer 2 ethernet
1419 * switches which may need to strip the tag
1420 * before forwarding the packet, make sure
1421 * the packet+tag is at least 68 bytes long.
1422 * This is necessary because our parent will
1423 * only pad to 64 bytes (ETHER_MIN_LEN) and
1424 * some switches will not pad by themselves
1425 * after deleting a tag.
1426 */
1427 const size_t min_data_len = ETHER_MIN_LEN -
1428 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1429 if (m->m_pkthdr.len < min_data_len) {
1430 m_copyback(m, m->m_pkthdr.len,
1431 min_data_len - m->m_pkthdr.len,
1432 vlan_zero_pad_buff);
1433 }
1434 break;
1435 }
1436
1437 default:
1438 panic("%s: impossible", __func__);
1439 }
1440 }
1441
1442 if ((p->if_flags & IFF_RUNNING) == 0) {
1443 m_freem(m);
1444 continue;
1445 }
1446
1447 error = if_transmit_lock(p, m);
1448 if (error) {
1449 /* mbuf is already freed */
1450 if_statinc(ifp, if_oerrors);
1451 continue;
1452 }
1453 if_statinc(ifp, if_opackets);
1454 }
1455
1456 ifp->if_flags &= ~IFF_OACTIVE;
1457
1458 /* Remove reference to mib before release */
1459 vlan_putref_linkmib(mib, &psref);
1460 }
1461
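/*
 * if_transmit handler: same encapsulation as vlan_start(), but for a
 * single mbuf passed directly from the network stack, with pfil(9)
 * output hooks run before the tag is added.
 */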
1462 static int
1463 vlan_transmit(struct ifnet *ifp, struct mbuf *m)
1464 {
1465 struct ifvlan *ifv = ifp->if_softc;
1466 struct ifnet *p;
1467 struct ethercom *ec;
1468 struct ifvlan_linkmib *mib;
1469 struct psref psref;
1470 int error;
1471 size_t pktlen = m->m_pkthdr.len;
1472 bool mcast = (m->m_flags & M_MCAST) != 0;
1473
1474 mib = vlan_getref_linkmib(ifv, &psref);
1475 if (mib == NULL) {
1476 m_freem(m);
1477 return ENETDOWN;
1478 }
1479
1480 if (__predict_false(mib->ifvm_p == NULL)) {
1481 vlan_putref_linkmib(mib, &psref);
1482 m_freem(m);
1483 return ENETDOWN;
1484 }
1485
1486 p = mib->ifvm_p;
1487 ec = (void *)mib->ifvm_p;
1488
1489 bpf_mtap(ifp, m, BPF_D_OUT);
1490
1491 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
1492 goto out;
1493 if (m == NULL)
1494 goto out;
1495
1496 /*
1497 * If the parent can insert the tag itself, just mark
1498 * the tag in the mbuf header.
1499 */
1500 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1501 vlan_set_tag(m, mib->ifvm_tag);
1502 } else {
1503 /*
1504 * insert the tag ourselves
1505 */
1506 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1507 if (m == NULL) {
1508 printf("%s: unable to prepend encap header\n",
1509 p->if_xname);
1510 if_statinc(ifp, if_oerrors);
1511 error = ENOBUFS;
1512 goto out;
1513 }
1514
1515 switch (p->if_type) {
1516 case IFT_ETHER:
1517 {
1518 struct ether_vlan_header *evl;
1519
1520 if (m->m_len < sizeof(struct ether_vlan_header))
1521 m = m_pullup(m,
1522 sizeof(struct ether_vlan_header));
1523 if (m == NULL) {
1524 printf("%s: unable to pullup encap "
1525 "header", p->if_xname);
1526 if_statinc(ifp, if_oerrors);
1527 error = ENOBUFS;
1528 goto out;
1529 }
1530
1531 /*
1532 * Transform the Ethernet header into an
1533 * Ethernet header with 802.1Q encapsulation.
1534 */
1535 memmove(mtod(m, void *),
1536 mtod(m, char *) + mib->ifvm_encaplen,
1537 sizeof(struct ether_header));
1538 evl = mtod(m, struct ether_vlan_header *);
1539 evl->evl_proto = evl->evl_encap_proto;
1540 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1541 evl->evl_tag = htons(mib->ifvm_tag);
1542
1543 /*
1544 * To cater for VLAN-aware layer 2 ethernet
1545 * switches which may need to strip the tag
1546 * before forwarding the packet, make sure
1547 * the packet+tag is at least 68 bytes long.
1548 * This is necessary because our parent will
1549 * only pad to 64 bytes (ETHER_MIN_LEN) and
1550 * some switches will not pad by themselves
1551 * after deleting a tag.
1552 */
1553 const size_t min_data_len = ETHER_MIN_LEN -
1554 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1555 if (m->m_pkthdr.len < min_data_len) {
1556 m_copyback(m, m->m_pkthdr.len,
1557 min_data_len - m->m_pkthdr.len,
1558 vlan_zero_pad_buff);
1559 }
1560 break;
1561 }
1562
1563 default:
1564 panic("%s: impossible", __func__);
1565 }
1566 }
1567
1568 if ((p->if_flags & IFF_RUNNING) == 0) {
1569 m_freem(m);
1570 error = ENETDOWN;
1571 goto out;
1572 }
1573
1574 error = if_transmit_lock(p, m);
1575 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1576 if (error) {
1577 /* mbuf is already freed */
1578 if_statinc_ref(nsr, if_oerrors);
1579 } else {
1580 if_statinc_ref(nsr, if_opackets);
1581 if_statadd_ref(nsr, if_obytes, pktlen);
1582 if (mcast)
1583 if_statinc_ref(nsr, if_omcasts);
1584 }
1585 IF_STAT_PUTREF(ifp);
1586
1587 out:
1588 /* Remove reference to mib before release */
1589 vlan_putref_linkmib(mib, &psref);
1590 return error;
1591 }
1592
1593 /*
1594 * Given an Ethernet frame, find a valid vlan interface corresponding to the
1595 * given source interface and tag, strip the encapsulation, and feed
1596 * the packet to the vlan interface's input routine.
1597 */
1598 void
1599 vlan_input(struct ifnet *ifp, struct mbuf *m)
1600 {
1601 struct ifvlan *ifv;
1602 uint16_t vid;
1603 struct ifvlan_linkmib *mib;
1604 struct psref psref;
1605 bool have_vtag;
1606
1607 have_vtag = vlan_has_tag(m);
1608 if (have_vtag) {
1609 vid = EVL_VLANOFTAG(vlan_get_tag(m));
1610 m->m_flags &= ~M_VLANTAG;
1611 } else {
1612 struct ether_vlan_header *evl;
1613
1614 if (ifp->if_type != IFT_ETHER) {
1615 panic("%s: impossible", __func__);
1616 }
1617
1618 if (m->m_len < sizeof(struct ether_vlan_header) &&
1619 (m = m_pullup(m,
1620 sizeof(struct ether_vlan_header))) == NULL) {
1621 printf("%s: no memory for VLAN header, "
1622 "dropping packet.\n", ifp->if_xname);
1623 return;
1624 }
1625
1626 if (m_makewritable(&m, 0,
1627 sizeof(struct ether_vlan_header), M_DONTWAIT)) {
1628 m_freem(m);
1629 if_statinc(ifp, if_ierrors);
1630 return;
1631 }
1632
1633 evl = mtod(m, struct ether_vlan_header *);
1634 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1635
1636 vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));
1637
1638 /*
1639 * Restore the original ethertype. We'll remove
1640 * the encapsulation after we've found the vlan
1641 * interface corresponding to the tag.
1642 */
1643 evl->evl_encap_proto = evl->evl_proto;
1644 }
1645
1646 mib = vlan_lookup_tag_psref(ifp, vid, &psref);
1647 if (mib == NULL) {
1648 m_freem(m);
1649 if_statinc(ifp, if_noproto);
1650 return;
1651 }
1652 KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);
1653
1654 ifv = mib->ifvm_ifvlan;
1655 if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
1656 (IFF_UP | IFF_RUNNING)) {
1657 m_freem(m);
1658 if_statinc(ifp, if_noproto);
1659 goto out;
1660 }
1661
1662 /*
1663 * Now, remove the encapsulation header. The original
1664 * header has already been fixed up above.
1665 */
1666 if (!have_vtag) {
1667 memmove(mtod(m, char *) + mib->ifvm_encaplen,
1668 mtod(m, void *), sizeof(struct ether_header));
1669 m_adj(m, mib->ifvm_encaplen);
1670 }
1671
1672 /*
1673 * Drop promiscuously received packets if we are not in
1674 * promiscuous mode.
1675 */
1676 if ((m->m_flags & (M_BCAST | M_MCAST)) == 0 &&
1677 (ifp->if_flags & IFF_PROMISC) &&
1678 (ifv->ifv_if.if_flags & IFF_PROMISC) == 0) {
1679 struct ether_header *eh;
1680
1681 eh = mtod(m, struct ether_header *);
1682 if (memcmp(CLLADDR(ifv->ifv_if.if_sadl),
1683 eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
1684 m_freem(m);
1685 if_statinc(&ifv->ifv_if, if_ierrors);
1686 goto out;
1687 }
1688 }
1689
1690 m_set_rcvif(m, &ifv->ifv_if);
1691
1692 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
1693 goto out;
1694 if (m == NULL)
1695 goto out;
1696
1697 m->m_flags &= ~M_PROMISC;
1698 if_input(&ifv->ifv_if, m);
1699 out:
1700 vlan_putref_linkmib(mib, &psref);
1701 }
1702
1703 /*
1704 * If the parent's link state changes, the vlan link state should change as well.
1705 */
1706 void
1707 vlan_link_state_changed(struct ifnet *p, int link_state)
1708 {
1709 struct ifvlan *ifv;
1710 struct ifvlan_linkmib *mib;
1711 struct psref psref;
1712 struct ifnet *ifp;
1713
1714 mutex_enter(&ifv_list.lock);
1715
1716 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
1717 mib = vlan_getref_linkmib(ifv, &psref);
1718 if (mib == NULL)
1719 continue;
1720
1721 if (mib->ifvm_p == p) {
1722 ifp = &mib->ifvm_ifvlan->ifv_if;
1723 if_link_state_change(ifp, link_state);
1724 }
1725
1726 vlan_putref_linkmib(mib, &psref);
1727 }
1728
1729 mutex_exit(&ifv_list.lock);
1730 }
1731
1732 /*
1733 * Module infrastructure
1734 */
1735 #include "if_module.h"
1736
1737 IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)
1738