/*	$NetBSD: if_vlan.c,v 1.117 2017/12/06 08:12:54 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission. M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose. It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
 */
/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be
 * extended some day to also handle IEEE 802.1P priority tagging. This is
 * sort of sneaky in the implementation, since we need to pretend to be
 * enough of an Ethernet implementation to make ARP work. The way we do
 * this is by telling everyone that we are an Ethernet interface, and then
 * catching the packets that ether_output() left on our output queue when it
 * calls if_start(), rewriting them for use by the real outgoing interface,
 * and asking it to send them.
 *
 * TODO:
 *
 *	- Need some way to notify vlan interfaces when the parent
 *	  interface changes MTU.
 */
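
/*
 * For reference (a sketch of the standard 802.1Q layout, not text from the
 * original source): the rewrite described above inserts a 4-byte tag after
 * the source address, so the frame becomes
 *
 *	dst(6) | src(6) | 0x8100(2) | tag: PCP+VID(2) | original ethertype(2) | payload
 *
 * vlan_start() and vlan_transmit() below perform this insertion when the
 * parent interface cannot tag in hardware; vlan_input() undoes it on receive.
 */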

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.117 2017/12/06 08:12:54 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/pserialize.h>
#include <sys/psref.h>
#include <sys/pslist.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/module.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_ether.h>
#include <net/if_vlanvar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif
#ifdef INET6
#include <netinet6/in6_ifattach.h>
#include <netinet6/in6_var.h>
#endif

#include "ioconf.h"

struct vlan_mc_entry {
	LIST_ENTRY(vlan_mc_entry) mc_entries;
	/*
	 * A key to identify this entry. The mc_addr below can't be
	 * used, since multiple sockaddrs may be mapped into the same
	 * ether_multi (e.g., AF_UNSPEC).
	 */
	union {
		struct ether_multi *mcu_enm;
	} mc_u;
	struct sockaddr_storage mc_addr;
};

#define	mc_enm		mc_u.mcu_enm


struct ifvlan_linkmib {
	struct ifvlan *ifvm_ifvlan;
	const struct vlan_multisw *ifvm_msw;
	int	ifvm_encaplen;		/* encapsulation length */
	int	ifvm_mtufudge;		/* MTU fudged by this much */
	int	ifvm_mintu;		/* min transmission unit */
	uint16_t ifvm_proto;		/* encapsulation ethertype */
	uint16_t ifvm_tag;		/* tag to apply on packets */
	struct ifnet *ifvm_p;		/* parent interface of this vlan */

	struct psref_target ifvm_psref;
};

struct ifvlan {
	union {
		struct ethercom ifvu_ec;
	} ifv_u;
	struct ifvlan_linkmib *ifv_mib;	/*
					 * reader must use vlan_getref_linkmib()
					 * instead of direct dereference
					 */
	kmutex_t ifv_lock;		/* writer lock for ifv_mib */

	LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
	LIST_ENTRY(ifvlan) ifv_list;
	struct pslist_entry ifv_hash;
	int	ifv_flags;
};
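
/*
 * A minimal sketch of the reader pattern the ifv_mib comment above refers
 * to, mirroring how the ioctl paths below use it ("sc" is a hypothetical
 * struct ifvlan pointer):
 *
 *	struct ifvlan_linkmib *mib;
 *	struct psref psref;
 *	int bound;
 *
 *	bound = curlwp_bind();
 *	mib = vlan_getref_linkmib(sc, &psref);
 *	if (mib != NULL) {
 *		... read mib->ifvm_* fields ...
 *		vlan_putref_linkmib(mib, &psref);
 *	}
 *	curlwp_bindx(bound);
 */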

#define	IFVF_PROMISC	0x01		/* promiscuous mode enabled */

#define	ifv_ec		ifv_u.ifvu_ec

#define	ifv_if		ifv_ec.ec_if

#define	ifv_msw		ifv_mib.ifvm_msw
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu
#define	ifv_tag		ifv_mib.ifvm_tag

struct vlan_multisw {
	int	(*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
	int	(*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
	void	(*vmsw_purgemulti)(struct ifvlan *);
};

static int	vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
static int	vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
static void	vlan_ether_purgemulti(struct ifvlan *);

const struct vlan_multisw vlan_ether_multisw = {
	vlan_ether_addmulti,
	vlan_ether_delmulti,
	vlan_ether_purgemulti,
};

static int	vlan_clone_create(struct if_clone *, int);
static int	vlan_clone_destroy(struct ifnet *);
static int	vlan_config(struct ifvlan *, struct ifnet *,
	    uint16_t);
static int	vlan_ioctl(struct ifnet *, u_long, void *);
static void	vlan_start(struct ifnet *);
static int	vlan_transmit(struct ifnet *, struct mbuf *);
static void	vlan_unconfig(struct ifnet *);
static int	vlan_unconfig_locked(struct ifvlan *,
	    struct ifvlan_linkmib *);
static void	vlan_hash_init(void);
static int	vlan_hash_fini(void);
static int	vlan_tag_hash(uint16_t, u_long);
static struct ifvlan_linkmib*	vlan_getref_linkmib(struct ifvlan *,
	    struct psref *);
static void	vlan_putref_linkmib(struct ifvlan_linkmib *,
	    struct psref *);
static void	vlan_linkmib_update(struct ifvlan *,
	    struct ifvlan_linkmib *);
static struct ifvlan_linkmib*	vlan_lookup_tag_psref(struct ifnet *,
	    uint16_t, struct psref *);

LIST_HEAD(vlan_ifvlist, ifvlan);
static struct {
	kmutex_t lock;
	struct vlan_ifvlist list;
} ifv_list __cacheline_aligned;


#if !defined(VLAN_TAG_HASH_SIZE)
#define	VLAN_TAG_HASH_SIZE	32
#endif
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
	u_long mask;
} ifv_hash __cacheline_aligned = {
	.lists = NULL,
	.mask = 0,
};

pserialize_t vlan_psz __read_mostly;
static struct psref_class *ifvm_psref_class __read_mostly;

struct if_clone vlan_cloner =
    IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);

/* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */
static char vlan_zero_pad_buff[ETHER_MIN_LEN];

static inline int
vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
{
	int e;
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	e = ifpromisc(ifp, pswitch);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
	return e;
}

static inline int
vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
{
	int e;
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	e = ifpromisc_locked(ifp, pswitch);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
	return e;
}

void
vlanattach(int n)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in vlaninit() below.
	 */
}

static void
vlaninit(void)
{
	mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&ifv_list.list);

	mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	vlan_psz = pserialize_create();
	ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
	if_clone_attach(&vlan_cloner);

	vlan_hash_init();
}

static int
vlandetach(void)
{
	int error = 0;

	mutex_enter(&ifv_list.lock);
	if (!LIST_EMPTY(&ifv_list.list)) {
		mutex_exit(&ifv_list.lock);
		return EBUSY;
	}
	mutex_exit(&ifv_list.lock);

	error = vlan_hash_fini();
	if (error != 0)
		return error;

	if_clone_detach(&vlan_cloner);
	psref_class_destroy(ifvm_psref_class);
	pserialize_destroy(vlan_psz);
	mutex_destroy(&ifv_hash.lock);
	mutex_destroy(&ifv_list.lock);

	return 0;
}

static void
vlan_reset_linkname(struct ifnet *ifp)
{

	/*
	 * We start out with a "802.1Q VLAN" type and zero-length
	 * addresses. When we attach to a parent interface, we
	 * inherit its type, address length, address, and data link
	 * type.
	 */

	ifp->if_type = IFT_L2VLAN;
	ifp->if_addrlen = 0;
	ifp->if_dlt = DLT_NULL;
	if_alloc_sadl(ifp);
}

static int
vlan_clone_create(struct if_clone *ifc, int unit)
{
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifvlan_linkmib *mib;
	int rv;

	ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK|M_ZERO);
	mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
	ifp = &ifv->ifv_if;
	LIST_INIT(&ifv->ifv_mc_listhead);

	mib->ifvm_ifvlan = ifv;
	mib->ifvm_p = NULL;
	psref_target_init(&mib->ifvm_psref, ifvm_psref_class);

	mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
	ifv->ifv_mib = mib;

	mutex_enter(&ifv_list.lock);
	LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
	mutex_exit(&ifv_list.lock);

	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_softc = ifv;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE | IFEF_NO_LINK_STATE_CHANGE;
	ifp->if_start = vlan_start;
	ifp->if_transmit = vlan_transmit;
	ifp->if_ioctl = vlan_ioctl;
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname,
		    rv);
		goto fail;
	}

	vlan_reset_linkname(ifp);
	if_register(ifp);
	return 0;

fail:
	mutex_enter(&ifv_list.lock);
	LIST_REMOVE(ifv, ifv_list);
	mutex_exit(&ifv_list.lock);

	mutex_destroy(&ifv->ifv_lock);
	psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
	kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
	free(ifv, M_DEVBUF);

	return rv;
}

static int
vlan_clone_destroy(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;

	mutex_enter(&ifv_list.lock);
	LIST_REMOVE(ifv, ifv_list);
	mutex_exit(&ifv_list.lock);

	mutex_enter(ifp->if_ioctl_lock);
	vlan_unconfig(ifp);
	mutex_exit(ifp->if_ioctl_lock);
	if_detach(ifp);

	psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
	kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
	mutex_destroy(&ifv->ifv_lock);
	free(ifv, M_DEVBUF);

	return (0);
}

/*
 * Configure a VLAN interface.
 */
static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
{
	struct ifnet *ifp = &ifv->ifv_if;
	struct ifvlan_linkmib *nmib = NULL;
	struct ifvlan_linkmib *omib = NULL;
	struct ifvlan_linkmib *checkmib = NULL;
	struct psref_target *nmib_psref = NULL;
	uint16_t vid = EVL_VLANOFTAG(tag);
	int error = 0;
	int idx;
	bool omib_cleanup = false;
	struct psref psref;

	/* VLAN ID 0 and 4095 are reserved in the spec */
	if ((vid == 0) || (vid == 0xfff))
		return EINVAL;

	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);

	mutex_enter(&ifv->ifv_lock);
	omib = ifv->ifv_mib;

	if (omib->ifvm_p != NULL) {
		error = EBUSY;
		goto done;
	}

	/* Duplicate check */
	checkmib = vlan_lookup_tag_psref(p, vid, &psref);
	if (checkmib != NULL) {
		vlan_putref_linkmib(checkmib, &psref);
		error = EEXIST;
		goto done;
	}

	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;

	psref_target_init(nmib_psref, ifvm_psref_class);

	switch (p->if_type) {
	case IFT_ETHER:
	    {
		struct ethercom *ec = (void *) p;
		nmib->ifvm_msw = &vlan_ether_multisw;
		nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
		nmib->ifvm_mintu = ETHERMIN;

		if (ec->ec_nvlans++ == 0) {
			error = if_enable_vlan_mtu(p);
			if (error >= 0) {
				if (error) {
					ec->ec_nvlans--;
					goto done;
				}
				nmib->ifvm_mtufudge = 0;
			} else {
				/*
				 * Fudge the MTU by the encapsulation size. This
				 * makes us incompatible with strictly compliant
				 * 802.1Q implementations, but allows us to use
				 * the feature with other NetBSD
				 * implementations, which might still be useful.
				 */
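				/*
				 * Illustrative arithmetic (note added for
				 * clarity, not from the original source):
				 * with a 1500-byte parent MTU and
				 * ETHER_VLAN_ENCAP_LEN == 4, the vlan
				 * interface ends up with a 1496-byte MTU
				 * (see the if_mtu computation below).
				 */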
				nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
			}
			error = 0;
		}

		/*
		 * If the parent interface can do hardware-assisted
		 * VLAN encapsulation, then propagate its hardware-
		 * assisted checksumming flags and tcp segmentation
		 * offload.
		 */
		if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
			ec->ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
			ifp->if_capabilities = p->if_capabilities &
			    (IFCAP_TSOv4 | IFCAP_TSOv6 |
			     IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_IPv4_Rx|
			     IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_TCPv4_Rx|
			     IFCAP_CSUM_UDPv4_Tx|IFCAP_CSUM_UDPv4_Rx|
			     IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_TCPv6_Rx|
			     IFCAP_CSUM_UDPv6_Tx|IFCAP_CSUM_UDPv6_Rx);
		}
		/*
		 * We inherit the parent's Ethernet address.
		 */
		ether_ifattach(ifp, CLLADDR(p->if_sadl));
		ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
		break;
	    }

	default:
		error = EPROTONOSUPPORT;
		goto done;
	}

	nmib->ifvm_p = p;
	nmib->ifvm_tag = vid;
	ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
	ifv->ifv_if.if_flags = p->if_flags &
	    (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	/*
	 * Inherit the if_type from the parent. This allows us
	 * to participate in bridges of that type.
	 */
	ifv->ifv_if.if_type = p->if_type;

	PSLIST_ENTRY_INIT(ifv, ifv_hash);
	idx = vlan_tag_hash(vid, ifv_hash.mask);

	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
	mutex_exit(&ifv_hash.lock);

	vlan_linkmib_update(ifv, nmib);
	nmib = NULL;
	nmib_psref = NULL;
	omib_cleanup = true;

done:
	mutex_exit(&ifv->ifv_lock);

	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);

	if (nmib)
		kmem_free(nmib, sizeof(*nmib));

	if (omib_cleanup)
		kmem_free(omib, sizeof(*omib));

	return error;
}

/*
 * Unconfigure a VLAN interface.
 */
static void
vlan_unconfig(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifvlan_linkmib *nmib = NULL;
	int error;

	KASSERT(mutex_owned(ifp->if_ioctl_lock));

	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);

	mutex_enter(&ifv->ifv_lock);
	error = vlan_unconfig_locked(ifv, nmib);
	mutex_exit(&ifv->ifv_lock);

	if (error)
		kmem_free(nmib, sizeof(*nmib));
}

static int
vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifnet *p;
	struct ifnet *ifp = &ifv->ifv_if;
	struct psref_target *nmib_psref = NULL;
	struct ifvlan_linkmib *omib;
	int error = 0;

	KASSERT(mutex_owned(ifp->if_ioctl_lock));
	KASSERT(mutex_owned(&ifv->ifv_lock));

	ifp->if_flags &= ~(IFF_UP|IFF_RUNNING);

	omib = ifv->ifv_mib;
	p = omib->ifvm_p;

	if (p == NULL) {
		error = -1;
		goto done;
	}

	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;
	psref_target_init(nmib_psref, ifvm_psref_class);

	/*
	 * Since the interface is being unconfigured, we need to empty the
	 * list of multicast groups that we may have joined while we were
	 * alive and remove them from the parent's list also.
	 */
	(*nmib->ifvm_msw->vmsw_purgemulti)(ifv);

	/* Disconnect from parent. */
	switch (p->if_type) {
	case IFT_ETHER:
	    {
		struct ethercom *ec = (void *)p;
		if (--ec->ec_nvlans == 0)
			(void)if_disable_vlan_mtu(p);

		ether_ifdetach(ifp);
		/* Restore vlan_ioctl overwritten by ether_ifdetach */
		ifp->if_ioctl = vlan_ioctl;
		vlan_reset_linkname(ifp);
		break;
	    }

#ifdef DIAGNOSTIC
	default:
		panic("vlan_unconfig: impossible");
#endif
	}

	nmib->ifvm_p = NULL;
	ifv->ifv_if.if_mtu = 0;
	ifv->ifv_flags = 0;

	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_REMOVE(ifv, ifv_hash);
	pserialize_perform(vlan_psz);
	mutex_exit(&ifv_hash.lock);
	PSLIST_ENTRY_DESTROY(ifv, ifv_hash);

	vlan_linkmib_update(ifv, nmib);

	mutex_exit(&ifv->ifv_lock);

	nmib_psref = NULL;
	kmem_free(omib, sizeof(*omib));

#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	/* To delete v6 link local addresses */
	if (in6_present)
		in6_ifdetach(ifp);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		vlan_safe_ifpromisc_locked(ifp, 0);
	if_down_locked(ifp);
	ifp->if_capabilities = 0;
	mutex_enter(&ifv->ifv_lock);
done:

	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);

	return error;
}

static void
vlan_hash_init(void)
{

	ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
	    &ifv_hash.mask);
}

static int
vlan_hash_fini(void)
{
	int i;

	mutex_enter(&ifv_hash.lock);

	for (i = 0; i < ifv_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
		    ifv_hash) != NULL) {
			mutex_exit(&ifv_hash.lock);
			return EBUSY;
		}
	}

	for (i = 0; i < ifv_hash.mask + 1; i++)
		PSLIST_DESTROY(&ifv_hash.lists[i]);

	mutex_exit(&ifv_hash.lock);

	hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);

	ifv_hash.lists = NULL;
	ifv_hash.mask = 0;

	return 0;
}

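/*
 * Hash a VLAN ID into the ifv_hash table: fold the tag's high byte and low
 * bits onto each other so nearby IDs spread across the buckets, then mask
 * down to the table size.  (Descriptive note added; not in the original
 * source.)
 */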
static int
vlan_tag_hash(uint16_t tag, u_long mask)
{
	uint32_t hash;

	hash = (tag >> 8) ^ tag;
	hash = (hash >> 2) ^ hash;

	return hash & mask;
}

static struct ifvlan_linkmib *
vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
{
	struct ifvlan_linkmib *mib;
	int s;

	s = pserialize_read_enter();
	mib = sc->ifv_mib;
	if (mib == NULL) {
		pserialize_read_exit(s);
		return NULL;
	}
	membar_datadep_consumer();
	psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
	pserialize_read_exit(s);

	return mib;
}

static void
vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
{
	if (mib == NULL)
		return;
	psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
}

static struct ifvlan_linkmib *
vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
{
	int idx;
	int s;
	struct ifvlan *sc;

	idx = vlan_tag_hash(tag, ifv_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
	    ifv_hash) {
		struct ifvlan_linkmib *mib = sc->ifv_mib;
		if (mib == NULL)
			continue;
		if (mib->ifvm_tag != tag)
			continue;
		if (mib->ifvm_p != ifp)
			continue;

		psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
		pserialize_read_exit(s);
		return mib;
	}
	pserialize_read_exit(s);
	return NULL;
}

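/*
 * Publish a new link MIB for "ifv" and retire the old one.  The update
 * protocol (described here for readers; wording not from the original
 * source): with ifv_lock held, the new MIB is made visible after a producer
 * barrier, then pserialize_perform() waits out current pserialize readers
 * and psref_target_destroy() waits for psref holders of the old MIB before
 * the caller frees it.
 */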
static void
vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifvlan_linkmib *omib = ifv->ifv_mib;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	membar_producer();
	ifv->ifv_mib = nmib;

	pserialize_perform(vlan_psz);
	psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
}

/*
 * Called when a parent interface is detaching; destroy any VLAN
 * configuration for the parent interface.
 */
void
vlan_ifdetach(struct ifnet *p)
{
	struct ifvlan *ifv;
	struct ifvlan_linkmib *mib, **nmibs;
	struct psref psref;
	int error;
	int bound;
	int i, cnt = 0;

	bound = curlwp_bind();
	mutex_enter(&ifv_list.lock);
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL)
			continue;

		if (mib->ifvm_p == p)
			cnt++;

		vlan_putref_linkmib(mib, &psref);
	}
	mutex_exit(&ifv_list.lock);

	/*
	 * The value of "cnt" does not increase while ifv_list.lock
	 * and ifv->ifv_lock are released here, because the parent
	 * interface is detaching.
	 */
	nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
	for (i = 0; i < cnt; i++) {
		nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
	}

	mutex_enter(&ifv_list.lock);

	i = 0;
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		struct ifnet *ifp = &ifv->ifv_if;

		/* Need if_ioctl_lock that must be held before ifv_lock. */
		mutex_enter(ifp->if_ioctl_lock);
		mutex_enter(&ifv->ifv_lock);
		if (ifv->ifv_mib->ifvm_p == p) {
			KASSERTMSG(i < cnt, "no memory for unconfig, parent=%s",
			    p->if_xname);
			error = vlan_unconfig_locked(ifv, nmibs[i]);
			if (!error) {
				nmibs[i] = NULL;
				i++;
			}

		}
		mutex_exit(&ifv->ifv_lock);
		mutex_exit(ifp->if_ioctl_lock);
	}

	mutex_exit(&ifv_list.lock);
	curlwp_bindx(bound);

	for (i = 0; i < cnt; i++) {
		if (nmibs[i])
			kmem_free(nmibs[i], sizeof(*nmibs[i]));
	}

	kmem_free(nmibs, sizeof(*nmibs) * cnt);

	return;
}

static int
vlan_set_promisc(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error = 0;
	int bound;

	bound = curlwp_bind();
	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL) {
		curlwp_bindx(bound);
		return EBUSY;
	}

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
			error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
			if (error == 0)
				ifv->ifv_flags |= IFVF_PROMISC;
		}
	} else {
		if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
			error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
			if (error == 0)
				ifv->ifv_flags &= ~IFVF_PROMISC;
		}
	}
	vlan_putref_linkmib(mib, &psref);
	curlwp_bindx(bound);

	return (error);
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct lwp *l = curlwp;	/* XXX */
	struct ifvlan *ifv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifnet *pr;
	struct ifcapreq *ifcr;
	struct vlanreq vlr;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error = 0;
	int bound;

	switch (cmd) {
	case SIOCSIFMTU:
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else if (
		    ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
		    ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			error = ifioctl_common(ifp, cmd, data);
			if (error == ENETRESET)
				error = 0;
		}

		break;

	case SIOCSETVLAN:
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_INTERFACE,
		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
		    NULL)) != 0)
			break;
		if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
			break;

		if (vlr.vlr_parent[0] == '\0') {
			bound = curlwp_bind();
			mib = vlan_getref_linkmib(ifv, &psref);
			if (mib == NULL) {
				curlwp_bindx(bound);
				error = EBUSY;
				break;
			}

			if (mib->ifvm_p != NULL &&
			    (ifp->if_flags & IFF_PROMISC) != 0)
				error = vlan_safe_ifpromisc(mib->ifvm_p, 0);

			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			vlan_unconfig(ifp);
			break;
		}
		if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
			error = EINVAL;		/* check for valid tag */
			break;
		}
		if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
			error = ENOENT;
			break;
		}
		error = vlan_config(ifv, pr, vlr.vlr_tag);
		if (error != 0) {
			break;
		}

		/* Update promiscuous mode, if necessary. */
		vlan_set_promisc(ifp);

		ifp->if_flags |= IFF_RUNNING;
		break;

	case SIOCGETVLAN:
		memset(&vlr, 0, sizeof(vlr));
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}
		if (mib->ifvm_p != NULL) {
			snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
			    mib->ifvm_p->if_xname);
			vlr.vlr_tag = mib->ifvm_tag;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/*
		 * For promiscuous mode, we enable promiscuous mode on
		 * the parent if we need promiscuous on the VLAN interface.
		 */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p != NULL)
			error = vlan_set_promisc(ifp);
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCADDMULTI:
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}

		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCDELMULTI:
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}
		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCSIFCAP:
		ifcr = data;
		/* make sure caps are enabled on parent */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}
		if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
		    ifcr->ifcr_capenable) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}

		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCINITIFADDR:
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			error = EINVAL;
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			break;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}

static int
vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
{
	const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
	struct vlan_mc_entry *mc;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	struct ifvlan_linkmib *mib;
	int error;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	if (sa->sa_len > sizeof(struct sockaddr_storage))
		return (EINVAL);

	error = ether_addmulti(sa, &ifv->ifv_ec);
	if (error != ENETRESET)
		return (error);

	/*
	 * This is a new multicast address.  We have to tell the parent
	 * about it.  Also, remember this multicast address so that
	 * we can delete it on unconfigure.
	 */
	mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
	if (mc == NULL) {
		error = ENOMEM;
		goto alloc_failed;
	}

	/*
	 * Since ether_addmulti() returned ENETRESET, the following two
	 * statements shouldn't fail.
	 */
	(void)ether_multiaddr(sa, addrlo, addrhi);
	ETHER_LOOKUP_MULTI(addrlo, addrhi, &ifv->ifv_ec, mc->mc_enm);
	memcpy(&mc->mc_addr, sa, sa->sa_len);
	LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);

	mib = ifv->ifv_mib;

	KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
	error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
	KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);

	if (error != 0)
		goto ioctl_failed;
	return (error);

ioctl_failed:
	LIST_REMOVE(mc, mc_entries);
	free(mc, M_DEVBUF);
alloc_failed:
	(void)ether_delmulti(sa, &ifv->ifv_ec);
	return (error);
}

static int
vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
{
	const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
	struct ether_multi *enm;
	struct vlan_mc_entry *mc;
	struct ifvlan_linkmib *mib;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	/*
	 * Find a key to look up the vlan_mc_entry.  We have to do this
	 * before calling ether_delmulti for obvious reasons.
	 */
	if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
		return (error);
	ETHER_LOOKUP_MULTI(addrlo, addrhi, &ifv->ifv_ec, enm);

	error = ether_delmulti(sa, &ifv->ifv_ec);
	if (error != ENETRESET)
		return (error);

	/* We no longer use this multicast address.  Tell parent so. */
	mib = ifv->ifv_mib;
	error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);

	if (error == 0) {
		/* And forget about this address. */
		for (mc = LIST_FIRST(&ifv->ifv_mc_listhead); mc != NULL;
		    mc = LIST_NEXT(mc, mc_entries)) {
			if (mc->mc_enm == enm) {
				LIST_REMOVE(mc, mc_entries);
				free(mc, M_DEVBUF);
				break;
			}
		}
		KASSERT(mc != NULL);
	} else
		(void)ether_addmulti(sa, &ifv->ifv_ec);
	return (error);
}

/*
 * Delete any multicast address we have asked to add from parent
 * interface.  Called when the vlan is being unconfigured.
 */
static void
vlan_ether_purgemulti(struct ifvlan *ifv)
{
	struct vlan_mc_entry *mc;
	struct ifvlan_linkmib *mib;

	KASSERT(mutex_owned(&ifv->ifv_lock));
	mib = ifv->ifv_mib;
	if (mib == NULL) {
		return;
	}

	while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
		(void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
		    (const struct sockaddr *)&mc->mc_addr);
		LIST_REMOVE(mc, mc_entries);
		free(mc, M_DEVBUF);
	}
}

static void
vlan_start(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifnet *p;
	struct ethercom *ec;
	struct mbuf *m;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error;

	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL)
		return;
	p = mib->ifvm_p;
	ec = (void *)mib->ifvm_p;

	ifp->if_flags |= IFF_OACTIVE;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#ifdef ALTQ
		/*
		 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is defined.
		 */
		KERNEL_LOCK(1, NULL);
		/*
		 * If ALTQ is enabled on the parent interface, do
		 * classification; the queueing discipline might
		 * not require classification, but might require
		 * the address family/header pointer in the pktattr.
		 */
		if (ALTQ_IS_ENABLED(&p->if_snd)) {
			switch (p->if_type) {
			case IFT_ETHER:
				altq_etherclassify(&p->if_snd, m);
				break;
#ifdef DIAGNOSTIC
			default:
				panic("vlan_start: impossible (altq)");
#endif
			}
		}
		KERNEL_UNLOCK_ONE(NULL);
#endif /* ALTQ */

		bpf_mtap(ifp, m);
		/*
		 * If the parent can insert the tag itself, just mark
		 * the tag in the mbuf header.
		 */
		if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
			vlan_set_tag(m, mib->ifvm_tag);
		} else {
			/*
			 * insert the tag ourselves
			 */
			M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
			if (m == NULL) {
				printf("%s: unable to prepend encap header\n",
				    p->if_xname);
				ifp->if_oerrors++;
				continue;
			}

			switch (p->if_type) {
			case IFT_ETHER:
			    {
				struct ether_vlan_header *evl;

				if (m->m_len < sizeof(struct ether_vlan_header))
					m = m_pullup(m,
					    sizeof(struct ether_vlan_header));
				if (m == NULL) {
					printf("%s: unable to pullup encap "
					    "header\n", p->if_xname);
					ifp->if_oerrors++;
					continue;
				}

				/*
				 * Transform the Ethernet header into an
				 * Ethernet header with 802.1Q encapsulation.
				 */
				memmove(mtod(m, void *),
				    mtod(m, char *) + mib->ifvm_encaplen,
				    sizeof(struct ether_header));
				evl = mtod(m, struct ether_vlan_header *);
				evl->evl_proto = evl->evl_encap_proto;
				evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
				evl->evl_tag = htons(mib->ifvm_tag);

				/*
				 * To cater for VLAN-aware layer 2 ethernet
				 * switches which may need to strip the tag
				 * before forwarding the packet, make sure
				 * the packet+tag is at least 68 bytes long.
				 * This is necessary because our parent will
				 * only pad to 64 bytes (ETHER_MIN_LEN) and
				 * some switches will not pad by themselves
				 * after deleting a tag.
				 */
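				/*
				 * Worked out (note added for clarity): the
				 * target below is ETHER_MIN_LEN (64) -
				 * ETHER_CRC_LEN (4) + ETHER_VLAN_ENCAP_LEN (4)
				 * = 64 bytes of frame data, i.e. 68 bytes on
				 * the wire once the parent appends the CRC.
				 */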
				if (m->m_pkthdr.len <
				    (ETHER_MIN_LEN - ETHER_CRC_LEN +
				     ETHER_VLAN_ENCAP_LEN)) {
					m_copyback(m, m->m_pkthdr.len,
					    (ETHER_MIN_LEN - ETHER_CRC_LEN +
					     ETHER_VLAN_ENCAP_LEN) -
					    m->m_pkthdr.len,
					    vlan_zero_pad_buff);
				}
				break;
			    }

#ifdef DIAGNOSTIC
			default:
				panic("vlan_start: impossible");
#endif
			}
		}

		if ((p->if_flags & IFF_RUNNING) == 0) {
			m_freem(m);
			continue;
		}

		error = if_transmit_lock(p, m);
		if (error) {
			/* mbuf is already freed */
			ifp->if_oerrors++;
			continue;
		}
		ifp->if_opackets++;
	}

	ifp->if_flags &= ~IFF_OACTIVE;

	/* Remove reference to mib before release */
	p = NULL;
	ec = NULL;

	vlan_putref_linkmib(mib, &psref);
}

static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifnet *p;
	struct ethercom *ec;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error;
	size_t pktlen = m->m_pkthdr.len;
	bool mcast = (m->m_flags & M_MCAST) != 0;

	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	p = mib->ifvm_p;
	ec = (void *)mib->ifvm_p;

	bpf_mtap(ifp, m);

	if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT) != 0) {
		if (m != NULL)
			m_freem(m);
		error = 0;
		goto out;
	}

	/*
	 * If the parent can insert the tag itself, just mark
	 * the tag in the mbuf header.
	 */
	if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
		vlan_set_tag(m, mib->ifvm_tag);
	} else {
		/*
		 * insert the tag ourselves
		 */
		M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
		if (m == NULL) {
			printf("%s: unable to prepend encap header\n",
			    p->if_xname);
			ifp->if_oerrors++;
			error = ENOBUFS;
			goto out;
		}

		switch (p->if_type) {
		case IFT_ETHER:
		    {
			struct ether_vlan_header *evl;

			if (m->m_len < sizeof(struct ether_vlan_header))
				m = m_pullup(m,
				    sizeof(struct ether_vlan_header));
			if (m == NULL) {
				printf("%s: unable to pullup encap "
				    "header\n", p->if_xname);
				ifp->if_oerrors++;
				error = ENOBUFS;
				goto out;
			}

			/*
			 * Transform the Ethernet header into an
			 * Ethernet header with 802.1Q encapsulation.
			 */
			memmove(mtod(m, void *),
			    mtod(m, char *) + mib->ifvm_encaplen,
			    sizeof(struct ether_header));
			evl = mtod(m, struct ether_vlan_header *);
			evl->evl_proto = evl->evl_encap_proto;
			evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
			evl->evl_tag = htons(mib->ifvm_tag);

			/*
			 * To cater for VLAN-aware layer 2 ethernet
			 * switches which may need to strip the tag
			 * before forwarding the packet, make sure
			 * the packet+tag is at least 68 bytes long.
			 * This is necessary because our parent will
			 * only pad to 64 bytes (ETHER_MIN_LEN) and
			 * some switches will not pad by themselves
			 * after deleting a tag.
			 */
			if (m->m_pkthdr.len <
			    (ETHER_MIN_LEN - ETHER_CRC_LEN +
			     ETHER_VLAN_ENCAP_LEN)) {
				m_copyback(m, m->m_pkthdr.len,
				    (ETHER_MIN_LEN - ETHER_CRC_LEN +
				     ETHER_VLAN_ENCAP_LEN) -
				    m->m_pkthdr.len,
				    vlan_zero_pad_buff);
			}
			break;
		    }

#ifdef DIAGNOSTIC
		default:
			panic("vlan_transmit: impossible");
#endif
		}
	}

	if ((p->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

	error = if_transmit_lock(p, m);
	if (error) {
		/* mbuf is already freed */
		ifp->if_oerrors++;
	} else {

		ifp->if_opackets++;
		ifp->if_obytes += pktlen;
		if (mcast)
			ifp->if_omcasts++;
	}

out:
	/* Remove reference to mib before release */
	p = NULL;
	ec = NULL;

	vlan_putref_linkmib(mib, &psref);
	return error;
}

/*
 * Given an Ethernet frame, find a valid vlan interface corresponding to the
 * given source interface and tag, then run the real packet through the
 * parent's input routine.
 */
void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	uint16_t vid;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	bool have_vtag;

	have_vtag = vlan_has_tag(m);
	if (have_vtag) {
		vid = EVL_VLANOFTAG(vlan_get_tag(m));
		m->m_flags &= ~M_VLANTAG;
	} else {
		switch (ifp->if_type) {
		case IFT_ETHER:
		    {
			struct ether_vlan_header *evl;

			if (m->m_len < sizeof(struct ether_vlan_header) &&
			    (m = m_pullup(m,
			     sizeof(struct ether_vlan_header))) == NULL) {
				printf("%s: no memory for VLAN header, "
				    "dropping packet.\n", ifp->if_xname);
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);

			vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));

			/*
			 * Restore the original ethertype.  We'll remove
			 * the encapsulation after we've found the vlan
			 * interface corresponding to the tag.
			 */
			evl->evl_encap_proto = evl->evl_proto;
			break;
		    }

		default:
			vid = (uint16_t) -1;	/* XXX GCC */
#ifdef DIAGNOSTIC
			panic("vlan_input: impossible");
#endif
		}
	}

	mib = vlan_lookup_tag_psref(ifp, vid, &psref);
	if (mib == NULL) {
		m_freem(m);
		ifp->if_noproto++;
		return;
	}

	ifv = mib->ifvm_ifvlan;
	if ((ifv->ifv_if.if_flags & (IFF_UP|IFF_RUNNING)) !=
	    (IFF_UP|IFF_RUNNING)) {
		m_freem(m);
		ifp->if_noproto++;
		goto out;
	}

	/*
	 * Now, remove the encapsulation header.  The original
	 * header has already been fixed up above.
	 */
	if (!have_vtag) {
		memmove(mtod(m, char *) + mib->ifvm_encaplen,
		    mtod(m, void *), sizeof(struct ether_header));
		m_adj(m, mib->ifvm_encaplen);
	}

	m_set_rcvif(m, &ifv->ifv_if);
	ifv->ifv_if.if_ipackets++;

	if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0) {
		if (m != NULL)
			m_freem(m);
		goto out;
	}

	m->m_flags &= ~M_PROMISC;
	if_input(&ifv->ifv_if, m);
out:
	vlan_putref_linkmib(mib, &psref);
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, vlan, "")