1 /* $NetBSD: if_bridge.c,v 1.121 2016/04/28 01:37:17 knakahara Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79 * consider heterogeneous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.121 2016/04/28 01:37:17 knakahara Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_bridge_ipf.h"
87 #include "opt_inet.h"
88 #include "opt_net_mpsafe.h"
89 #endif /* _KERNEL_OPT */
90
91 #include <sys/param.h>
92 #include <sys/kernel.h>
93 #include <sys/mbuf.h>
94 #include <sys/queue.h>
95 #include <sys/socket.h>
96 #include <sys/socketvar.h> /* for softnet_lock */
97 #include <sys/sockio.h>
98 #include <sys/systm.h>
99 #include <sys/proc.h>
100 #include <sys/pool.h>
101 #include <sys/kauth.h>
102 #include <sys/cpu.h>
103 #include <sys/cprng.h>
104 #include <sys/mutex.h>
105 #include <sys/kmem.h>
106
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_types.h>
111 #include <net/if_llc.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115
116 #if defined(BRIDGE_IPF)
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h> /* XXX */
123
124 #include <netinet/ip6.h>
125 #include <netinet6/in6_var.h>
126 #include <netinet6/ip6_var.h>
127 #include <netinet6/ip6_private.h> /* XXX */
128 #endif /* BRIDGE_IPF */
129
130 /*
131 * Size of the route hash table. Must be a power of two.
132 */
133 #ifndef BRIDGE_RTHASH_SIZE
134 #define BRIDGE_RTHASH_SIZE 1024
135 #endif
136
137 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
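
/*
 * Because the table size is a power of two, a hash value can be folded
 * into a bucket index with a simple mask instead of a modulo.  A sketch
 * of the idea (names chosen only for illustration):
 *
 *	bucket = hash & BRIDGE_RTHASH_MASK;
 *
 * which is equivalent to hash % BRIDGE_RTHASH_SIZE.
 */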
138
139 #include "carp.h"
140 #if NCARP > 0
141 #include <netinet/in.h>
142 #include <netinet/in_var.h>
143 #include <netinet/ip_carp.h>
144 #endif
145
146 #include "ioconf.h"
147
148 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
149 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
150 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
151
152 /*
153 * Maximum number of addresses to cache.
154 */
155 #ifndef BRIDGE_RTABLE_MAX
156 #define BRIDGE_RTABLE_MAX 100
157 #endif
158
159 /*
160 * Spanning tree defaults.
161 */
162 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
163 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
164 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
165 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
166 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
167 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
168 #define BSTP_DEFAULT_PATH_COST 55
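
/*
 * These spanning tree times are kept in units of 1/256 of a second,
 * hence the "* 256" above and the "<< 8" and ">> 8" conversions in the
 * BRDGSHT/BRDGGHT-style ioctl handlers below.  Worked example: the
 * default hello time of 2 seconds is stored as 2 * 256 = 512, and
 * bridge_ioctl_ght() hands 512 >> 8 = 2 back to userland.
 */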
169
170 /*
171 * Timeout (in seconds) for entries learned dynamically.
172 */
173 #ifndef BRIDGE_RTABLE_TIMEOUT
174 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
175 #endif
176
177 /*
178 * Number of seconds between walks of the route list.
179 */
180 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
181 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
182 #endif
183
184 #define BRIDGE_RT_LOCK(_sc) if ((_sc)->sc_rtlist_lock) \
185 mutex_enter((_sc)->sc_rtlist_lock)
186 #define BRIDGE_RT_UNLOCK(_sc) if ((_sc)->sc_rtlist_lock) \
187 mutex_exit((_sc)->sc_rtlist_lock)
188 #define BRIDGE_RT_LOCKED(_sc) (!(_sc)->sc_rtlist_lock || \
189 mutex_owned((_sc)->sc_rtlist_lock))
190
191 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
192 if ((_sc)->sc_rtlist_psz != NULL) \
193 pserialize_perform((_sc)->sc_rtlist_psz);
194
195 #define BRIDGE_RT_RENTER(__s) do { __s = pserialize_read_enter(); } while (0)
196 #define BRIDGE_RT_REXIT(__s) do { pserialize_read_exit(__s); } while (0)
197
198
199 #ifdef NET_MPSAFE
200 #define DECLARE_LOCK_VARIABLE
201 #define ACQUIRE_GLOBAL_LOCKS() do { } while (0)
202 #define RELEASE_GLOBAL_LOCKS() do { } while (0)
203 #else
204 #define DECLARE_LOCK_VARIABLE int __s
205 #define ACQUIRE_GLOBAL_LOCKS() do { \
206 KERNEL_LOCK(1, NULL); \
207 mutex_enter(softnet_lock); \
208 __s = splnet(); \
209 } while (0)
210 #define RELEASE_GLOBAL_LOCKS() do { \
211 splx(__s); \
212 mutex_exit(softnet_lock); \
213 KERNEL_UNLOCK_ONE(NULL); \
214 } while (0)
215 #endif
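
/*
 * Typical usage of these macros, as in bridge_forward() and
 * bridge_input() below (a sketch of the pattern only):
 *
 *	DECLARE_LOCK_VARIABLE;
 *	...
 *	ACQUIRE_GLOBAL_LOCKS();
 *	bridge_enqueue(sc, dst_if, m, 1);
 *	RELEASE_GLOBAL_LOCKS();
 *
 * Under NET_MPSAFE the macros compile to nothing; otherwise they take
 * KERNEL_LOCK, softnet_lock and splnet() around the call.
 */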
216
217 struct psref_class *bridge_psref_class __read_mostly;
218
219 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
220
221 static struct pool bridge_rtnode_pool;
222
223 static int bridge_clone_create(struct if_clone *, int);
224 static int bridge_clone_destroy(struct ifnet *);
225
226 static int bridge_ioctl(struct ifnet *, u_long, void *);
227 static int bridge_init(struct ifnet *);
228 static void bridge_stop(struct ifnet *, int);
229 static void bridge_start(struct ifnet *);
230
231 static void bridge_input(struct ifnet *, struct mbuf *);
232 static void bridge_forward(struct bridge_softc *, struct mbuf *);
233
234 static void bridge_timer(void *);
235
236 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
237 struct mbuf *);
238
239 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
240 struct ifnet *, int, uint8_t);
241 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
242 static void bridge_rttrim(struct bridge_softc *);
243 static void bridge_rtage(struct bridge_softc *);
244 static void bridge_rtage_work(struct work *, void *);
245 static void bridge_rtflush(struct bridge_softc *, int);
246 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
247 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
248
249 static void bridge_rtable_init(struct bridge_softc *);
250 static void bridge_rtable_fini(struct bridge_softc *);
251
252 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
253 const uint8_t *);
254 static int bridge_rtnode_insert(struct bridge_softc *,
255 struct bridge_rtnode *);
256 static void bridge_rtnode_remove(struct bridge_softc *,
257 struct bridge_rtnode *);
258 static void bridge_rtnode_destroy(struct bridge_rtnode *);
259
260 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
261 const char *name,
262 struct psref *);
263 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
264 struct ifnet *ifp,
265 struct psref *);
266 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
267 struct psref *);
268 static void bridge_delete_member(struct bridge_softc *,
269 struct bridge_iflist *);
270 static void bridge_acquire_member(struct bridge_softc *sc,
271 struct bridge_iflist *,
272 struct psref *);
273
274 static int bridge_ioctl_add(struct bridge_softc *, void *);
275 static int bridge_ioctl_del(struct bridge_softc *, void *);
276 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
277 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
278 static int bridge_ioctl_scache(struct bridge_softc *, void *);
279 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
280 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
281 static int bridge_ioctl_rts(struct bridge_softc *, void *);
282 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
283 static int bridge_ioctl_sto(struct bridge_softc *, void *);
284 static int bridge_ioctl_gto(struct bridge_softc *, void *);
285 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
286 static int bridge_ioctl_flush(struct bridge_softc *, void *);
287 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
288 static int bridge_ioctl_spri(struct bridge_softc *, void *);
289 static int bridge_ioctl_ght(struct bridge_softc *, void *);
290 static int bridge_ioctl_sht(struct bridge_softc *, void *);
291 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
292 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
293 static int bridge_ioctl_gma(struct bridge_softc *, void *);
294 static int bridge_ioctl_sma(struct bridge_softc *, void *);
295 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
296 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
297 #if defined(BRIDGE_IPF)
298 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
299 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
300 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
301 static int bridge_ip_checkbasic(struct mbuf **mp);
302 # ifdef INET6
303 static int bridge_ip6_checkbasic(struct mbuf **mp);
304 # endif /* INET6 */
305 #endif /* BRIDGE_IPF */
306
307 struct bridge_control {
308 int (*bc_func)(struct bridge_softc *, void *);
309 int bc_argsize;
310 int bc_flags;
311 };
312
313 #define BC_F_COPYIN 0x01 /* copy arguments in */
314 #define BC_F_COPYOUT 0x02 /* copy arguments out */
315 #define BC_F_SUSER 0x04 /* do super-user check */
316 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
317 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
318
319 static const struct bridge_control bridge_control_table[] = {
320 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
321 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
322
323 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
324 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
325
326 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
327 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
328
329 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
330 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
331
332 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
333
334 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
335 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
336
337 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
338
339 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
340
341 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
342 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
343
344 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
345 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
346
347 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
348 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
349
350 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
351 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
352
353 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
354
355 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
356 #if defined(BRIDGE_IPF)
357 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
358 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
359 #endif /* BRIDGE_IPF */
360 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
361 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
362 };
363
364 static const int bridge_control_table_size = __arraycount(bridge_control_table);
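
/*
 * For reference, userland reaches this table through SIOCSDRVSPEC and
 * SIOCGDRVSPEC with a struct ifdrv whose ifd_cmd indexes
 * bridge_control_table, whose ifd_len matches bc_argsize, and whose
 * ifd_data points at the argument structure (see bridge_ioctl() below).
 * A minimal caller sketch, assuming an AF_INET socket "sock" and
 * interface names chosen purely for illustration:
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.ifbr_ifsname, "wm0", sizeof(req.ifbr_ifsname));
 *
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *
 *	if (ioctl(sock, SIOCSDRVSPEC, &ifd) == -1)
 *		err(EXIT_FAILURE, "BRDGADD");
 */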
365
366 static struct if_clone bridge_cloner =
367 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
368
369 /*
370 * bridgeattach:
371 *
372 * Pseudo-device attach routine.
373 */
374 void
375 bridgeattach(int n)
376 {
377
378 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
379 0, 0, 0, "brtpl", NULL, IPL_NET);
380
381 bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
382
383 if_clone_attach(&bridge_cloner);
384 }
385
386 /*
387 * bridge_clone_create:
388 *
389 * Create a new bridge instance.
390 */
391 static int
392 bridge_clone_create(struct if_clone *ifc, int unit)
393 {
394 struct bridge_softc *sc;
395 struct ifnet *ifp;
396 int error;
397
398 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
399 ifp = &sc->sc_if;
400
401 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
402 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
403 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
404 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
405 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
406 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
407 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
408 sc->sc_filter_flags = 0;
409
410 /* Initialize our routing table. */
411 bridge_rtable_init(sc);
412
413 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
414 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
415 if (error)
416 panic("%s: workqueue_create %d\n", __func__, error);
417
418 callout_init(&sc->sc_brcallout, 0);
419 callout_init(&sc->sc_bstpcallout, 0);
420
421 mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
422 PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
423 sc->sc_iflist_psref.bip_psz = pserialize_create();
424
425 if_initname(ifp, ifc->ifc_name, unit);
426 ifp->if_softc = sc;
427 ifp->if_mtu = ETHERMTU;
428 ifp->if_ioctl = bridge_ioctl;
429 ifp->if_output = bridge_output;
430 ifp->if_start = bridge_start;
431 ifp->if_stop = bridge_stop;
432 ifp->if_init = bridge_init;
433 ifp->if_type = IFT_BRIDGE;
434 ifp->if_addrlen = 0;
435 ifp->if_dlt = DLT_EN10MB;
436 ifp->if_hdrlen = ETHER_HDR_LEN;
437
438 if_initialize(ifp);
439 if_register(ifp);
440
441 if_alloc_sadl(ifp);
442
443 return (0);
444 }
445
446 /*
447 * bridge_clone_destroy:
448 *
449 * Destroy a bridge instance.
450 */
451 static int
452 bridge_clone_destroy(struct ifnet *ifp)
453 {
454 struct bridge_softc *sc = ifp->if_softc;
455 struct bridge_iflist *bif;
456 int s;
457
458 s = splnet();
459
460 bridge_stop(ifp, 1);
461
462 BRIDGE_LOCK(sc);
463 for (;;) {
464 bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
465 bif_next);
466 if (bif == NULL)
467 break;
468 bridge_delete_member(sc, bif);
469 }
470 PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
471 BRIDGE_UNLOCK(sc);
472
473 splx(s);
474
475 if_detach(ifp);
476
477 /* Tear down the routing table. */
478 bridge_rtable_fini(sc);
479
480 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
481 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
482
483 workqueue_destroy(sc->sc_rtage_wq);
484
485 kmem_free(sc, sizeof(*sc));
486
487 return (0);
488 }
489
490 /*
491 * bridge_ioctl:
492 *
493 * Handle a control request from the operator.
494 */
495 static int
496 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
497 {
498 struct bridge_softc *sc = ifp->if_softc;
499 struct lwp *l = curlwp; /* XXX */
500 union {
501 struct ifbreq ifbreq;
502 struct ifbifconf ifbifconf;
503 struct ifbareq ifbareq;
504 struct ifbaconf ifbaconf;
505 struct ifbrparam ifbrparam;
506 } args;
507 struct ifdrv *ifd = (struct ifdrv *) data;
508 const struct bridge_control *bc = NULL; /* XXXGCC */
509 int s, error = 0;
510
511 /* Authorize command before calling splnet(). */
512 switch (cmd) {
513 case SIOCGDRVSPEC:
514 case SIOCSDRVSPEC:
515 if (ifd->ifd_cmd >= bridge_control_table_size
516 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
517 error = EINVAL;
518 return error;
519 }
520
521 /* We only care about BC_F_SUSER at this point. */
522 if ((bc->bc_flags & BC_F_SUSER) == 0)
523 break;
524
525 error = kauth_authorize_network(l->l_cred,
526 KAUTH_NETWORK_INTERFACE_BRIDGE,
527 cmd == SIOCGDRVSPEC ?
528 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
529 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
530 ifd, NULL, NULL);
531 if (error)
532 return (error);
533
534 break;
535 }
536
537 s = splnet();
538
539 switch (cmd) {
540 case SIOCGDRVSPEC:
541 case SIOCSDRVSPEC:
542 KASSERT(bc != NULL);
543 if (cmd == SIOCGDRVSPEC &&
544 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
545 error = EINVAL;
546 break;
547 }
548 else if (cmd == SIOCSDRVSPEC &&
549 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
550 error = EINVAL;
551 break;
552 }
553
554 /* BC_F_SUSER is checked above, before splnet(). */
555
556 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
557 && (ifd->ifd_len != bc->bc_argsize
558 || ifd->ifd_len > sizeof(args))) {
559 error = EINVAL;
560 break;
561 }
562
563 memset(&args, 0, sizeof(args));
564 if (bc->bc_flags & BC_F_COPYIN) {
565 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
566 if (error)
567 break;
568 } else if (bc->bc_flags & BC_F_XLATEIN) {
569 args.ifbifconf.ifbic_len = ifd->ifd_len;
570 args.ifbifconf.ifbic_buf = ifd->ifd_data;
571 }
572
573 error = (*bc->bc_func)(sc, &args);
574 if (error)
575 break;
576
577 if (bc->bc_flags & BC_F_COPYOUT) {
578 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
579 } else if (bc->bc_flags & BC_F_XLATEOUT) {
580 ifd->ifd_len = args.ifbifconf.ifbic_len;
581 ifd->ifd_data = args.ifbifconf.ifbic_buf;
582 }
583 break;
584
585 case SIOCSIFFLAGS:
586 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
587 break;
588 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
589 case IFF_RUNNING:
590 /*
591 * If interface is marked down and it is running,
592 * then stop and disable it.
593 */
594 (*ifp->if_stop)(ifp, 1);
595 break;
596 case IFF_UP:
597 /*
598 * If interface is marked up and it is stopped, then
599 * start it.
600 */
601 error = (*ifp->if_init)(ifp);
602 break;
603 default:
604 break;
605 }
606 break;
607
608 case SIOCSIFMTU:
609 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
610 error = 0;
611 break;
612
613 default:
614 error = ifioctl_common(ifp, cmd, data);
615 break;
616 }
617
618 splx(s);
619
620 return (error);
621 }
622
623 /*
624 * bridge_lookup_member:
625 *
626 * Lookup a bridge member interface.
627 */
628 static struct bridge_iflist *
629 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
630 {
631 struct bridge_iflist *bif;
632 struct ifnet *ifp;
633 int s;
634
635 BRIDGE_PSZ_RENTER(s);
636
637 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
638 ifp = bif->bif_ifp;
639 if (strcmp(ifp->if_xname, name) == 0)
640 break;
641 }
642 if (bif != NULL)
643 bridge_acquire_member(sc, bif, psref);
644
645 BRIDGE_PSZ_REXIT(s);
646
647 return bif;
648 }
649
650 /*
651 * bridge_lookup_member_if:
652 *
653 * Lookup a bridge member interface by ifnet*.
654 */
655 static struct bridge_iflist *
656 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
657 struct psref *psref)
658 {
659 struct bridge_iflist *bif;
660 int s;
661
662 BRIDGE_PSZ_RENTER(s);
663
664 bif = member_ifp->if_bridgeif;
665 if (bif != NULL) {
666 psref_acquire(psref, &bif->bif_psref,
667 bridge_psref_class);
668 }
669
670 BRIDGE_PSZ_REXIT(s);
671
672 return bif;
673 }
674
675 static void
676 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
677 struct psref *psref)
678 {
679
680 psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
681 }
682
683 /*
684 * bridge_release_member:
685 *
686 * Release the specified member interface.
687 */
688 static void
689 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
690 struct psref *psref)
691 {
692
693 psref_release(psref, &bif->bif_psref, bridge_psref_class);
694 }
695
696 /*
697 * bridge_delete_member:
698 *
699 * Delete the specified member interface.
700 */
701 static void
702 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
703 {
704 struct ifnet *ifs = bif->bif_ifp;
705
706 KASSERT(BRIDGE_LOCKED(sc));
707
708 ifs->_if_input = ether_input;
709 ifs->if_bridge = NULL;
710 ifs->if_bridgeif = NULL;
711
712 PSLIST_WRITER_REMOVE(bif, bif_next);
713 BRIDGE_PSZ_PERFORM(sc);
714 BRIDGE_UNLOCK(sc);
715
716 psref_target_destroy(&bif->bif_psref, bridge_psref_class);
717
718 PSLIST_ENTRY_DESTROY(bif, bif_next);
719 kmem_free(bif, sizeof(*bif));
720
721 BRIDGE_LOCK(sc);
722 }
723
724 static int
725 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
726 {
727 struct ifbreq *req = arg;
728 struct bridge_iflist *bif = NULL;
729 struct ifnet *ifs;
730 int error = 0;
731
732 ifs = ifunit(req->ifbr_ifsname);
733 if (ifs == NULL)
734 return (ENOENT);
735
736 if (sc->sc_if.if_mtu != ifs->if_mtu)
737 return (EINVAL);
738
739 if (ifs->if_bridge == sc)
740 return (EEXIST);
741
742 if (ifs->if_bridge != NULL)
743 return (EBUSY);
744
745 if (ifs->_if_input != ether_input)
746 return EINVAL;
747
748 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
749 if ((ifs->if_flags & IFF_SIMPLEX) == 0)
750 return EINVAL;
751
752 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
753
754 switch (ifs->if_type) {
755 case IFT_ETHER:
756 if ((error = ether_enable_vlan_mtu(ifs)) > 0)
757 goto out;
758 /*
759 * Place the interface into promiscuous mode.
760 */
761 error = ifpromisc(ifs, 1);
762 if (error)
763 goto out;
764 break;
765 default:
766 error = EINVAL;
767 goto out;
768 }
769
770 bif->bif_ifp = ifs;
771 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
772 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
773 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
774 PSLIST_ENTRY_INIT(bif, bif_next);
775 psref_target_init(&bif->bif_psref, bridge_psref_class);
776
777 BRIDGE_LOCK(sc);
778
779 ifs->if_bridge = sc;
780 ifs->if_bridgeif = bif;
781 PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
782 ifs->_if_input = bridge_input;
783
784 BRIDGE_UNLOCK(sc);
785
786 if (sc->sc_if.if_flags & IFF_RUNNING)
787 bstp_initialization(sc);
788 else
789 bstp_stop(sc);
790
791 out:
792 if (error) {
793 if (bif != NULL)
794 kmem_free(bif, sizeof(*bif));
795 }
796 return (error);
797 }
798
799 static int
800 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
801 {
802 struct ifbreq *req = arg;
803 const char *name = req->ifbr_ifsname;
804 struct bridge_iflist *bif;
805 struct ifnet *ifs;
806
807 BRIDGE_LOCK(sc);
808
809 /*
810 * Don't use bridge_lookup_member. We want to get a member
811 * with bif_refs == 0.
812 */
813 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
814 ifs = bif->bif_ifp;
815 if (strcmp(ifs->if_xname, name) == 0)
816 break;
817 }
818
819 if (bif == NULL) {
820 BRIDGE_UNLOCK(sc);
821 return ENOENT;
822 }
823
824 bridge_delete_member(sc, bif);
825
826 BRIDGE_UNLOCK(sc);
827
828 switch (ifs->if_type) {
829 case IFT_ETHER:
830 /*
831 * Take the interface out of promiscuous mode.
832 * Don't call it while holding a spin lock.
833 */
834 (void) ifpromisc(ifs, 0);
835 (void) ether_disable_vlan_mtu(ifs);
836 break;
837 default:
838 #ifdef DIAGNOSTIC
839 panic("bridge_delete_member: impossible");
840 #endif
841 break;
842 }
843
844 bridge_rtdelete(sc, ifs);
845
846 if (sc->sc_if.if_flags & IFF_RUNNING)
847 bstp_initialization(sc);
848
849 return 0;
850 }
851
852 static int
853 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
854 {
855 struct ifbreq *req = arg;
856 struct bridge_iflist *bif;
857 struct psref psref;
858
859 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
860 if (bif == NULL)
861 return (ENOENT);
862
863 req->ifbr_ifsflags = bif->bif_flags;
864 req->ifbr_state = bif->bif_state;
865 req->ifbr_priority = bif->bif_priority;
866 req->ifbr_path_cost = bif->bif_path_cost;
867 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
868
869 bridge_release_member(sc, bif, &psref);
870
871 return (0);
872 }
873
874 static int
875 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
876 {
877 struct ifbreq *req = arg;
878 struct bridge_iflist *bif;
879 struct psref psref;
880
881 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
882 if (bif == NULL)
883 return (ENOENT);
884
885 if (req->ifbr_ifsflags & IFBIF_STP) {
886 switch (bif->bif_ifp->if_type) {
887 case IFT_ETHER:
888 /* These can do spanning tree. */
889 break;
890
891 default:
892 /* Nothing else can. */
893 bridge_release_member(sc, bif, &psref);
894 return (EINVAL);
895 }
896 }
897
898 bif->bif_flags = req->ifbr_ifsflags;
899
900 bridge_release_member(sc, bif, &psref);
901
902 if (sc->sc_if.if_flags & IFF_RUNNING)
903 bstp_initialization(sc);
904
905 return (0);
906 }
907
908 static int
909 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
910 {
911 struct ifbrparam *param = arg;
912
913 sc->sc_brtmax = param->ifbrp_csize;
914 bridge_rttrim(sc);
915
916 return (0);
917 }
918
919 static int
920 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
921 {
922 struct ifbrparam *param = arg;
923
924 param->ifbrp_csize = sc->sc_brtmax;
925
926 return (0);
927 }
928
929 static int
930 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
931 {
932 struct ifbifconf *bifc = arg;
933 struct bridge_iflist *bif;
934 struct ifbreq *breqs;
935 int i, count, error = 0;
936
937 retry:
938 BRIDGE_LOCK(sc);
939 count = 0;
940 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
941 count++;
942 BRIDGE_UNLOCK(sc);
943
944 if (count == 0) {
945 bifc->ifbic_len = 0;
946 return 0;
947 }
948
949 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
950 /* Tell the caller that a larger buffer is needed */
951 bifc->ifbic_len = sizeof(*breqs) * count;
952 return 0;
953 }
954
955 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
956
957 BRIDGE_LOCK(sc);
958
959 i = 0;
960 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
961 i++;
962 if (i > count) {
963 /*
964 * The number of members has been increased.
965 * We need more memory!
966 */
967 BRIDGE_UNLOCK(sc);
968 kmem_free(breqs, sizeof(*breqs) * count);
969 goto retry;
970 }
971
972 i = 0;
973 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
974 struct ifbreq *breq = &breqs[i++];
975 memset(breq, 0, sizeof(*breq));
976
977 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
978 sizeof(breq->ifbr_ifsname));
979 breq->ifbr_ifsflags = bif->bif_flags;
980 breq->ifbr_state = bif->bif_state;
981 breq->ifbr_priority = bif->bif_priority;
982 breq->ifbr_path_cost = bif->bif_path_cost;
983 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
984 }
985
986 /* Don't call copyout while holding the mutex */
987 BRIDGE_UNLOCK(sc);
988
989 for (i = 0; i < count; i++) {
990 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
991 if (error)
992 break;
993 }
994 bifc->ifbic_len = sizeof(*breqs) * i;
995
996 kmem_free(breqs, sizeof(*breqs) * count);
997
998 return error;
999 }
1000
1001 static int
1002 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1003 {
1004 struct ifbaconf *bac = arg;
1005 struct bridge_rtnode *brt;
1006 struct ifbareq bareq;
1007 int count = 0, error = 0, len;
1008
1009 if (bac->ifbac_len == 0)
1010 return (0);
1011
1012 BRIDGE_RT_LOCK(sc);
1013
1014 len = bac->ifbac_len;
1015 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1016 if (len < sizeof(bareq))
1017 goto out;
1018 memset(&bareq, 0, sizeof(bareq));
1019 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1020 sizeof(bareq.ifba_ifsname));
1021 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1022 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1023 bareq.ifba_expire = brt->brt_expire - time_uptime;
1024 } else
1025 bareq.ifba_expire = 0;
1026 bareq.ifba_flags = brt->brt_flags;
1027
1028 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1029 if (error)
1030 goto out;
1031 count++;
1032 len -= sizeof(bareq);
1033 }
1034 out:
1035 BRIDGE_RT_UNLOCK(sc);
1036
1037 bac->ifbac_len = sizeof(bareq) * count;
1038 return (error);
1039 }
1040
1041 static int
1042 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1043 {
1044 struct ifbareq *req = arg;
1045 struct bridge_iflist *bif;
1046 int error;
1047 struct psref psref;
1048
1049 bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1050 if (bif == NULL)
1051 return (ENOENT);
1052
1053 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1054 req->ifba_flags);
1055
1056 bridge_release_member(sc, bif, &psref);
1057
1058 return (error);
1059 }
1060
1061 static int
1062 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1063 {
1064 struct ifbrparam *param = arg;
1065
1066 sc->sc_brttimeout = param->ifbrp_ctime;
1067
1068 return (0);
1069 }
1070
1071 static int
1072 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1073 {
1074 struct ifbrparam *param = arg;
1075
1076 param->ifbrp_ctime = sc->sc_brttimeout;
1077
1078 return (0);
1079 }
1080
1081 static int
1082 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1083 {
1084 struct ifbareq *req = arg;
1085
1086 return (bridge_rtdaddr(sc, req->ifba_dst));
1087 }
1088
1089 static int
1090 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1091 {
1092 struct ifbreq *req = arg;
1093
1094 bridge_rtflush(sc, req->ifbr_ifsflags);
1095
1096 return (0);
1097 }
1098
1099 static int
1100 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1101 {
1102 struct ifbrparam *param = arg;
1103
1104 param->ifbrp_prio = sc->sc_bridge_priority;
1105
1106 return (0);
1107 }
1108
1109 static int
1110 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1111 {
1112 struct ifbrparam *param = arg;
1113
1114 sc->sc_bridge_priority = param->ifbrp_prio;
1115
1116 if (sc->sc_if.if_flags & IFF_RUNNING)
1117 bstp_initialization(sc);
1118
1119 return (0);
1120 }
1121
1122 static int
1123 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1124 {
1125 struct ifbrparam *param = arg;
1126
1127 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1128
1129 return (0);
1130 }
1131
1132 static int
1133 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1134 {
1135 struct ifbrparam *param = arg;
1136
1137 if (param->ifbrp_hellotime == 0)
1138 return (EINVAL);
1139 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1140
1141 if (sc->sc_if.if_flags & IFF_RUNNING)
1142 bstp_initialization(sc);
1143
1144 return (0);
1145 }
1146
1147 static int
1148 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1149 {
1150 struct ifbrparam *param = arg;
1151
1152 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1153
1154 return (0);
1155 }
1156
1157 static int
1158 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1159 {
1160 struct ifbrparam *param = arg;
1161
1162 if (param->ifbrp_fwddelay == 0)
1163 return (EINVAL);
1164 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1165
1166 if (sc->sc_if.if_flags & IFF_RUNNING)
1167 bstp_initialization(sc);
1168
1169 return (0);
1170 }
1171
1172 static int
1173 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1174 {
1175 struct ifbrparam *param = arg;
1176
1177 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1178
1179 return (0);
1180 }
1181
1182 static int
1183 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1184 {
1185 struct ifbrparam *param = arg;
1186
1187 if (param->ifbrp_maxage == 0)
1188 return (EINVAL);
1189 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1190
1191 if (sc->sc_if.if_flags & IFF_RUNNING)
1192 bstp_initialization(sc);
1193
1194 return (0);
1195 }
1196
1197 static int
1198 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1199 {
1200 struct ifbreq *req = arg;
1201 struct bridge_iflist *bif;
1202 struct psref psref;
1203
1204 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1205 if (bif == NULL)
1206 return (ENOENT);
1207
1208 bif->bif_priority = req->ifbr_priority;
1209
1210 if (sc->sc_if.if_flags & IFF_RUNNING)
1211 bstp_initialization(sc);
1212
1213 bridge_release_member(sc, bif, &psref);
1214
1215 return (0);
1216 }
1217
1218 #if defined(BRIDGE_IPF)
1219 static int
1220 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1221 {
1222 struct ifbrparam *param = arg;
1223
1224 param->ifbrp_filter = sc->sc_filter_flags;
1225
1226 return (0);
1227 }
1228
1229 static int
1230 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1231 {
1232 struct ifbrparam *param = arg;
1233 uint32_t nflags, oflags;
1234
1235 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1236 return (EINVAL);
1237
1238 nflags = param->ifbrp_filter;
1239 oflags = sc->sc_filter_flags;
1240
1241 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1242 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1243 sc->sc_if.if_pfil);
1244 }
1245 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1246 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1247 sc->sc_if.if_pfil);
1248 }
1249
1250 sc->sc_filter_flags = nflags;
1251
1252 return (0);
1253 }
1254 #endif /* BRIDGE_IPF */
1255
1256 static int
1257 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1258 {
1259 struct ifbreq *req = arg;
1260 struct bridge_iflist *bif;
1261 struct psref psref;
1262
1263 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1264 if (bif == NULL)
1265 return (ENOENT);
1266
1267 bif->bif_path_cost = req->ifbr_path_cost;
1268
1269 if (sc->sc_if.if_flags & IFF_RUNNING)
1270 bstp_initialization(sc);
1271
1272 bridge_release_member(sc, bif, &psref);
1273
1274 return (0);
1275 }
1276
1277 /*
1278 * bridge_ifdetach:
1279 *
1280 * Detach an interface from a bridge. Called when a member
1281 * interface is detaching.
1282 */
1283 void
1284 bridge_ifdetach(struct ifnet *ifp)
1285 {
1286 struct bridge_softc *sc = ifp->if_bridge;
1287 struct ifbreq breq;
1288
1289 /* ioctl_lock should prevent this from happening */
1290 KASSERT(sc != NULL);
1291
1292 memset(&breq, 0, sizeof(breq));
1293 strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1294
1295 (void) bridge_ioctl_del(sc, &breq);
1296 }
1297
1298 /*
1299 * bridge_init:
1300 *
1301 * Initialize a bridge interface.
1302 */
1303 static int
1304 bridge_init(struct ifnet *ifp)
1305 {
1306 struct bridge_softc *sc = ifp->if_softc;
1307
1308 if (ifp->if_flags & IFF_RUNNING)
1309 return (0);
1310
1311 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1312 bridge_timer, sc);
1313
1314 ifp->if_flags |= IFF_RUNNING;
1315 bstp_initialization(sc);
1316 return (0);
1317 }
1318
1319 /*
1320 * bridge_stop:
1321 *
1322 * Stop the bridge interface.
1323 */
1324 static void
1325 bridge_stop(struct ifnet *ifp, int disable)
1326 {
1327 struct bridge_softc *sc = ifp->if_softc;
1328
1329 if ((ifp->if_flags & IFF_RUNNING) == 0)
1330 return;
1331
1332 callout_stop(&sc->sc_brcallout);
1333 bstp_stop(sc);
1334
1335 bridge_rtflush(sc, IFBF_FLUSHDYN);
1336
1337 ifp->if_flags &= ~IFF_RUNNING;
1338 }
1339
1340 /*
1341 * bridge_enqueue:
1342 *
1343 * Enqueue a packet on a bridge member interface.
1344 */
1345 void
1346 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1347 int runfilt)
1348 {
1349 int len, error;
1350 short mflags;
1351
1352 /*
1353 * Clear any in-bound checksum flags for this packet.
1354 */
1355 m->m_pkthdr.csum_flags = 0;
1356
1357 if (runfilt) {
1358 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1359 dst_ifp, PFIL_OUT) != 0) {
1360 if (m != NULL)
1361 m_freem(m);
1362 return;
1363 }
1364 if (m == NULL)
1365 return;
1366 }
1367
1368 #ifdef ALTQ
1369 /*
1370 * If ALTQ is enabled on the member interface, do
1371 * classification; the queueing discipline might
1372 * not require classification, but might require
1373 * the address family/header pointer in the pktattr.
1374 */
1375 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1376 /* XXX IFT_ETHER */
1377 altq_etherclassify(&dst_ifp->if_snd, m);
1378 }
1379 #endif /* ALTQ */
1380
1381 len = m->m_pkthdr.len;
1382 mflags = m->m_flags;
1383
1384 error = (*dst_ifp->if_transmit)(dst_ifp, m);
1385 if (error) {
1386 /* mbuf is already freed */
1387 sc->sc_if.if_oerrors++;
1388 return;
1389 }
1390
1391 sc->sc_if.if_opackets++;
1392 sc->sc_if.if_obytes += len;
1393 if (mflags & M_MCAST)
1394 sc->sc_if.if_omcasts++;
1395 }
1396
1397 /*
1398 * bridge_output:
1399 *
1400 * Send output from a bridge member interface. This
1401 * performs the bridging function for locally originated
1402 * packets.
1403 *
1404 * The mbuf has the Ethernet header already attached. We must
1405 * enqueue or free the mbuf before returning.
1406 */
1407 int
1408 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1409 const struct rtentry *rt)
1410 {
1411 struct ether_header *eh;
1412 struct ifnet *dst_if;
1413 struct bridge_softc *sc;
1414 int s;
1415
1416 if (m->m_len < ETHER_HDR_LEN) {
1417 m = m_pullup(m, ETHER_HDR_LEN);
1418 if (m == NULL)
1419 return (0);
1420 }
1421
1422 eh = mtod(m, struct ether_header *);
1423 sc = ifp->if_bridge;
1424
1425 /*
1426 * If bridge is down, but the original output interface is up,
1427 * go ahead and send out that interface. Otherwise, the packet
1428 * is dropped below.
1429 */
1430 if (__predict_false(sc == NULL) ||
1431 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1432 dst_if = ifp;
1433 goto sendunicast;
1434 }
1435
1436 /*
1437 * If the packet is a multicast, or we don't know a better way to
1438 * get there, send to all interfaces.
1439 */
1440 if (ETHER_IS_MULTICAST(eh->ether_dhost))
1441 dst_if = NULL;
1442 else
1443 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1444 if (dst_if == NULL) {
1445 struct bridge_iflist *bif;
1446 struct mbuf *mc;
1447 bool used = false;
1448
1449 BRIDGE_PSZ_RENTER(s);
1450 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1451 struct psref psref;
1452
1453 bridge_acquire_member(sc, bif, &psref);
1454 BRIDGE_PSZ_REXIT(s);
1455
1456 dst_if = bif->bif_ifp;
1457 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1458 goto next;
1459
1460 /*
1461 * If this is not the original output interface,
1462 * and the interface is participating in spanning
1463 * tree, make sure the port is in a state that
1464 * allows forwarding.
1465 */
1466 if (dst_if != ifp &&
1467 (bif->bif_flags & IFBIF_STP) != 0) {
1468 switch (bif->bif_state) {
1469 case BSTP_IFSTATE_BLOCKING:
1470 case BSTP_IFSTATE_LISTENING:
1471 case BSTP_IFSTATE_DISABLED:
1472 goto next;
1473 }
1474 }
1475
1476 if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1477 bif_next) == NULL) {
1478 used = true;
1479 mc = m;
1480 } else {
1481 mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
1482 if (mc == NULL) {
1483 sc->sc_if.if_oerrors++;
1484 goto next;
1485 }
1486 }
1487
1488 #ifndef NET_MPSAFE
1489 s = splnet();
1490 #endif
1491 bridge_enqueue(sc, dst_if, mc, 0);
1492 #ifndef NET_MPSAFE
1493 splx(s);
1494 #endif
1495 next:
1496 BRIDGE_PSZ_RENTER(s);
1497 bridge_release_member(sc, bif, &psref);
1498
1499 /* Guarantee we don't re-enter the loop as we already
1500 * decided we're at the end. */
1501 if (used)
1502 break;
1503 }
1504 BRIDGE_PSZ_REXIT(s);
1505
1506 if (!used)
1507 m_freem(m);
1508 return (0);
1509 }
1510
1511 sendunicast:
1512 /*
1513 * XXX Spanning tree consideration here?
1514 */
1515
1516 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1517 m_freem(m);
1518 return (0);
1519 }
1520
1521 #ifndef NET_MPSAFE
1522 s = splnet();
1523 #endif
1524 bridge_enqueue(sc, dst_if, m, 0);
1525 #ifndef NET_MPSAFE
1526 splx(s);
1527 #endif
1528
1529 return (0);
1530 }
1531
1532 /*
1533 * bridge_start:
1534 *
1535 * Start output on a bridge.
1536 *
1537 * NOTE: This routine should never be called in this implementation.
1538 */
1539 static void
1540 bridge_start(struct ifnet *ifp)
1541 {
1542
1543 printf("%s: bridge_start() called\n", ifp->if_xname);
1544 }
1545
1546 /*
1547 * bridge_forward:
1548 *
1549 * The forwarding function of the bridge.
1550 */
1551 static void
1552 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1553 {
1554 struct bridge_iflist *bif;
1555 struct ifnet *src_if, *dst_if;
1556 struct ether_header *eh;
1557 struct psref psref;
1558 DECLARE_LOCK_VARIABLE;
1559
1560 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1561 return;
1562
1563 src_if = m->m_pkthdr.rcvif;
1564
1565 sc->sc_if.if_ipackets++;
1566 sc->sc_if.if_ibytes += m->m_pkthdr.len;
1567
1568 /*
1569 * Look up the bridge_iflist.
1570 */
1571 bif = bridge_lookup_member_if(sc, src_if, &psref);
1572 if (bif == NULL) {
1573 /* Interface is not a bridge member (anymore?) */
1574 m_freem(m);
1575 goto out;
1576 }
1577
1578 if (bif->bif_flags & IFBIF_STP) {
1579 switch (bif->bif_state) {
1580 case BSTP_IFSTATE_BLOCKING:
1581 case BSTP_IFSTATE_LISTENING:
1582 case BSTP_IFSTATE_DISABLED:
1583 m_freem(m);
1584 bridge_release_member(sc, bif, &psref);
1585 goto out;
1586 }
1587 }
1588
1589 eh = mtod(m, struct ether_header *);
1590
1591 /*
1592 * If the interface is learning, and the source
1593 * address is valid and not multicast, record
1594 * the address.
1595 */
1596 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1597 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1598 (eh->ether_shost[0] == 0 &&
1599 eh->ether_shost[1] == 0 &&
1600 eh->ether_shost[2] == 0 &&
1601 eh->ether_shost[3] == 0 &&
1602 eh->ether_shost[4] == 0 &&
1603 eh->ether_shost[5] == 0) == 0) {
1604 (void) bridge_rtupdate(sc, eh->ether_shost,
1605 src_if, 0, IFBAF_DYNAMIC);
1606 }
1607
1608 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1609 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1610 m_freem(m);
1611 bridge_release_member(sc, bif, &psref);
1612 goto out;
1613 }
1614
1615 bridge_release_member(sc, bif, &psref);
1616
1617 /*
1618 * At this point, the port either doesn't participate
1619 * in spanning tree or it is in the forwarding state.
1620 */
1621
1622 /*
1623 * If the packet is unicast, destined for someone on
1624 * "this" side of the bridge, drop it.
1625 */
1626 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1627 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1628 if (src_if == dst_if) {
1629 m_freem(m);
1630 goto out;
1631 }
1632 } else {
1633 /* ...forward it to all interfaces. */
1634 sc->sc_if.if_imcasts++;
1635 dst_if = NULL;
1636 }
1637
1638 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1639 m->m_pkthdr.rcvif, PFIL_IN) != 0) {
1640 if (m != NULL)
1641 m_freem(m);
1642 goto out;
1643 }
1644 if (m == NULL)
1645 goto out;
1646
1647 if (dst_if == NULL) {
1648 bridge_broadcast(sc, src_if, m);
1649 goto out;
1650 }
1651
1652 /*
1653 * At this point, we're dealing with a unicast frame
1654 * going to a different interface.
1655 */
1656 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1657 m_freem(m);
1658 goto out;
1659 }
1660
1661 bif = bridge_lookup_member_if(sc, dst_if, &psref);
1662 if (bif == NULL) {
1663 /* Not a member of the bridge (anymore?) */
1664 m_freem(m);
1665 goto out;
1666 }
1667
1668 if (bif->bif_flags & IFBIF_STP) {
1669 switch (bif->bif_state) {
1670 case BSTP_IFSTATE_DISABLED:
1671 case BSTP_IFSTATE_BLOCKING:
1672 m_freem(m);
1673 bridge_release_member(sc, bif, &psref);
1674 goto out;
1675 }
1676 }
1677
1678 bridge_release_member(sc, bif, &psref);
1679
1680 ACQUIRE_GLOBAL_LOCKS();
1681 bridge_enqueue(sc, dst_if, m, 1);
1682 RELEASE_GLOBAL_LOCKS();
1683 out:
1684 /* XXX gcc */
1685 return;
1686 }
1687
1688 static bool
1689 bstp_state_before_learning(struct bridge_iflist *bif)
1690 {
1691 if (bif->bif_flags & IFBIF_STP) {
1692 switch (bif->bif_state) {
1693 case BSTP_IFSTATE_BLOCKING:
1694 case BSTP_IFSTATE_LISTENING:
1695 case BSTP_IFSTATE_DISABLED:
1696 return true;
1697 }
1698 }
1699 return false;
1700 }
1701
1702 static bool
1703 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1704 {
1705 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1706
1707 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1708 #if NCARP > 0
1709 || (bif->bif_ifp->if_carp &&
1710 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1711 #endif /* NCARP > 0 */
1712 )
1713 return true;
1714
1715 return false;
1716 }
1717
1718 /*
1719 * bridge_input:
1720 *
1721 * Receive input from a member interface. Queue the packet for
1722 * bridging if it is not for us.
1723 */
1724 static void
1725 bridge_input(struct ifnet *ifp, struct mbuf *m)
1726 {
1727 struct bridge_softc *sc = ifp->if_bridge;
1728 struct bridge_iflist *bif;
1729 struct ether_header *eh;
1730 struct psref psref;
1731 DECLARE_LOCK_VARIABLE;
1732
1733 KASSERT(!cpu_intr_p());
1734
1735 if (__predict_false(sc == NULL) ||
1736 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1737 ACQUIRE_GLOBAL_LOCKS();
1738 ether_input(ifp, m);
1739 RELEASE_GLOBAL_LOCKS();
1740 return;
1741 }
1742
1743 bif = bridge_lookup_member_if(sc, ifp, &psref);
1744 if (bif == NULL) {
1745 ACQUIRE_GLOBAL_LOCKS();
1746 ether_input(ifp, m);
1747 RELEASE_GLOBAL_LOCKS();
1748 return;
1749 }
1750
1751 eh = mtod(m, struct ether_header *);
1752
1753 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1754 if (memcmp(etherbroadcastaddr,
1755 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1756 m->m_flags |= M_BCAST;
1757 else
1758 m->m_flags |= M_MCAST;
1759 }
1760
1761 /*
1762 * A 'fast' path for packets addressed to interfaces that are
1763 * part of this bridge.
1764 */
1765 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
1766 !bstp_state_before_learning(bif)) {
1767 struct bridge_iflist *_bif;
1768 struct ifnet *_ifp = NULL;
1769 int s;
1770 struct psref _psref;
1771
1772 BRIDGE_PSZ_RENTER(s);
1773 BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
1774 /* It is destined for us. */
1775 if (bridge_ourether(_bif, eh, 0)) {
1776 bridge_acquire_member(sc, _bif, &_psref);
1777 BRIDGE_PSZ_REXIT(s);
1778 if (_bif->bif_flags & IFBIF_LEARNING)
1779 (void) bridge_rtupdate(sc,
1780 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
1781 _ifp = m->m_pkthdr.rcvif = _bif->bif_ifp;
1782 bridge_release_member(sc, _bif, &_psref);
1783 goto out;
1784 }
1785
1786 /* We just received a packet that we sent out. */
1787 if (bridge_ourether(_bif, eh, 1))
1788 break;
1789 }
1790 BRIDGE_PSZ_REXIT(s);
1791 out:
1792
1793 if (_bif != NULL) {
1794 bridge_release_member(sc, bif, &psref);
1795 if (_ifp != NULL) {
1796 m->m_flags &= ~M_PROMISC;
1797 ACQUIRE_GLOBAL_LOCKS();
1798 ether_input(_ifp, m);
1799 RELEASE_GLOBAL_LOCKS();
1800 } else
1801 m_freem(m);
1802 return;
1803 }
1804 }
1805
1806 /* Tap off 802.1D packets; they do not get forwarded. */
1807 if (bif->bif_flags & IFBIF_STP &&
1808 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
1809 bstp_input(sc, bif, m);
1810 bridge_release_member(sc, bif, &psref);
1811 return;
1812 }
1813
1814 /*
1815 * A normal switch would discard the packet here, but that's not what
1816 * we've done historically. This also prevents some obnoxious behaviour.
1817 */
1818 if (bstp_state_before_learning(bif)) {
1819 bridge_release_member(sc, bif, &psref);
1820 ACQUIRE_GLOBAL_LOCKS();
1821 ether_input(ifp, m);
1822 RELEASE_GLOBAL_LOCKS();
1823 return;
1824 }
1825
1826 bridge_release_member(sc, bif, &psref);
1827
1828 bridge_forward(sc, m);
1829 }
1830
1831 /*
1832 * bridge_broadcast:
1833 *
1834 * Send a frame to all interfaces that are members of
1835 * the bridge, except for the one on which the packet
1836 * arrived.
1837 */
1838 static void
1839 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
1840 struct mbuf *m)
1841 {
1842 struct bridge_iflist *bif;
1843 struct mbuf *mc;
1844 struct ifnet *dst_if;
1845 bool bmcast;
1846 int s;
1847 DECLARE_LOCK_VARIABLE;
1848
1849 bmcast = m->m_flags & (M_BCAST|M_MCAST);
1850
1851 BRIDGE_PSZ_RENTER(s);
1852 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1853 struct psref psref;
1854
1855 bridge_acquire_member(sc, bif, &psref);
1856 BRIDGE_PSZ_REXIT(s);
1857
1858 dst_if = bif->bif_ifp;
1859
1860 if (bif->bif_flags & IFBIF_STP) {
1861 switch (bif->bif_state) {
1862 case BSTP_IFSTATE_BLOCKING:
1863 case BSTP_IFSTATE_DISABLED:
1864 goto next;
1865 }
1866 }
1867
1868 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
1869 goto next;
1870
1871 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1872 goto next;
1873
1874 if (dst_if != src_if) {
1875 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
1876 if (mc == NULL) {
1877 sc->sc_if.if_oerrors++;
1878 goto next;
1879 }
1880 ACQUIRE_GLOBAL_LOCKS();
1881 bridge_enqueue(sc, dst_if, mc, 1);
1882 RELEASE_GLOBAL_LOCKS();
1883 }
1884
1885 if (bmcast) {
1886 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
1887 if (mc == NULL) {
1888 sc->sc_if.if_oerrors++;
1889 goto next;
1890 }
1891
1892 mc->m_pkthdr.rcvif = dst_if;
1893 mc->m_flags &= ~M_PROMISC;
1894
1895 ACQUIRE_GLOBAL_LOCKS();
1896 ether_input(dst_if, mc);
1897 RELEASE_GLOBAL_LOCKS();
1898 }
1899 next:
1900 BRIDGE_PSZ_RENTER(s);
1901 bridge_release_member(sc, bif, &psref);
1902 }
1903 BRIDGE_PSZ_REXIT(s);
1904
1905 m_freem(m);
1906 }
1907
1908 static int
1909 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
1910 struct bridge_rtnode **brtp)
1911 {
1912 struct bridge_rtnode *brt;
1913 int error;
1914
1915 if (sc->sc_brtcnt >= sc->sc_brtmax)
1916 return ENOSPC;
1917
1918 /*
1919 * Allocate a new bridge forwarding node, and
1920 * initialize the expiration time and Ethernet
1921 * address.
1922 */
1923 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
1924 if (brt == NULL)
1925 return ENOMEM;
1926
1927 memset(brt, 0, sizeof(*brt));
1928 brt->brt_expire = time_uptime + sc->sc_brttimeout;
1929 brt->brt_flags = IFBAF_DYNAMIC;
1930 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
1931
1932 BRIDGE_RT_LOCK(sc);
1933 error = bridge_rtnode_insert(sc, brt);
1934 BRIDGE_RT_UNLOCK(sc);
1935
1936 if (error != 0) {
1937 pool_put(&bridge_rtnode_pool, brt);
1938 return error;
1939 }
1940
1941 *brtp = brt;
1942 return 0;
1943 }
1944
1945 /*
1946 * bridge_rtupdate:
1947 *
1948 * Add a bridge routing entry.
1949 */
1950 static int
1951 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
1952 struct ifnet *dst_if, int setflags, uint8_t flags)
1953 {
1954 struct bridge_rtnode *brt;
1955 int s;
1956
1957 again:
1958 /*
1959 * A route for this destination might already exist. If so,
1960 * update it; otherwise, create a new one.
1961 */
1962 BRIDGE_RT_RENTER(s);
1963 brt = bridge_rtnode_lookup(sc, dst);
1964
1965 if (brt != NULL) {
1966 brt->brt_ifp = dst_if;
1967 if (setflags) {
1968 brt->brt_flags = flags;
1969 if (flags & IFBAF_STATIC)
1970 brt->brt_expire = 0;
1971 else
1972 brt->brt_expire = time_uptime + sc->sc_brttimeout;
1973 } else {
1974 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
1975 brt->brt_expire = time_uptime + sc->sc_brttimeout;
1976 }
1977 }
1978 BRIDGE_RT_REXIT(s);
1979
1980 if (brt == NULL) {
1981 int r;
1982
1983 r = bridge_rtalloc(sc, dst, &brt);
1984 if (r != 0)
1985 return r;
1986 goto again;
1987 }
1988
1989 return 0;
1990 }
1991
1992 /*
1993 * bridge_rtlookup:
1994 *
1995 * Lookup the destination interface for an address.
1996 */
1997 static struct ifnet *
1998 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
1999 {
2000 struct bridge_rtnode *brt;
2001 struct ifnet *ifs = NULL;
2002 int s;
2003
2004 BRIDGE_RT_RENTER(s);
2005 brt = bridge_rtnode_lookup(sc, addr);
2006 if (brt != NULL)
2007 ifs = brt->brt_ifp;
2008 BRIDGE_RT_REXIT(s);
2009
2010 return ifs;
2011 }
2012
2013 typedef bool (*bridge_iterate_cb_t)
2014 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2015
2016 /*
2017 * bridge_rtlist_iterate_remove:
2018 *
2019 * Iterate over sc->sc_rtlist and remove the rtnodes that the func
2020 * callback judges should be removed. Removals are done in a
2021 * pserialize manner; to this end, all kmem_* operations are kept
2022 * outside the mutex.
2023 */
2024 static void
2025 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2026 {
2027 struct bridge_rtnode *brt, *nbrt;
2028 struct bridge_rtnode **brt_list;
2029 int i, count;
2030
2031 retry:
2032 count = sc->sc_brtcnt;
2033 if (count == 0)
2034 return;
2035 brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2036
2037 BRIDGE_RT_LOCK(sc);
2038 if (__predict_false(sc->sc_brtcnt > count)) {
2039 /* The number of rtnodes increased; we need more memory */
2040 BRIDGE_RT_UNLOCK(sc);
2041 kmem_free(brt_list, sizeof(*brt_list) * count);
2042 goto retry;
2043 }
2044
2045 i = 0;
2046 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2047 bool need_break = false;
2048 if (func(sc, brt, &need_break, arg)) {
2049 bridge_rtnode_remove(sc, brt);
2050 brt_list[i++] = brt;
2051 }
2052 if (need_break)
2053 break;
2054 }
2055
2056 if (i > 0)
2057 BRIDGE_RT_PSZ_PERFORM(sc);
2058 BRIDGE_RT_UNLOCK(sc);
2059
2060 while (--i >= 0)
2061 bridge_rtnode_destroy(brt_list[i]);
2062
2063 kmem_free(brt_list, sizeof(*brt_list) * count);
2064 }
2065
2066 static bool
2067 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2068 bool *need_break, void *arg)
2069 {
2070 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2071 /* Take the subsequent removal into account */
2072 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2073 *need_break = true;
2074 return true;
2075 } else
2076 return false;
2077 }
2078
2079 static void
2080 bridge_rttrim0(struct bridge_softc *sc)
2081 {
2082 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2083 }
2084
2085 /*
2086 * bridge_rttrim:
2087 *
2088 * Trim the routing table so that the number of
2089 * routing entries is less than or equal to the
2090 * maximum.
2091 */
2092 static void
2093 bridge_rttrim(struct bridge_softc *sc)
2094 {
2095
2096 /* Make sure we actually need to do this. */
2097 if (sc->sc_brtcnt <= sc->sc_brtmax)
2098 return;
2099
2100 /* Force an aging cycle; this might trim enough addresses. */
2101 bridge_rtage(sc);
2102 if (sc->sc_brtcnt <= sc->sc_brtmax)
2103 return;
2104
2105 bridge_rttrim0(sc);
2106
2107 return;
2108 }
2109
2110 /*
2111 * bridge_timer:
2112 *
2113 * Aging timer for the bridge.
2114 */
2115 static void
2116 bridge_timer(void *arg)
2117 {
2118 struct bridge_softc *sc = arg;
2119
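	/*
	 * The aging pass may sleep (it allocates with KM_SLEEP), which is
	 * not permitted in callout (softint) context, so hand the work off
	 * to the rtage workqueue instead of calling bridge_rtage() here.
	 */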
2120 workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2121 }
2122
2123 static void
2124 bridge_rtage_work(struct work *wk, void *arg)
2125 {
2126 struct bridge_softc *sc = arg;
2127
2128 KASSERT(wk == &sc->sc_rtage_wk);
2129
2130 bridge_rtage(sc);
2131
2132 if (sc->sc_if.if_flags & IFF_RUNNING)
2133 callout_reset(&sc->sc_brcallout,
2134 bridge_rtable_prune_period * hz, bridge_timer, sc);
2135 }
2136
2137 static bool
2138 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2139 bool *need_break, void *arg)
2140 {
2141 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2142 time_uptime >= brt->brt_expire)
2143 return true;
2144 else
2145 return false;
2146 }
2147
2148 /*
2149 * bridge_rtage:
2150 *
2151 * Perform an aging cycle.
2152 */
2153 static void
2154 bridge_rtage(struct bridge_softc *sc)
2155 {
2156 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2157 }
2158
2159
2160 static bool
2161 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2162 bool *need_break, void *arg)
2163 {
2164 int full = *(int*)arg;
2165
2166 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2167 return true;
2168 else
2169 return false;
2170 }
2171
2172 /*
2173 * bridge_rtflush:
2174 *
2175 * Remove all dynamic addresses from the bridge.
2176 */
2177 static void
2178 bridge_rtflush(struct bridge_softc *sc, int full)
2179 {
2180 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2181 }
2182
2183 /*
2184 * bridge_rtdaddr:
2185 *
2186 * Remove an address from the table.
2187 */
2188 static int
2189 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2190 {
2191 struct bridge_rtnode *brt;
2192
2193 BRIDGE_RT_LOCK(sc);
2194 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2195 BRIDGE_RT_UNLOCK(sc);
2196 return ENOENT;
2197 }
2198 bridge_rtnode_remove(sc, brt);
2199 BRIDGE_RT_PSZ_PERFORM(sc);
2200 BRIDGE_RT_UNLOCK(sc);
2201
2202 bridge_rtnode_destroy(brt);
2203
2204 return 0;
2205 }
2206
2207 /*
2208 * bridge_rtdelete:
2209 *
2210	 *	Delete routes to a specific member interface.
2211 */
2212 static void
2213 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2214 {
2215 struct bridge_rtnode *brt;
2216
2217 BRIDGE_RT_LOCK(sc);
2218 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
2219 if (brt->brt_ifp == ifp)
2220 break;
2221 }
2222 if (brt == NULL) {
2223 BRIDGE_RT_UNLOCK(sc);
2224 return;
2225 }
2226 bridge_rtnode_remove(sc, brt);
2227 BRIDGE_RT_PSZ_PERFORM(sc);
2228 BRIDGE_RT_UNLOCK(sc);
2229
2230 bridge_rtnode_destroy(brt);
2231 }
2232
2233 /*
2234 * bridge_rtable_init:
2235 *
2236 * Initialize the route table for this bridge.
2237 */
2238 static void
2239 bridge_rtable_init(struct bridge_softc *sc)
2240 {
2241 int i;
2242
2243 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2244 KM_SLEEP);
2245
2246 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2247 LIST_INIT(&sc->sc_rthash[i]);
2248
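	/* Use a random per-bridge hash key so collisions are hard to predict. */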
2249 sc->sc_rthash_key = cprng_fast32();
2250
2251 LIST_INIT(&sc->sc_rtlist);
2252
2253 sc->sc_rtlist_psz = pserialize_create();
2254 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2255 }
2256
2257 /*
2258 * bridge_rtable_fini:
2259 *
2260 * Deconstruct the route table for this bridge.
2261 */
2262 static void
2263 bridge_rtable_fini(struct bridge_softc *sc)
2264 {
2265
2266 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2267 if (sc->sc_rtlist_lock)
2268 mutex_obj_free(sc->sc_rtlist_lock);
2269 if (sc->sc_rtlist_psz)
2270 pserialize_destroy(sc->sc_rtlist_psz);
2271 }
2272
2273 /*
2274 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2275	 * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2276 */
2277 #define mix(a, b, c) \
2278 do { \
2279 a -= b; a -= c; a ^= (c >> 13); \
2280 b -= c; b -= a; b ^= (a << 8); \
2281 c -= a; c -= b; c ^= (b >> 13); \
2282 a -= b; a -= c; a ^= (c >> 12); \
2283 b -= c; b -= a; b ^= (a << 16); \
2284 c -= a; c -= b; c ^= (b >> 5); \
2285 a -= b; a -= c; a ^= (c >> 3); \
2286 b -= c; b -= a; b ^= (a << 10); \
2287 c -= a; c -= b; c ^= (b >> 15); \
2288 } while (/*CONSTCOND*/0)
2289
2290 static inline uint32_t
2291 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2292 {
2293 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2294
2295 b += addr[5] << 8;
2296 b += addr[4];
2297 a += addr[3] << 24;
2298 a += addr[2] << 16;
2299 a += addr[1] << 8;
2300 a += addr[0];
2301
2302 mix(a, b, c);
2303
2304 return (c & BRIDGE_RTHASH_MASK);
2305 }
2306
2307 #undef mix
2308
2309 /*
2310 * bridge_rtnode_lookup:
2311 *
2312 * Look up a bridge route node for the specified destination.
2313 */
2314 static struct bridge_rtnode *
2315 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2316 {
2317 struct bridge_rtnode *brt;
2318 uint32_t hash;
2319 int dir;
2320
2321 hash = bridge_rthash(sc, addr);
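	/*
	 * Hash chains are kept sorted in descending address order (see
	 * bridge_rtnode_insert()), so the scan can stop as soon as an
	 * entry that compares less than the target address is reached.
	 */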
2322 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2323 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2324 if (dir == 0)
2325 return (brt);
2326 if (dir > 0)
2327 return (NULL);
2328 }
2329
2330 return (NULL);
2331 }
2332
2333 /*
2334 * bridge_rtnode_insert:
2335 *
2336 * Insert the specified bridge node into the route table. We
2337 * assume the entry is not already in the table.
2338 */
2339 static int
2340 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2341 {
2342 struct bridge_rtnode *lbrt;
2343 uint32_t hash;
2344 int dir;
2345
2346 KASSERT(BRIDGE_RT_LOCKED(sc));
2347
2348 hash = bridge_rthash(sc, brt->brt_addr);
2349
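	/*
	 * Keep each hash chain sorted in descending address order so
	 * that bridge_rtnode_lookup() can terminate its scan early.
	 */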
2350 lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2351 if (lbrt == NULL) {
2352 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2353 goto out;
2354 }
2355
2356 do {
2357 dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2358 if (dir == 0)
2359 return (EEXIST);
2360 if (dir > 0) {
2361 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2362 goto out;
2363 }
2364 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2365 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2366 goto out;
2367 }
2368 lbrt = LIST_NEXT(lbrt, brt_hash);
2369 } while (lbrt != NULL);
2370
2371 #ifdef DIAGNOSTIC
2372 panic("bridge_rtnode_insert: impossible");
2373 #endif
2374
2375 out:
2376 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2377 sc->sc_brtcnt++;
2378
2379 return (0);
2380 }
2381
2382 /*
2383 * bridge_rtnode_remove:
2384 *
2385 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2386 */
2387 static void
2388 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2389 {
2390
2391 KASSERT(BRIDGE_RT_LOCKED(sc));
2392
2393 LIST_REMOVE(brt, brt_hash);
2394 LIST_REMOVE(brt, brt_list);
2395 sc->sc_brtcnt--;
2396 }
2397
2398 /*
2399 * bridge_rtnode_destroy:
2400 *
2401 * Destroy a bridge rtnode.
2402 */
2403 static void
2404 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2405 {
2406
2407 pool_put(&bridge_rtnode_pool, brt);
2408 }
2409
2410 #if defined(BRIDGE_IPF)
2411 extern pfil_head_t *inet_pfil_hook; /* XXX */
2412 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2413
2414 /*
2415 * Send bridge packets through IPF if they are one of the types IPF can deal
2416 * with, or if they are ARP or REVARP. (IPF will pass ARP and REVARP without
2417 * question.)
2418 */
2419 static int
2420 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2421 {
2422 int snap, error;
2423 struct ether_header *eh1, eh2;
2424 struct llc llc1;
2425 uint16_t ether_type;
2426
2427 snap = 0;
2428	error = -1;	/* Default to an error unless explicitly set to 0 */
2429 eh1 = mtod(*mp, struct ether_header *);
2430 ether_type = ntohs(eh1->ether_type);
2431
2432 /*
2433 * Check for SNAP/LLC.
2434 */
2435 if (ether_type < ETHERMTU) {
2436 struct llc *llc2 = (struct llc *)(eh1 + 1);
2437
2438 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2439 llc2->llc_dsap == LLC_SNAP_LSAP &&
2440 llc2->llc_ssap == LLC_SNAP_LSAP &&
2441 llc2->llc_control == LLC_UI) {
2442 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2443 snap = 1;
2444 }
2445 }
2446
2447 /*
2448 * If we're trying to filter bridge traffic, don't look at anything
2449 * other than IP and ARP traffic. If the filter doesn't understand
2450 * IPv6, don't allow IPv6 through the bridge either. This is lame
2451 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2452 * but of course we don't have an AppleTalk filter to begin with.
2453 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2454 * ARP traffic.)
2455 */
2456 switch (ether_type) {
2457 case ETHERTYPE_ARP:
2458 case ETHERTYPE_REVARP:
2459 return 0; /* Automatically pass */
2460 case ETHERTYPE_IP:
2461 # ifdef INET6
2462 case ETHERTYPE_IPV6:
2463 # endif /* INET6 */
2464 break;
2465 default:
2466 goto bad;
2467 }
2468
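	/*
	 * The pfil(9) hooks operate on bare IP/IPv6 packets, so the
	 * link-layer headers are removed before filtering and restored
	 * afterwards.
	 */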
2469 /* Strip off the Ethernet header and keep a copy. */
2470 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2471 m_adj(*mp, ETHER_HDR_LEN);
2472
2473 /* Strip off snap header, if present */
2474 if (snap) {
2475 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2476 m_adj(*mp, sizeof(struct llc));
2477 }
2478
2479 /*
2480 * Check basic packet sanity and run IPF through pfil.
2481 */
2482 KASSERT(!cpu_intr_p());
2483 switch (ether_type)
2484 {
2485 case ETHERTYPE_IP :
2486 error = (dir == PFIL_IN) ? bridge_ip_checkbasic(mp) : 0;
2487 if (error == 0)
2488 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2489 break;
2490 # ifdef INET6
2491 case ETHERTYPE_IPV6 :
2492 error = (dir == PFIL_IN) ? bridge_ip6_checkbasic(mp) : 0;
2493 if (error == 0)
2494 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2495 break;
2496 # endif
2497 default :
2498 error = 0;
2499 break;
2500 }
2501
2502 if (*mp == NULL)
2503 return error;
2504 if (error != 0)
2505 goto bad;
2506
2507 error = -1;
2508
2509 /*
2510 * Finally, put everything back the way it was and return
2511 */
2512 if (snap) {
2513 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2514 if (*mp == NULL)
2515 return error;
2516 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2517 }
2518
2519 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2520 if (*mp == NULL)
2521 return error;
2522 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2523
2524 return 0;
2525
2526 bad:
2527 m_freem(*mp);
2528 *mp = NULL;
2529 return error;
2530 }
2531
2532 /*
2533 * Perform basic checks on header size since
2534	 * IPF assumes ip_input has already
2535	 * processed it.  Cut-and-pasted from ip_input.c.
2536 * Given how simple the IPv6 version is,
2537 * does the IPv4 version really need to be
2538 * this complicated?
2539 *
2540 * XXX Should we update ipstat here, or not?
2541 * XXX Right now we update ipstat but not
2542 * XXX csum_counter.
2543 */
2544 static int
2545 bridge_ip_checkbasic(struct mbuf **mp)
2546 {
2547 struct mbuf *m = *mp;
2548 struct ip *ip;
2549 int len, hlen;
2550
2551 if (*mp == NULL)
2552 return -1;
2553
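	/*
	 * If the IP header is not 32-bit aligned, copy the packet up into
	 * a freshly allocated, aligned mbuf; otherwise just make sure the
	 * whole base header sits in the first mbuf.
	 */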
2554 if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2555 if ((m = m_copyup(m, sizeof(struct ip),
2556 (max_linkhdr + 3) & ~3)) == NULL) {
2557 /* XXXJRT new stat, please */
2558 ip_statinc(IP_STAT_TOOSMALL);
2559 goto bad;
2560 }
2561 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
2562 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
2563 ip_statinc(IP_STAT_TOOSMALL);
2564 goto bad;
2565 }
2566 }
2567 ip = mtod(m, struct ip *);
2568 if (ip == NULL) goto bad;
2569
2570 if (ip->ip_v != IPVERSION) {
2571 ip_statinc(IP_STAT_BADVERS);
2572 goto bad;
2573 }
2574 hlen = ip->ip_hl << 2;
2575 if (hlen < sizeof(struct ip)) { /* minimum header length */
2576 ip_statinc(IP_STAT_BADHLEN);
2577 goto bad;
2578 }
2579 if (hlen > m->m_len) {
2580		if ((m = m_pullup(m, hlen)) == NULL) {
2581 ip_statinc(IP_STAT_BADHLEN);
2582 goto bad;
2583 }
2584 ip = mtod(m, struct ip *);
2585 if (ip == NULL) goto bad;
2586 }
2587
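	/*
	 * If the interface verified the IPv4 header checksum in hardware,
	 * honour the M_CSUM_IPv4/M_CSUM_IPv4_BAD flags; otherwise fall
	 * back to computing it with in_cksum().
	 */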
2588 switch (m->m_pkthdr.csum_flags &
2589 ((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
2590 M_CSUM_IPv4_BAD)) {
2591 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2592 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2593 goto bad;
2594
2595 case M_CSUM_IPv4:
2596 /* Checksum was okay. */
2597 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2598 break;
2599
2600 default:
2601 /* Must compute it ourselves. */
2602 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2603 if (in_cksum(m, hlen) != 0)
2604 goto bad;
2605 break;
2606 }
2607
2608 /* Retrieve the packet length. */
2609 len = ntohs(ip->ip_len);
2610
2611 /*
2612 * Check for additional length bogosity
2613 */
2614 if (len < hlen) {
2615 ip_statinc(IP_STAT_BADLEN);
2616 goto bad;
2617 }
2618
2619 /*
2620 * Check that the amount of data in the buffers
2621	 * is at least as much as the IP header would have us expect.
2622	 * Drop the packet if it is shorter than we expect.
2623 */
2624 if (m->m_pkthdr.len < len) {
2625 ip_statinc(IP_STAT_TOOSHORT);
2626 goto bad;
2627 }
2628
2629 /* Checks out, proceed */
2630 *mp = m;
2631 return 0;
2632
2633 bad:
2634 *mp = m;
2635 return -1;
2636 }
2637
2638 # ifdef INET6
2639 /*
2640 * Same as above, but for IPv6.
2641 * Cut-and-pasted from ip6_input.c.
2642 * XXX Should we update ip6stat, or not?
2643 */
2644 static int
2645 bridge_ip6_checkbasic(struct mbuf **mp)
2646 {
2647 struct mbuf *m = *mp;
2648 struct ip6_hdr *ip6;
2649
2650 /*
2651 * If the IPv6 header is not aligned, slurp it up into a new
2652 * mbuf with space for link headers, in the event we forward
2653 * it. Otherwise, if it is aligned, make sure the entire base
2654 * IPv6 header is in the first mbuf of the chain.
2655 */
2656 if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2657 struct ifnet *inifp = m->m_pkthdr.rcvif;
2658 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
2659 (max_linkhdr + 3) & ~3)) == NULL) {
2660 /* XXXJRT new stat, please */
2661 ip6_statinc(IP6_STAT_TOOSMALL);
2662 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2663 goto bad;
2664 }
2665 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
2666 struct ifnet *inifp = m->m_pkthdr.rcvif;
2667 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
2668 ip6_statinc(IP6_STAT_TOOSMALL);
2669 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2670 goto bad;
2671 }
2672 }
2673
2674 ip6 = mtod(m, struct ip6_hdr *);
2675
2676 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2677 ip6_statinc(IP6_STAT_BADVERS);
2678 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
2679 goto bad;
2680 }
2681
2682 /* Checks out, proceed */
2683 *mp = m;
2684 return 0;
2685
2686 bad:
2687 *mp = m;
2688 return -1;
2689 }
2690 # endif /* INET6 */
2691 #endif /* BRIDGE_IPF */
2692