1 /* $NetBSD: if_bridge.c,v 1.197 2025/04/16 05:29:45 ozaki-r Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.). Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79 * consider heterogeneous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.197 2025/04/16 05:29:45 ozaki-r Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_inet.h"
87 #include "opt_net_mpsafe.h"
88 #endif /* _KERNEL_OPT */
89
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105 #include <sys/syslog.h>
106
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_types.h>
111 #include <net/if_llc.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115 #include <net/ether_sw_offload.h>
116
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h> /* XXX */
123 #include <netinet/ip6.h>
124 #include <netinet6/in6_var.h>
125 #include <netinet6/ip6_var.h>
126 #include <netinet6/ip6_private.h> /* XXX */
127
128 /*
129 * Size of the route hash table. Must be a power of two.
130 */
131 #ifndef BRIDGE_RTHASH_SIZE
132 #define BRIDGE_RTHASH_SIZE 1024
133 #endif
134
135 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
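
/*
 * Note: since the table size is a power of two, a hash value is reduced
 * to a bucket index with a mask rather than a modulo, i.e. with the
 * default size of 1024, (h & BRIDGE_RTHASH_MASK) == (h % BRIDGE_RTHASH_SIZE).
 */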
136
137 #include "carp.h"
138 #if NCARP > 0
139 #include <netinet/in.h>
140 #include <netinet/in_var.h>
141 #include <netinet/ip_carp.h>
142 #endif
143
144 #include "ioconf.h"
145
146 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
147 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
148 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
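
/*
 * The assertions above let the BC_F_XLATEIN/BC_F_XLATEOUT path in
 * bridge_ioctl() pass the user buffer and length through args.ifbifconf
 * even when the actual argument is a struct ifbaconf (BRDGRTS).
 */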
149
150 /*
151 * Maximum number of addresses to cache.
152 */
153 #ifndef BRIDGE_RTABLE_MAX
154 #define BRIDGE_RTABLE_MAX 100
155 #endif
156
157 /*
158 * Spanning tree defaults.
159 */
160 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
161 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
162 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
163 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
164 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
165 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
166 #define BSTP_DEFAULT_PATH_COST 55
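
/*
 * The spanning tree times above are stored in units of 1/256 second;
 * bridge_ioctl_sht() and friends convert to and from whole seconds with
 * << 8 / >> 8, so e.g. (20 * 256) corresponds to 20 seconds.
 */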
167
168 /*
169 * Timeout (in seconds) for entries learned dynamically.
170 */
171 #ifndef BRIDGE_RTABLE_TIMEOUT
172 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
173 #endif
174
175 /*
176 * Number of seconds between walks of the route list.
177 */
178 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
179 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
180 #endif
181
182 #define BRIDGE_RT_LOCK(_sc) mutex_enter((_sc)->sc_rtlist_lock)
183 #define BRIDGE_RT_UNLOCK(_sc) mutex_exit((_sc)->sc_rtlist_lock)
184 #define BRIDGE_RT_LOCKED(_sc) mutex_owned((_sc)->sc_rtlist_lock)
185
186 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
187 pserialize_perform((_sc)->sc_rtlist_psz)
188
189 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc) \
190 PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist), \
191 struct bridge_rtnode, brt_list)
192 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc) \
193 PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist), \
194 struct bridge_rtnode, brt_list)
195 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt) \
196 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
197 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt) \
198 PSLIST_WRITER_REMOVE((_brt), brt_list)
199
200 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash) \
201 PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
202 struct bridge_rtnode, brt_hash)
203 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash) \
204 PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
205 struct bridge_rtnode, brt_hash)
206 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt) \
207 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
208 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new) \
209 PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
210 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt) \
211 PSLIST_WRITER_REMOVE((_brt), brt_hash)
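
/*
 * A rough sketch of the reader-side pattern for the pserialized lists
 * above (writers hold BRIDGE_RT_LOCK() and use the _WRITER_ variants):
 *
 *	int s = pserialize_read_enter();
 *	BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
 *		if (memcmp(brt->brt_addr, addr, ETHER_ADDR_LEN) == 0)
 *			break;
 *	}
 *	pserialize_read_exit(s);
 */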
212
213 #ifdef NET_MPSAFE
214 #define DECLARE_LOCK_VARIABLE
215 #define ACQUIRE_GLOBAL_LOCKS() do { } while (0)
216 #define RELEASE_GLOBAL_LOCKS() do { } while (0)
217 #else
218 #define DECLARE_LOCK_VARIABLE int __s
219 #define ACQUIRE_GLOBAL_LOCKS() do { \
220 KERNEL_LOCK(1, NULL); \
221 mutex_enter(softnet_lock); \
222 __s = splsoftnet(); \
223 } while (0)
224 #define RELEASE_GLOBAL_LOCKS() do { \
225 splx(__s); \
226 mutex_exit(softnet_lock); \
227 KERNEL_UNLOCK_ONE(NULL); \
228 } while (0)
229 #endif
230
231 struct psref_class *bridge_psref_class __read_mostly;
232
233 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
234
235 static struct pool bridge_rtnode_pool;
236
237 static int bridge_clone_create(struct if_clone *, int);
238 static int bridge_clone_destroy(struct ifnet *);
239
240 static int bridge_ioctl(struct ifnet *, u_long, void *);
241 static int bridge_init(struct ifnet *);
242 static void bridge_stop(struct ifnet *, int);
243 static void bridge_start(struct ifnet *);
244 static void bridge_ifdetach(void *);
245
246 static void bridge_input(struct ifnet *, struct mbuf *);
247 static void bridge_forward(struct bridge_softc *, struct mbuf *);
248
249 static void bridge_timer(void *);
250
251 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, bool,
252 struct mbuf *);
253
254 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
255 struct ifnet *, int, uint8_t);
256 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
257 static void bridge_rttrim(struct bridge_softc *);
258 static void bridge_rtage(struct bridge_softc *);
259 static void bridge_rtage_work(struct work *, void *);
260 static void bridge_rtflush(struct bridge_softc *, int);
261 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
262 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
263
264 static void bridge_rtable_init(struct bridge_softc *);
265 static void bridge_rtable_fini(struct bridge_softc *);
266
267 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
268 const uint8_t *);
269 static int bridge_rtnode_insert(struct bridge_softc *,
270 struct bridge_rtnode *);
271 static void bridge_rtnode_remove(struct bridge_softc *,
272 struct bridge_rtnode *);
273 static void bridge_rtnode_destroy(struct bridge_rtnode *);
274
275 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
276 const char *name,
277 struct psref *);
278 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
279 struct ifnet *ifp,
280 struct psref *);
281 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
282 struct psref *);
283 static void bridge_delete_member(struct bridge_softc *,
284 struct bridge_iflist *);
285 static void bridge_acquire_member(struct bridge_softc *sc,
286 struct bridge_iflist *,
287 struct psref *);
288
289 static int bridge_ioctl_add(struct bridge_softc *, void *);
290 static int bridge_ioctl_del(struct bridge_softc *, void *);
291 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
292 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
293 static int bridge_ioctl_scache(struct bridge_softc *, void *);
294 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
295 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
296 static int bridge_ioctl_rts(struct bridge_softc *, void *);
297 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
298 static int bridge_ioctl_sto(struct bridge_softc *, void *);
299 static int bridge_ioctl_gto(struct bridge_softc *, void *);
300 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
301 static int bridge_ioctl_flush(struct bridge_softc *, void *);
302 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
303 static int bridge_ioctl_spri(struct bridge_softc *, void *);
304 static int bridge_ioctl_ght(struct bridge_softc *, void *);
305 static int bridge_ioctl_sht(struct bridge_softc *, void *);
306 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
307 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
308 static int bridge_ioctl_gma(struct bridge_softc *, void *);
309 static int bridge_ioctl_sma(struct bridge_softc *, void *);
310 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
311 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
312 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
313 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
314 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
315 static int bridge_ip_checkbasic(struct mbuf **mp);
316 # ifdef INET6
317 static int bridge_ip6_checkbasic(struct mbuf **mp);
318 # endif /* INET6 */
319
320 struct bridge_control {
321 int (*bc_func)(struct bridge_softc *, void *);
322 int bc_argsize;
323 int bc_flags;
324 };
325
326 #define BC_F_COPYIN 0x01 /* copy arguments in */
327 #define BC_F_COPYOUT 0x02 /* copy arguments out */
328 #define BC_F_SUSER 0x04 /* do super-user check */
329 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
330 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
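
/*
 * BC_F_COPYIN/BC_F_COPYOUT handlers exchange a fixed-size argument with
 * userland via copyin()/copyout() in bridge_ioctl().  BC_F_XLATEIN/
 * BC_F_XLATEOUT handlers (BRDGGIFS, BRDGRTS) instead get the user buffer
 * pointer and length through args.ifbifconf and copy out a variable
 * amount of data themselves; bridge_ioctl() passes the resulting length
 * back to the caller.
 */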
331
332 static const struct bridge_control bridge_control_table[] = {
333 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
334 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
335
336 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
337 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
338
339 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
340 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
341
342 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
343 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
344
345 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
346
347 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
348 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
349
350 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
351
352 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
353
354 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
355 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
356
357 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
358 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
359
360 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
361 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
362
363 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
364 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
365
366 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
367
368 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
369
370 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
371 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
372
373 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
374 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
375 };
376
377 static const int bridge_control_table_size = __arraycount(bridge_control_table);
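
/*
 * A rough sketch of how userland (e.g. brconfig(8)) reaches the table
 * above: the request is wrapped in a struct ifdrv whose ifd_cmd indexes
 * bridge_control_table[] and is issued via SIOCSDRVSPEC/SIOCGDRVSPEC on
 * the bridge interface.  The snippet below is illustrative only and is
 * not part of this file:
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.ifbr_ifsname, "wm0", sizeof(req.ifbr_ifsname));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	if (ioctl(sock, SIOCSDRVSPEC, &ifd) == -1)
 *		err(1, "BRDGADD");
 */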
378
379 static struct if_clone bridge_cloner =
380 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
381
382 /*
383 * bridgeattach:
384 *
385 * Pseudo-device attach routine.
386 */
387 void
388 bridgeattach(int n)
389 {
390
391 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
392 0, 0, 0, "brtpl", NULL, IPL_NET);
393
394 bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
395
396 if_clone_attach(&bridge_cloner);
397 }
398
399 /*
400 * bridge_clone_create:
401 *
402 * Create a new bridge instance.
403 */
404 static int
405 bridge_clone_create(struct if_clone *ifc, int unit)
406 {
407 struct bridge_softc *sc;
408 struct ifnet *ifp;
409 int error;
410
411 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
412 ifp = &sc->sc_if;
413
414 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
415 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
416 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
417 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
418 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
419 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
420 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
421 sc->sc_filter_flags = 0;
422
423 /* Initialize our routing table. */
424 bridge_rtable_init(sc);
425
426 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
427 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
428 if (error)
429 panic("%s: workqueue_create %d\n", __func__, error);
430
431 callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
432 callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
433
434 mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
435 PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
436 sc->sc_iflist_psref.bip_psz = pserialize_create();
437
438 if_initname(ifp, ifc->ifc_name, unit);
439 ifp->if_softc = sc;
440 #ifdef NET_MPSAFE
441 ifp->if_extflags = IFEF_MPSAFE;
442 #endif
443 ifp->if_mtu = ETHERMTU;
444 ifp->if_ioctl = bridge_ioctl;
445 ifp->if_output = bridge_output;
446 ifp->if_start = bridge_start;
447 ifp->if_stop = bridge_stop;
448 ifp->if_init = bridge_init;
449 ifp->if_type = IFT_BRIDGE;
450 ifp->if_addrlen = 0;
451 ifp->if_dlt = DLT_EN10MB;
452 ifp->if_hdrlen = ETHER_HDR_LEN;
453 if_initialize(ifp);
454
455 /*
456 * Set the link state to down.
457 * When interfaces are added, the link state will reflect
458 * the best link state of the combined interfaces.
459 */
460 ifp->if_link_state = LINK_STATE_DOWN;
461
462 if_alloc_sadl(ifp);
463 if_register(ifp);
464
465 return 0;
466 }
467
468 /*
469 * bridge_clone_destroy:
470 *
471 * Destroy a bridge instance.
472 */
473 static int
474 bridge_clone_destroy(struct ifnet *ifp)
475 {
476 struct bridge_softc *sc = ifp->if_softc;
477 struct bridge_iflist *bif;
478
479 if ((ifp->if_flags & IFF_RUNNING) != 0)
480 bridge_stop(ifp, 1);
481
482 BRIDGE_LOCK(sc);
483 for (;;) {
484 bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
485 bif_next);
486 if (bif == NULL)
487 break;
488 bridge_delete_member(sc, bif);
489 }
490 PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
491 BRIDGE_UNLOCK(sc);
492
493 if_detach(ifp);
494
495 /* Tear down the routing table. */
496 bridge_rtable_fini(sc);
497
498 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
499 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
500 callout_destroy(&sc->sc_brcallout);
501 callout_destroy(&sc->sc_bstpcallout);
502 workqueue_destroy(sc->sc_rtage_wq);
503 kmem_free(sc, sizeof(*sc));
504
505 return 0;
506 }
507
508 /*
509 * bridge_ioctl:
510 *
511 * Handle a control request from the operator.
512 */
513 static int
514 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
515 {
516 struct bridge_softc *sc = ifp->if_softc;
517 struct lwp *l = curlwp; /* XXX */
518 union {
519 struct ifbreq ifbreq;
520 struct ifbifconf ifbifconf;
521 struct ifbareq ifbareq;
522 struct ifbaconf ifbaconf;
523 struct ifbrparam ifbrparam;
524 } args;
525 struct ifdrv *ifd = (struct ifdrv *) data;
526 const struct bridge_control *bc = NULL; /* XXXGCC */
527 int error = 0;
528
529 /* Authorize command before calling splsoftnet(). */
530 switch (cmd) {
531 case SIOCGDRVSPEC:
532 case SIOCSDRVSPEC:
533 if (ifd->ifd_cmd >= bridge_control_table_size
534 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
535 error = EINVAL;
536 return error;
537 }
538
539 /* We only care about BC_F_SUSER at this point. */
540 if ((bc->bc_flags & BC_F_SUSER) == 0)
541 break;
542
543 error = kauth_authorize_network(l->l_cred,
544 KAUTH_NETWORK_INTERFACE_BRIDGE,
545 cmd == SIOCGDRVSPEC ?
546 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
547 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
548 ifd, NULL, NULL);
549 if (error)
550 return error;
551
552 break;
553 }
554
555 const int s = splsoftnet();
556
557 switch (cmd) {
558 case SIOCGDRVSPEC:
559 case SIOCSDRVSPEC:
560 KASSERT(bc != NULL);
561 if (cmd == SIOCGDRVSPEC &&
562 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
563 error = EINVAL;
564 break;
565 }
566 else if (cmd == SIOCSDRVSPEC &&
567 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
568 error = EINVAL;
569 break;
570 }
571
572 /* BC_F_SUSER is checked above, before splsoftnet(). */
573
574 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
575 && (ifd->ifd_len != bc->bc_argsize
576 || ifd->ifd_len > sizeof(args))) {
577 error = EINVAL;
578 break;
579 }
580
581 memset(&args, 0, sizeof(args));
582 if (bc->bc_flags & BC_F_COPYIN) {
583 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
584 if (error)
585 break;
586 } else if (bc->bc_flags & BC_F_XLATEIN) {
587 args.ifbifconf.ifbic_len = ifd->ifd_len;
588 args.ifbifconf.ifbic_buf = ifd->ifd_data;
589 }
590
591 error = (*bc->bc_func)(sc, &args);
592 if (error)
593 break;
594
595 if (bc->bc_flags & BC_F_COPYOUT) {
596 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
597 } else if (bc->bc_flags & BC_F_XLATEOUT) {
598 ifd->ifd_len = args.ifbifconf.ifbic_len;
599 ifd->ifd_data = args.ifbifconf.ifbic_buf;
600 }
601 break;
602
603 case SIOCSIFFLAGS:
604 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
605 break;
606 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
607 case IFF_RUNNING:
608 /*
609 * If interface is marked down and it is running,
610 * then stop and disable it.
611 */
612 if_stop(ifp, 1);
613 break;
614 case IFF_UP:
615 /*
616 * If interface is marked up and it is stopped, then
617 * start it.
618 */
619 error = if_init(ifp);
620 break;
621 default:
622 break;
623 }
624 break;
625
626 case SIOCSIFMTU:
627 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
628 error = 0;
629 break;
630
631 case SIOCGIFCAP:
632 {
633 struct ifcapreq *ifcr = (struct ifcapreq *)data;
634 ifcr->ifcr_capabilities = sc->sc_capenable;
635 ifcr->ifcr_capenable = sc->sc_capenable;
636 break;
637 }
638
639 default:
640 error = ifioctl_common(ifp, cmd, data);
641 break;
642 }
643
644 splx(s);
645
646 return error;
647 }
648
649 /*
650 * bridge_lookup_member:
651 *
652 * Lookup a bridge member interface.
653 */
654 static struct bridge_iflist *
655 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
656 {
657 struct bridge_iflist *bif;
658 struct ifnet *ifp;
659 int s;
660
661 s = pserialize_read_enter();
662
663 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
664 ifp = bif->bif_ifp;
665 if (strcmp(ifp->if_xname, name) == 0)
666 break;
667 }
668 if (bif != NULL)
669 bridge_acquire_member(sc, bif, psref);
670
671 pserialize_read_exit(s);
672
673 return bif;
674 }
675
676 /*
677 * bridge_lookup_member_if:
678 *
679 * Lookup a bridge member interface by ifnet*.
680 */
681 static struct bridge_iflist *
682 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
683 struct psref *psref)
684 {
685 struct bridge_iflist *bif;
686 int s;
687
688 s = pserialize_read_enter();
689
690 bif = member_ifp->if_bridgeif;
691 if (bif != NULL) {
692 psref_acquire(psref, &bif->bif_psref,
693 bridge_psref_class);
694 }
695
696 pserialize_read_exit(s);
697
698 return bif;
699 }
700
701 static void
702 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
703 struct psref *psref)
704 {
705
706 psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
707 }
708
709 /*
710 * bridge_release_member:
711 *
712 * Release the specified member interface.
713 */
714 static void
715 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
716 struct psref *psref)
717 {
718
719 psref_release(psref, &bif->bif_psref, bridge_psref_class);
720 }
721
722 /*
723 * bridge_delete_member:
724 *
725 * Delete the specified member interface.
726 */
727 static void
728 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
729 {
730 struct ifnet *ifs = bif->bif_ifp;
731
732 KASSERT(BRIDGE_LOCKED(sc));
733
734 ifs->_if_input = ether_input;
735 ifs->if_bridge = NULL;
736 ifs->if_bridgeif = NULL;
737
738 PSLIST_WRITER_REMOVE(bif, bif_next);
739 BRIDGE_PSZ_PERFORM(sc);
740
741 if_linkstate_change_disestablish(ifs,
742 bif->bif_linkstate_hook, BRIDGE_LOCK_OBJ(sc));
743 ether_ifdetachhook_disestablish(ifs,
744 bif->bif_ifdetach_hook, BRIDGE_LOCK_OBJ(sc));
745
746 BRIDGE_UNLOCK(sc);
747
748 switch (ifs->if_type) {
749 case IFT_ETHER:
750 case IFT_L2TP:
751 /*
752 * Take the interface out of promiscuous mode.
753 * Don't call it while holding a spin lock.
754 */
755 (void) ifpromisc(ifs, 0);
756 IFNET_LOCK(ifs);
757 (void) ether_disable_vlan_mtu(ifs);
758 IFNET_UNLOCK(ifs);
759 break;
760 default:
761 #ifdef DIAGNOSTIC
762 panic("%s: impossible", __func__);
763 #endif
764 break;
765 }
766
767 psref_target_destroy(&bif->bif_psref, bridge_psref_class);
768
769 PSLIST_ENTRY_DESTROY(bif, bif_next);
770 kmem_free(bif, sizeof(*bif));
771
772 BRIDGE_LOCK(sc);
773 }
774
775 /*
776 * bridge_calc_csum_flags:
777 *
778 * Calculate the bitwise AND of the csum flags each member interface supports.
779 */
780 void
781 bridge_calc_csum_flags(struct bridge_softc *sc)
782 {
783 struct bridge_iflist *bif;
784 struct ifnet *ifs = NULL;
785 int flags = ~0;
786 int capenable = ~0;
787
788 BRIDGE_LOCK(sc);
789 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
790 ifs = bif->bif_ifp;
791 flags &= ifs->if_csum_flags_tx;
792 capenable &= ifs->if_capenable;
793 }
794 sc->sc_csum_flags_tx = flags;
795 sc->sc_capenable = (ifs != NULL) ? capenable : 0;
796 BRIDGE_UNLOCK(sc);
797 }
798
799 /*
800 * bridge_calc_link_state:
801 *
802 * Calculate the link state based on each member interface.
803 */
804 static void
805 bridge_calc_link_state(void *xsc)
806 {
807 struct bridge_softc *sc = xsc;
808 struct bridge_iflist *bif;
809 struct ifnet *ifs;
810 int link_state = LINK_STATE_DOWN;
811
812 BRIDGE_LOCK(sc);
813 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
814 ifs = bif->bif_ifp;
815 if (ifs->if_link_state == LINK_STATE_UP) {
816 link_state = LINK_STATE_UP;
817 break;
818 }
819 if (ifs->if_link_state == LINK_STATE_UNKNOWN)
820 link_state = LINK_STATE_UNKNOWN;
821 }
822 if_link_state_change(&sc->sc_if, link_state);
823 BRIDGE_UNLOCK(sc);
824 }
825
826 static int
827 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
828 {
829 struct ifbreq *req = arg;
830 struct bridge_iflist *bif = NULL;
831 struct ifnet *ifs;
832 int error = 0;
833 struct psref psref;
834
835 ifs = if_get(req->ifbr_ifsname, &psref);
836 if (ifs == NULL)
837 return ENOENT;
838
839 if (ifs->if_bridge == sc) {
840 error = EEXIST;
841 goto out;
842 }
843
844 if (ifs->if_bridge != NULL) {
845 error = EBUSY;
846 goto out;
847 }
848
849 if (ifs->_if_input != ether_input) {
850 error = EINVAL;
851 goto out;
852 }
853
854 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
855 if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
856 error = EINVAL;
857 goto out;
858 }
859
860 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
861
862 switch (ifs->if_type) {
863 case IFT_ETHER:
864 if (sc->sc_if.if_mtu != ifs->if_mtu) {
865 /* Change MTU of added interface to bridge MTU */
866 struct ifreq ifr;
867 memset(&ifr, 0, sizeof(ifr));
868 ifr.ifr_mtu = sc->sc_if.if_mtu;
869 IFNET_LOCK(ifs);
870 error = if_ioctl(ifs, SIOCSIFMTU, &ifr);
871 IFNET_UNLOCK(ifs);
872 if (error != 0)
873 goto out;
874 }
875 /* FALLTHROUGH */
876 case IFT_L2TP:
877 IFNET_LOCK(ifs);
878 error = ether_enable_vlan_mtu(ifs);
879 IFNET_UNLOCK(ifs);
880 if (error > 0)
881 goto out;
882 /*
883 * Place the interface into promiscuous mode.
884 */
885 error = ifpromisc(ifs, 1);
886 if (error)
887 goto out;
888 break;
889 default:
890 error = EINVAL;
891 goto out;
892 }
893
894 bif->bif_ifp = ifs;
895 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
896 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
897 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
898 bif->bif_linkstate_hook = if_linkstate_change_establish(ifs,
899 bridge_calc_link_state, sc);
900 PSLIST_ENTRY_INIT(bif, bif_next);
901 psref_target_init(&bif->bif_psref, bridge_psref_class);
902
903 BRIDGE_LOCK(sc);
904
905 ifs->if_bridge = sc;
906 ifs->if_bridgeif = bif;
907 PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
908 ifs->_if_input = bridge_input;
909
910 BRIDGE_UNLOCK(sc);
911
912 bif->bif_ifdetach_hook = ether_ifdetachhook_establish(ifs,
913 bridge_ifdetach, (void *)ifs);
914
915 bridge_calc_csum_flags(sc);
916 bridge_calc_link_state(sc);
917
918 if (sc->sc_if.if_flags & IFF_RUNNING)
919 bstp_initialization(sc);
920 else
921 bstp_stop(sc);
922
923 out:
924 if_put(ifs, &psref);
925 if (error) {
926 if (bif != NULL)
927 kmem_free(bif, sizeof(*bif));
928 }
929 return error;
930 }
931
932 static int
933 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
934 {
935 struct ifbreq *req = arg;
936 const char *name = req->ifbr_ifsname;
937 struct bridge_iflist *bif;
938 struct ifnet *ifs;
939
940 BRIDGE_LOCK(sc);
941
942 /*
943 * Don't use bridge_lookup_member. We want to get a member
944 * with bif_refs == 0.
945 */
946 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
947 ifs = bif->bif_ifp;
948 if (strcmp(ifs->if_xname, name) == 0)
949 break;
950 }
951
952 if (bif == NULL) {
953 BRIDGE_UNLOCK(sc);
954 return ENOENT;
955 }
956
957 bridge_delete_member(sc, bif);
958
959 BRIDGE_UNLOCK(sc);
960
961 bridge_rtdelete(sc, ifs);
962 bridge_calc_csum_flags(sc);
963 bridge_calc_link_state(sc);
964
965 if (sc->sc_if.if_flags & IFF_RUNNING)
966 bstp_initialization(sc);
967
968 return 0;
969 }
970
971 static int
972 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
973 {
974 struct ifbreq *req = arg;
975 struct bridge_iflist *bif;
976 struct psref psref;
977
978 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
979 if (bif == NULL)
980 return ENOENT;
981
982 req->ifbr_ifsflags = bif->bif_flags;
983 req->ifbr_state = bif->bif_state;
984 req->ifbr_priority = bif->bif_priority;
985 req->ifbr_path_cost = bif->bif_path_cost;
986 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
987
988 bridge_release_member(sc, bif, &psref);
989
990 return 0;
991 }
992
993 static int
994 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
995 {
996 struct ifbreq *req = arg;
997 struct bridge_iflist *bif;
998 struct psref psref;
999
1000 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1001 if (bif == NULL)
1002 return ENOENT;
1003
1004 if (req->ifbr_ifsflags & IFBIF_STP) {
1005 switch (bif->bif_ifp->if_type) {
1006 case IFT_ETHER:
1007 case IFT_L2TP:
1008 /* These can do spanning tree. */
1009 break;
1010
1011 default:
1012 /* Nothing else can. */
1013 bridge_release_member(sc, bif, &psref);
1014 return EINVAL;
1015 }
1016 }
1017
1018 if (bif->bif_flags & IFBIF_PROTECTED) {
1019 if ((req->ifbr_ifsflags & IFBIF_PROTECTED) == 0) {
1020 log(LOG_INFO, "%s: disabling protection on %s\n",
1021 sc->sc_if.if_xname, bif->bif_ifp->if_xname);
1022 }
1023 } else {
1024 if (req->ifbr_ifsflags & IFBIF_PROTECTED) {
1025 log(LOG_INFO, "%s: enabling protection on %s\n",
1026 sc->sc_if.if_xname, bif->bif_ifp->if_xname);
1027 }
1028 }
1029
1030 bif->bif_flags = req->ifbr_ifsflags;
1031
1032 bridge_release_member(sc, bif, &psref);
1033
1034 if (sc->sc_if.if_flags & IFF_RUNNING)
1035 bstp_initialization(sc);
1036
1037 return 0;
1038 }
1039
1040 static int
1041 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1042 {
1043 struct ifbrparam *param = arg;
1044
1045 sc->sc_brtmax = param->ifbrp_csize;
1046 bridge_rttrim(sc);
1047
1048 return 0;
1049 }
1050
1051 static int
1052 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1053 {
1054 struct ifbrparam *param = arg;
1055
1056 param->ifbrp_csize = sc->sc_brtmax;
1057
1058 return 0;
1059 }
1060
1061 static int
1062 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1063 {
1064 struct ifbifconf *bifc = arg;
1065 struct bridge_iflist *bif;
1066 struct ifbreq *breqs;
1067 int i, count, error = 0;
1068
1069 retry:
1070 BRIDGE_LOCK(sc);
1071 count = 0;
1072 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1073 count++;
1074 BRIDGE_UNLOCK(sc);
1075
1076 if (count == 0) {
1077 bifc->ifbic_len = 0;
1078 return 0;
1079 }
1080
1081 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1082 /* Report that a larger buffer is needed */
1083 bifc->ifbic_len = sizeof(*breqs) * count;
1084 return 0;
1085 }
1086
1087 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1088
1089 BRIDGE_LOCK(sc);
1090
1091 i = 0;
1092 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1093 i++;
1094 if (i > count) {
1095 /*
1096 * The number of members has been increased.
1097 * We need more memory!
1098 */
1099 BRIDGE_UNLOCK(sc);
1100 kmem_free(breqs, sizeof(*breqs) * count);
1101 goto retry;
1102 }
1103
1104 i = 0;
1105 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1106 struct ifbreq *breq = &breqs[i++];
1107 memset(breq, 0, sizeof(*breq));
1108
1109 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1110 sizeof(breq->ifbr_ifsname));
1111 breq->ifbr_ifsflags = bif->bif_flags;
1112 breq->ifbr_state = bif->bif_state;
1113 breq->ifbr_priority = bif->bif_priority;
1114 breq->ifbr_path_cost = bif->bif_path_cost;
1115 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1116 }
1117
1118 /* Don't call copyout while holding the mutex */
1119 BRIDGE_UNLOCK(sc);
1120
1121 for (i = 0; i < count; i++) {
1122 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1123 if (error)
1124 break;
1125 }
1126 bifc->ifbic_len = sizeof(*breqs) * i;
1127
1128 kmem_free(breqs, sizeof(*breqs) * count);
1129
1130 return error;
1131 }
1132
1133 static int
1134 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1135 {
1136 struct ifbaconf *bac = arg;
1137 struct bridge_rtnode *brt;
1138 struct ifbareq bareq;
1139 int count = 0, error = 0, len;
1140
1141 if (bac->ifbac_len == 0)
1142 return 0;
1143
1144 BRIDGE_RT_LOCK(sc);
1145
1146 /* The passed buffer is too small; report the required size. */
1147 if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1148 count = sc->sc_brtcnt;
1149 goto out;
1150 }
1151
1152 len = bac->ifbac_len;
1153 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1154 if (len < sizeof(bareq))
1155 goto out;
1156 memset(&bareq, 0, sizeof(bareq));
1157 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1158 sizeof(bareq.ifba_ifsname));
1159 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1160 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1161 bareq.ifba_expire = brt->brt_expire - time_uptime;
1162 } else
1163 bareq.ifba_expire = 0;
1164 bareq.ifba_flags = brt->brt_flags;
1165
1166 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1167 if (error)
1168 goto out;
1169 count++;
1170 len -= sizeof(bareq);
1171 }
1172 out:
1173 BRIDGE_RT_UNLOCK(sc);
1174
1175 bac->ifbac_len = sizeof(bareq) * count;
1176 return error;
1177 }
1178
1179 static int
1180 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1181 {
1182 struct ifbareq *req = arg;
1183 struct bridge_iflist *bif;
1184 int error;
1185 struct psref psref;
1186
1187 bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1188 if (bif == NULL)
1189 return ENOENT;
1190
1191 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1192 req->ifba_flags);
1193
1194 bridge_release_member(sc, bif, &psref);
1195
1196 return error;
1197 }
1198
1199 static int
1200 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1201 {
1202 struct ifbrparam *param = arg;
1203
1204 sc->sc_brttimeout = param->ifbrp_ctime;
1205
1206 return 0;
1207 }
1208
1209 static int
1210 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1211 {
1212 struct ifbrparam *param = arg;
1213
1214 param->ifbrp_ctime = sc->sc_brttimeout;
1215
1216 return 0;
1217 }
1218
1219 static int
1220 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1221 {
1222 struct ifbareq *req = arg;
1223
1224 return (bridge_rtdaddr(sc, req->ifba_dst));
1225 }
1226
1227 static int
1228 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1229 {
1230 struct ifbreq *req = arg;
1231
1232 bridge_rtflush(sc, req->ifbr_ifsflags);
1233
1234 return 0;
1235 }
1236
1237 static int
1238 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1239 {
1240 struct ifbrparam *param = arg;
1241
1242 param->ifbrp_prio = sc->sc_bridge_priority;
1243
1244 return 0;
1245 }
1246
1247 static int
1248 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1249 {
1250 struct ifbrparam *param = arg;
1251
1252 sc->sc_bridge_priority = param->ifbrp_prio;
1253
1254 if (sc->sc_if.if_flags & IFF_RUNNING)
1255 bstp_initialization(sc);
1256
1257 return 0;
1258 }
1259
1260 static int
1261 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1262 {
1263 struct ifbrparam *param = arg;
1264
1265 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1266
1267 return 0;
1268 }
1269
1270 static int
1271 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1272 {
1273 struct ifbrparam *param = arg;
1274
1275 if (param->ifbrp_hellotime == 0)
1276 return EINVAL;
1277 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1278
1279 if (sc->sc_if.if_flags & IFF_RUNNING)
1280 bstp_initialization(sc);
1281
1282 return 0;
1283 }
1284
1285 static int
1286 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1287 {
1288 struct ifbrparam *param = arg;
1289
1290 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1291
1292 return 0;
1293 }
1294
1295 static int
1296 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1297 {
1298 struct ifbrparam *param = arg;
1299
1300 if (param->ifbrp_fwddelay == 0)
1301 return EINVAL;
1302 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1303
1304 if (sc->sc_if.if_flags & IFF_RUNNING)
1305 bstp_initialization(sc);
1306
1307 return 0;
1308 }
1309
1310 static int
1311 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1312 {
1313 struct ifbrparam *param = arg;
1314
1315 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1316
1317 return 0;
1318 }
1319
1320 static int
1321 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1322 {
1323 struct ifbrparam *param = arg;
1324
1325 if (param->ifbrp_maxage == 0)
1326 return EINVAL;
1327 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1328
1329 if (sc->sc_if.if_flags & IFF_RUNNING)
1330 bstp_initialization(sc);
1331
1332 return 0;
1333 }
1334
1335 static int
1336 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1337 {
1338 struct ifbreq *req = arg;
1339 struct bridge_iflist *bif;
1340 struct psref psref;
1341
1342 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1343 if (bif == NULL)
1344 return ENOENT;
1345
1346 bif->bif_priority = req->ifbr_priority;
1347
1348 if (sc->sc_if.if_flags & IFF_RUNNING)
1349 bstp_initialization(sc);
1350
1351 bridge_release_member(sc, bif, &psref);
1352
1353 return 0;
1354 }
1355
1356 static int
1357 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1358 {
1359 struct ifbrparam *param = arg;
1360
1361 param->ifbrp_filter = sc->sc_filter_flags;
1362
1363 return 0;
1364 }
1365
1366 static int
1367 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1368 {
1369 struct ifbrparam *param = arg;
1370 uint32_t nflags, oflags;
1371
1372 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1373 return EINVAL;
1374
1375 nflags = param->ifbrp_filter;
1376 oflags = sc->sc_filter_flags;
1377
1378 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1379 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1380 sc->sc_if.if_pfil);
1381 }
1382 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1383 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1384 sc->sc_if.if_pfil);
1385 }
1386
1387 sc->sc_filter_flags = nflags;
1388
1389 return 0;
1390 }
1391
1392 static int
1393 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1394 {
1395 struct ifbreq *req = arg;
1396 struct bridge_iflist *bif;
1397 struct psref psref;
1398
1399 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1400 if (bif == NULL)
1401 return ENOENT;
1402
1403 bif->bif_path_cost = req->ifbr_path_cost;
1404
1405 if (sc->sc_if.if_flags & IFF_RUNNING)
1406 bstp_initialization(sc);
1407
1408 bridge_release_member(sc, bif, &psref);
1409
1410 return 0;
1411 }
1412
1413 /*
1414 * bridge_ifdetach:
1415 *
1416 * Detach an interface from a bridge. Called when a member
1417 * interface is detaching.
1418 */
1419 static void
1420 bridge_ifdetach(void *xifs)
1421 {
1422 struct ifnet *ifs;
1423 struct bridge_softc *sc;
1424 struct ifbreq breq;
1425
1426 ifs = (struct ifnet *)xifs;
1427 sc = ifs->if_bridge;
1428
1429 /* ioctl_lock should prevent this from happening */
1430 KASSERT(sc != NULL);
1431
1432 memset(&breq, 0, sizeof(breq));
1433 strlcpy(breq.ifbr_ifsname, ifs->if_xname, sizeof(breq.ifbr_ifsname));
1434
1435 (void) bridge_ioctl_del(sc, &breq);
1436 }
1437
1438 /*
1439 * bridge_init:
1440 *
1441 * Initialize a bridge interface.
1442 */
1443 static int
1444 bridge_init(struct ifnet *ifp)
1445 {
1446 struct bridge_softc *sc = ifp->if_softc;
1447
1448 KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1449
1450 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1451 bridge_timer, sc);
1452 bstp_initialization(sc);
1453
1454 ifp->if_flags |= IFF_RUNNING;
1455 return 0;
1456 }
1457
1458 /*
1459 * bridge_stop:
1460 *
1461 * Stop the bridge interface.
1462 */
1463 static void
1464 bridge_stop(struct ifnet *ifp, int disable)
1465 {
1466 struct bridge_softc *sc = ifp->if_softc;
1467
1468 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1469
1470 /* Prevent the callout from being scheduled again. */
1471 BRIDGE_LOCK(sc);
1472 ifp->if_flags &= ~IFF_RUNNING;
1473 BRIDGE_UNLOCK(sc);
1474
1475 callout_halt(&sc->sc_brcallout, NULL);
1476 workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1477 bstp_stop(sc);
1478 bridge_rtflush(sc, IFBF_FLUSHDYN);
1479 }
1480
1481 /*
1482 * bridge_enqueue:
1483 *
1484 * Enqueue a packet on a bridge member interface.
1485 */
1486 void
1487 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1488 int runfilt)
1489 {
1490 int len, error;
1491 short mflags;
1492
1493 if (runfilt) {
1494 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1495 dst_ifp, PFIL_OUT) != 0) {
1496 m_freem(m);
1497 return;
1498 }
1499 if (m == NULL)
1500 return;
1501 }
1502
1503 #ifdef ALTQ
1504 KERNEL_LOCK(1, NULL);
1505 /*
1506 * If ALTQ is enabled on the member interface, do
1507 * classification; the queueing discipline might
1508 * not require classification, but might require
1509 * the address family/header pointer in the pktattr.
1510 */
1511 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1512 /* XXX IFT_ETHER */
1513 altq_etherclassify(&dst_ifp->if_snd, m);
1514 }
1515 KERNEL_UNLOCK_ONE(NULL);
1516 #endif /* ALTQ */
1517
1518 if (vlan_has_tag(m) &&
1519 !vlan_is_hwtag_enabled(dst_ifp)) {
1520 (void)ether_inject_vlantag(&m, ETHERTYPE_VLAN,
1521 vlan_get_tag(m));
1522 if (m == NULL) {
1523 if_statinc(&sc->sc_if, if_oerrors);
1524 return;
1525 }
1526 }
1527
1528 len = m->m_pkthdr.len;
1529 mflags = m->m_flags;
1530
1531 error = if_transmit_lock(dst_ifp, m);
1532 if (error) {
1533 /* mbuf is already freed */
1534 if_statinc(&sc->sc_if, if_oerrors);
1535 return;
1536 }
1537
1538 net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1539 if_statinc_ref(&sc->sc_if, nsr, if_opackets);
1540 if_statadd_ref(&sc->sc_if, nsr, if_obytes, len);
1541 if (mflags & M_MCAST)
1542 if_statinc_ref(&sc->sc_if, nsr, if_omcasts);
1543 IF_STAT_PUTREF(&sc->sc_if);
1544 }
1545
1546 /*
1547 * bridge_output:
1548 *
1549 * Send output from a bridge member interface. This
1550 * performs the bridging function for locally originated
1551 * packets.
1552 *
1553 * The mbuf has the Ethernet header already attached. We must
1554 * enqueue or free the mbuf before returning.
1555 */
1556 int
1557 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1558 const struct rtentry *rt)
1559 {
1560 struct ether_header *eh;
1561 struct ifnet *dst_if;
1562 struct bridge_softc *sc;
1563 struct mbuf *n;
1564 int s, bound;
1565
1566 /*
1567 * bridge_output() is called from ether_output(); furthermore, the
1568 * ifp argument doesn't point to bridge(4). So, don't assert
1569 * IFEF_MPSAFE here.
1570 */
1571
1572 KASSERT(m->m_len >= ETHER_HDR_LEN);
1573
1574 eh = mtod(m, struct ether_header *);
1575 sc = ifp->if_bridge;
1576
1577 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1578 if (memcmp(etherbroadcastaddr,
1579 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1580 m->m_flags |= M_BCAST;
1581 else
1582 m->m_flags |= M_MCAST;
1583 }
1584
1585 /*
1586 * If bridge is down, but the original output interface is up,
1587 * go ahead and send out that interface. Otherwise, the packet
1588 * is dropped below.
1589 */
1590 if (__predict_false(sc == NULL) ||
1591 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1592 dst_if = ifp;
1593 goto unicast_asis;
1594 }
1595
1596 /*
1597 * If the packet is a multicast, or we don't know a better way to
1598 * get there, send to all interfaces.
1599 */
1600 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1601 dst_if = NULL;
1602 else
1603 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1604
1605 /*
1606 * In general, we need to handle TX offload in software before
1607 * enqueueing a packet. However, we can send it as is in the
1608 * cases of unicast via (1) the source interface, or (2) an
1609 * interface which supports the specified offload options.
1610 * For multicast or broadcast, send it as is only if (3) all
1611 * the member interfaces support the specified options.
1612 */
1613
1614 /*
1615 * Unicast via the source interface.
1616 */
1617 if (dst_if == ifp)
1618 goto unicast_asis;
1619
1620 /*
1621 * Unicast via other interface.
1622 */
1623 if (dst_if != NULL) {
1624 KASSERT(m->m_flags & M_PKTHDR);
1625 if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1626 m->m_pkthdr.csum_flags)) {
1627 /*
1628 * Unicast via an interface which supports the
1629 * specified offload options.
1630 */
1631 goto unicast_asis;
1632 }
1633
1634 /*
1635 * Handle TX offload in software. For TSO, a packet is
1636 * split into multiple chunks. Thus, ether_sw_offload_tx() returns
1637 * an mbuf chain (linked via m_nextpkt) consisting of them.
1638 */
1639 m = ether_sw_offload_tx(ifp, m);
1640 if (m == NULL)
1641 return 0;
1642
1643 do {
1644 n = m->m_nextpkt;
1645 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1646 m_freem(m);
1647 else
1648 bridge_enqueue(sc, dst_if, m, 0);
1649 m = n;
1650 } while (m != NULL);
1651
1652 return 0;
1653 }
1654
1655 /*
1656 * Multicast or broadcast.
1657 */
1658 if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1659 m->m_pkthdr.csum_flags)) {
1660 /*
1661 * Specified TX offload options are supported by all
1662 * the member interfaces of this bridge.
1663 */
1664 m->m_nextpkt = NULL; /* XXX */
1665 } else {
1666 /*
1667 * Otherwise, handle TX offload in software.
1668 */
1669 m = ether_sw_offload_tx(ifp, m);
1670 if (m == NULL)
1671 return 0;
1672 }
1673
1674 /*
1675 * When pppoe is used over a bridge, bridge_output() can be called
1676 * in an LWP context by pppoe_timeout_wk().
1677 */
1678 bound = curlwp_bind();
1679 do {
1680 /* XXX Should call bridge_broadcast, but there are locking
1681 * issues which need resolving first. */
1682 struct bridge_iflist *bif;
1683 struct mbuf *mc;
1684 bool used = false;
1685
1686 n = m->m_nextpkt;
1687
1688 s = pserialize_read_enter();
1689 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1690 struct psref psref;
1691
1692 bridge_acquire_member(sc, bif, &psref);
1693 pserialize_read_exit(s);
1694
1695 dst_if = bif->bif_ifp;
1696 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1697 goto next;
1698
1699 /*
1700 * If this is not the original output interface,
1701 * and the interface is participating in spanning
1702 * tree, make sure the port is in a state that
1703 * allows forwarding.
1704 */
1705 if (dst_if != ifp &&
1706 (bif->bif_flags & IFBIF_STP) != 0) {
1707 switch (bif->bif_state) {
1708 case BSTP_IFSTATE_BLOCKING:
1709 case BSTP_IFSTATE_LISTENING:
1710 case BSTP_IFSTATE_DISABLED:
1711 goto next;
1712 }
1713 }
1714
1715 if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1716 bif_next) == NULL &&
1717 ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1718 dst_if == ifp))
1719 {
1720 used = true;
1721 mc = m;
1722 } else {
1723 mc = m_copypacket(m, M_DONTWAIT);
1724 if (mc == NULL) {
1725 if_statinc(&sc->sc_if, if_oerrors);
1726 goto next;
1727 }
1728 }
1729
1730 bridge_enqueue(sc, dst_if, mc, 0);
1731
1732 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1733 dst_if != ifp)
1734 {
1735 if (PSLIST_READER_NEXT(bif,
1736 struct bridge_iflist, bif_next) == NULL)
1737 {
1738 used = true;
1739 mc = m;
1740 } else {
1741 mc = m_copypacket(m, M_DONTWAIT);
1742 if (mc == NULL) {
1743 if_statinc(&sc->sc_if,
1744 if_oerrors);
1745 goto next;
1746 }
1747 }
1748
1749 m_set_rcvif(mc, dst_if);
1750 mc->m_flags &= ~M_PROMISC;
1751
1752 const int _s = splsoftnet();
1753 KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1754 ether_input(dst_if, mc);
1755 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1756 splx(_s);
1757 }
1758
1759 next:
1760 s = pserialize_read_enter();
1761 bridge_release_member(sc, bif, &psref);
1762
1763 /* Guarantee we don't re-enter the loop as we already
1764 * decided we're at the end. */
1765 if (used)
1766 break;
1767 }
1768 pserialize_read_exit(s);
1769
1770 if (!used)
1771 m_freem(m);
1772
1773 m = n;
1774 } while (m != NULL);
1775 curlwp_bindx(bound);
1776
1777 return 0;
1778
1779 unicast_asis:
1780 /*
1781 * XXX Spanning tree consideration here?
1782 */
1783 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1784 m_freem(m);
1785 else
1786 bridge_enqueue(sc, dst_if, m, 0);
1787 return 0;
1788 }
1789
1790 /*
1791 * bridge_start:
1792 *
1793 * Start output on a bridge.
1794 *
1795 * NOTE: This routine should never be called in this implementation.
1796 */
1797 static void
1798 bridge_start(struct ifnet *ifp)
1799 {
1800
1801 printf("%s: bridge_start() called\n", ifp->if_xname);
1802 }
1803
1804 /*
1805 * bridge_forward:
1806 *
1807 * The forwarding function of the bridge.
1808 */
1809 static void
1810 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1811 {
1812 struct bridge_iflist *bif;
1813 struct ifnet *src_if, *dst_if;
1814 struct ether_header *eh;
1815 struct psref psref;
1816 struct psref psref_src;
1817 DECLARE_LOCK_VARIABLE;
1818 bool src_if_protected;
1819
1820 src_if = m_get_rcvif_psref(m, &psref_src);
1821 if (src_if == NULL) {
1822 /* Interface is being destroyed? */
1823 goto discard;
1824 }
1825
1826 if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1827
1828 /*
1829 * Look up the bridge_iflist.
1830 */
1831 bif = bridge_lookup_member_if(sc, src_if, &psref);
1832 if (bif == NULL) {
1833 /* Interface is not a bridge member (anymore?) */
1834 goto discard;
1835 }
1836
1837 if (bif->bif_flags & IFBIF_STP) {
1838 switch (bif->bif_state) {
1839 case BSTP_IFSTATE_BLOCKING:
1840 case BSTP_IFSTATE_LISTENING:
1841 case BSTP_IFSTATE_DISABLED:
1842 bridge_release_member(sc, bif, &psref);
1843 goto discard;
1844 }
1845 }
1846
1847 eh = mtod(m, struct ether_header *);
1848
1849 /*
1850 * If the interface is learning, and the source
1851 * address is valid and not multicast, record
1852 * the address.
1853 */
1854 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1855 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1856 (eh->ether_shost[0] == 0 &&
1857 eh->ether_shost[1] == 0 &&
1858 eh->ether_shost[2] == 0 &&
1859 eh->ether_shost[3] == 0 &&
1860 eh->ether_shost[4] == 0 &&
1861 eh->ether_shost[5] == 0) == 0) {
1862 (void) bridge_rtupdate(sc, eh->ether_shost,
1863 src_if, 0, IFBAF_DYNAMIC);
1864 }
1865
1866 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1867 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1868 bridge_release_member(sc, bif, &psref);
1869 goto discard;
1870 }
1871
1872 src_if_protected = ((bif->bif_flags & IFBIF_PROTECTED) != 0);
1873
1874 bridge_release_member(sc, bif, &psref);
1875
1876 /*
1877 * At this point, the port either doesn't participate
1878 * in spanning tree or it is in the forwarding state.
1879 */
1880
1881 /*
1882 * If the packet is unicast, destined for someone on
1883 * "this" side of the bridge, drop it.
1884 */
1885 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1886 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1887 if (src_if == dst_if)
1888 goto discard;
1889 } else {
1890 /* ...forward it to all interfaces. */
1891 if_statinc(&sc->sc_if, if_imcasts);
1892 dst_if = NULL;
1893 }
1894
1895 if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0 ||
1896 m == NULL) {
1897 goto discard;
1898 }
1899
1900 if (dst_if == NULL) {
1901 bridge_broadcast(sc, src_if, src_if_protected, m);
1902 goto out;
1903 }
1904
1905 m_put_rcvif_psref(src_if, &psref_src);
1906 src_if = NULL;
1907
1908 /*
1909 * At this point, we're dealing with a unicast frame
1910 * going to a different interface.
1911 */
1912 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1913 goto discard;
1914
1915 bif = bridge_lookup_member_if(sc, dst_if, &psref);
1916 if (bif == NULL) {
1917 /* Not a member of the bridge (anymore?) */
1918 goto discard;
1919 }
1920
1921 if (bif->bif_flags & IFBIF_STP) {
1922 switch (bif->bif_state) {
1923 case BSTP_IFSTATE_DISABLED:
1924 case BSTP_IFSTATE_BLOCKING:
1925 bridge_release_member(sc, bif, &psref);
1926 goto discard;
1927 }
1928 }
1929
1930 if ((bif->bif_flags & IFBIF_PROTECTED) && src_if_protected) {
1931 bridge_release_member(sc, bif, &psref);
1932 goto discard;
1933 }
1934
1935 bridge_release_member(sc, bif, &psref);
1936
1937 /*
1938 * Before enqueueing this packet to the destination interface,
1939 * clear any in-bound checksum flags to prevent them from being
1940 * misused as out-bound flags.
1941 */
1942 m->m_pkthdr.csum_flags = 0;
1943
1944 ACQUIRE_GLOBAL_LOCKS();
1945 bridge_enqueue(sc, dst_if, m, 1);
1946 RELEASE_GLOBAL_LOCKS();
1947 out:
1948 if (src_if != NULL)
1949 m_put_rcvif_psref(src_if, &psref_src);
1950 return;
1951
1952 discard:
1953 m_freem(m);
1954 goto out;
1955 }
1956
1957 static bool
1958 bstp_state_before_learning(struct bridge_iflist *bif)
1959 {
1960 if (bif->bif_flags & IFBIF_STP) {
1961 switch (bif->bif_state) {
1962 case BSTP_IFSTATE_BLOCKING:
1963 case BSTP_IFSTATE_LISTENING:
1964 case BSTP_IFSTATE_DISABLED:
1965 return true;
1966 }
1967 }
1968 return false;
1969 }
1970
1971 static bool
1972 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1973 {
1974 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1975
1976 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1977 #if NCARP > 0
1978 || (bif->bif_ifp->if_carp &&
1979 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1980 #endif /* NCARP > 0 */
1981 )
1982 return true;
1983
1984 return false;
1985 }
1986
1987 /*
1988 * bridge_input:
1989 *
1990 * Receive input from a member interface. Queue the packet for
1991 * bridging if it is not for us.
1992 */
1993 static void
1994 bridge_input(struct ifnet *ifp, struct mbuf *m)
1995 {
1996 struct bridge_softc *sc = ifp->if_bridge;
1997 struct bridge_iflist *bif;
1998 struct ether_header *eh;
1999 struct psref psref;
2000 int bound;
2001 DECLARE_LOCK_VARIABLE;
2002
2003 KASSERT(!cpu_intr_p());
2004
2005 if (__predict_false(sc == NULL) ||
2006 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
2007 ACQUIRE_GLOBAL_LOCKS();
2008 ether_input(ifp, m);
2009 RELEASE_GLOBAL_LOCKS();
2010 return;
2011 }
2012
2013 bound = curlwp_bind();
2014 bif = bridge_lookup_member_if(sc, ifp, &psref);
2015 if (bif == NULL) {
2016 curlwp_bindx(bound);
2017 ACQUIRE_GLOBAL_LOCKS();
2018 ether_input(ifp, m);
2019 RELEASE_GLOBAL_LOCKS();
2020 return;
2021 }
2022
2023 eh = mtod(m, struct ether_header *);
2024
2025 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
2026 if (memcmp(etherbroadcastaddr,
2027 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
2028 m->m_flags |= M_BCAST;
2029 else
2030 m->m_flags |= M_MCAST;
2031 }
2032
2033 /*
2034 * A 'fast' path for packets addressed to interfaces that are
2035 * part of this bridge.
2036 */
2037 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
2038 !bstp_state_before_learning(bif)) {
2039 struct bridge_iflist *_bif;
2040 struct ifnet *_ifp = NULL;
2041 int s;
2042 struct psref _psref;
2043
2044 s = pserialize_read_enter();
2045 BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
2046 /* It is destined for us. */
2047 if (bridge_ourether(_bif, eh, 0)) {
2048 bridge_acquire_member(sc, _bif, &_psref);
2049 pserialize_read_exit(s);
2050 if (_bif->bif_flags & IFBIF_LEARNING)
2051 (void) bridge_rtupdate(sc,
2052 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
2053 m_set_rcvif(m, _bif->bif_ifp);
2054 _ifp = _bif->bif_ifp;
2055 bridge_release_member(sc, _bif, &_psref);
2056 goto out;
2057 }
2058
2059 /* We just received a packet that we sent out. */
2060 if (bridge_ourether(_bif, eh, 1))
2061 break;
2062 }
2063 pserialize_read_exit(s);
2064 out:
2065
2066 if (_bif != NULL) {
2067 bridge_release_member(sc, bif, &psref);
2068 curlwp_bindx(bound);
2069 if (_ifp != NULL) {
2070 m->m_flags &= ~M_PROMISC;
2071 ACQUIRE_GLOBAL_LOCKS();
2072 ether_input(_ifp, m);
2073 RELEASE_GLOBAL_LOCKS();
2074 } else
2075 m_freem(m);
2076 return;
2077 }
2078 }
2079
2080 /* Tap off 802.1D packets; they do not get forwarded. */
2081 if (bif->bif_flags & IFBIF_STP &&
2082 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2083 bstp_input(sc, bif, m);
2084 bridge_release_member(sc, bif, &psref);
2085 curlwp_bindx(bound);
2086 return;
2087 }
2088
2089 /*
2090 * A normal switch would discard the packet here, but that's not what
2091 * we've done historically. This also prevents some obnoxious behaviour.
2092 */
2093 if (bstp_state_before_learning(bif)) {
2094 bridge_release_member(sc, bif, &psref);
2095 curlwp_bindx(bound);
2096 ACQUIRE_GLOBAL_LOCKS();
2097 ether_input(ifp, m);
2098 RELEASE_GLOBAL_LOCKS();
2099 return;
2100 }
2101
2102 bridge_release_member(sc, bif, &psref);
2103
2104 bridge_forward(sc, m);
2105
2106 curlwp_bindx(bound);
2107 }
2108
2109 /*
2110 * bridge_broadcast:
2111 *
2112 * Send a frame to all interfaces that are members of
2113 * the bridge, except for the one on which the packet
2114 * arrived.
2115 */
2116 static void
2117 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2118 bool src_if_protected, struct mbuf *m)
2119 {
2120 struct bridge_iflist *bif;
2121 struct mbuf *mc;
2122 struct ifnet *dst_if;
2123 bool bmcast;
2124 int s;
2125 DECLARE_LOCK_VARIABLE;
2126
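	/*
	 * Broadcast/multicast frames are also passed up the local stack
	 * via ether_input() on every member interface, in addition to
	 * being forwarded (see the bmcast case below).
	 */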
2127 bmcast = m->m_flags & (M_BCAST|M_MCAST);
2128
2129 s = pserialize_read_enter();
2130 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2131 struct psref psref;
2132
2133 bridge_acquire_member(sc, bif, &psref);
2134 pserialize_read_exit(s);
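		/*
		 * The psref acquired above keeps this member alive, so it
		 * is safe to leave the pserialize read section here.
		 */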
2135
2136 dst_if = bif->bif_ifp;
2137
2138 if (bif->bif_flags & IFBIF_STP) {
2139 switch (bif->bif_state) {
2140 case BSTP_IFSTATE_BLOCKING:
2141 case BSTP_IFSTATE_DISABLED:
2142 goto next;
2143 }
2144 }
2145
2146 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2147 goto next;
2148
2149 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2150 goto next;
2151
2152 if (dst_if != src_if) {
2153 if ((bif->bif_flags & IFBIF_PROTECTED) &&
2154 src_if_protected) {
2155 goto next;
2156 }
2157
2158 mc = m_copypacket(m, M_DONTWAIT);
2159 if (mc == NULL) {
2160 if_statinc(&sc->sc_if, if_oerrors);
2161 goto next;
2162 }
2163 /*
2164 * Before enqueueing this packet to the destination
2165 * interface, clear any in-bound checksum flags to
2166 * prevent them from being misused as out-bound flags.
2167 */
2168 mc->m_pkthdr.csum_flags = 0;
2169
2170 ACQUIRE_GLOBAL_LOCKS();
2171 bridge_enqueue(sc, dst_if, mc, 1);
2172 RELEASE_GLOBAL_LOCKS();
2173 }
2174
2175 if (bmcast) {
2176 mc = m_copypacket(m, M_DONTWAIT);
2177 if (mc == NULL) {
2178 if_statinc(&sc->sc_if, if_oerrors);
2179 goto next;
2180 }
2181 /*
2182 * Before enqueueing this packet to the destination
2183 * interface, clear any in-bound checksum flags to
2184 * prevent them from being misused as out-bound flags.
2185 */
2186 mc->m_pkthdr.csum_flags = 0;
2187
2188 m_set_rcvif(mc, dst_if);
2189 mc->m_flags &= ~M_PROMISC;
2190
2191 ACQUIRE_GLOBAL_LOCKS();
2192 ether_input(dst_if, mc);
2193 RELEASE_GLOBAL_LOCKS();
2194 }
2195 next:
2196 s = pserialize_read_enter();
2197 bridge_release_member(sc, bif, &psref);
2198 }
2199 pserialize_read_exit(s);
2200
2201 m_freem(m);
2202 }
2203
2204 static int
2205 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2206 struct bridge_rtnode **brtp)
2207 {
2208 struct bridge_rtnode *brt;
2209 int error;
2210
2211 if (sc->sc_brtcnt >= sc->sc_brtmax)
2212 return ENOSPC;
2213
2214 /*
2215 * Allocate a new bridge forwarding node, and
2216 * initialize the expiration time and Ethernet
2217 * address.
2218 */
2219 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2220 if (brt == NULL)
2221 return ENOMEM;
2222
2223 memset(brt, 0, sizeof(*brt));
2224 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2225 brt->brt_flags = IFBAF_DYNAMIC;
2226 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2227 PSLIST_ENTRY_INIT(brt, brt_list);
2228 PSLIST_ENTRY_INIT(brt, brt_hash);
2229
2230 BRIDGE_RT_LOCK(sc);
2231 error = bridge_rtnode_insert(sc, brt);
2232 BRIDGE_RT_UNLOCK(sc);
2233
2234 if (error != 0) {
2235 pool_put(&bridge_rtnode_pool, brt);
2236 return error;
2237 }
2238
2239 *brtp = brt;
2240 return 0;
2241 }
2242
2243 /*
2244 * bridge_rtupdate:
2245 *
2246 * Add a bridge routing entry.
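 * If an entry for the destination already exists, refresh it;
 * otherwise allocate a new one and retry.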
2247 */
2248 static int
2249 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2250 struct ifnet *dst_if, int setflags, uint8_t flags)
2251 {
2252 struct bridge_rtnode *brt;
2253 int s;
2254
2255 again:
2256 /*
2257 * A route for this destination might already exist. If so,
2258 * update it, otherwise create a new one.
2259 */
2260 s = pserialize_read_enter();
2261 brt = bridge_rtnode_lookup(sc, dst);
2262
2263 if (brt != NULL) {
2264 brt->brt_ifp = dst_if;
2265 if (setflags) {
2266 brt->brt_flags = flags;
2267 if (flags & IFBAF_STATIC)
2268 brt->brt_expire = 0;
2269 else
2270 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2271 } else {
2272 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2273 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2274 }
2275 }
2276 pserialize_read_exit(s);
2277
2278 if (brt == NULL) {
2279 int r;
2280
2281 r = bridge_rtalloc(sc, dst, &brt);
2282 if (r != 0)
2283 return r;
2284 goto again;
2285 }
2286
2287 return 0;
2288 }
2289
2290 /*
2291 * bridge_rtlookup:
2292 *
2293 * Lookup the destination interface for an address.
2294 */
2295 static struct ifnet *
2296 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2297 {
2298 struct bridge_rtnode *brt;
2299 struct ifnet *ifs = NULL;
2300 int s;
2301
2302 s = pserialize_read_enter();
2303 brt = bridge_rtnode_lookup(sc, addr);
2304 if (brt != NULL)
2305 ifs = brt->brt_ifp;
2306 pserialize_read_exit(s);
2307
2308 return ifs;
2309 }
2310
2311 typedef bool (*bridge_iterate_cb_t)
2312 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2313
2314 /*
2315 * bridge_rtlist_iterate_remove:
2316 *
2317 * Iterate over sc->sc_rtlist and remove every rtnode that the func
2318 * callback judges should be removed.  Removals are done in the
2319 * pserialize(9) manner; to that end, all kmem_* operations are kept
2320 * outside the mutex.
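 *
 * As an illustration only (not compiled here), a minimal callback that
 * removes every dynamic entry, equivalent to bridge_rtflush(sc, 0),
 * could look like:
 *
 *	static bool
 *	example_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
 *	    bool *need_break, void *arg)
 *	{
 *
 *		return (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC;
 *	}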
2321 */
2322 static void
2323 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2324 {
2325 struct bridge_rtnode *brt;
2326 struct bridge_rtnode **brt_list;
2327 int i, count;
2328
2329 retry:
2330 count = sc->sc_brtcnt;
2331 if (count == 0)
2332 return;
2333 brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2334
2335 BRIDGE_RT_LOCK(sc);
2336 if (__predict_false(sc->sc_brtcnt > count)) {
2337 		/* The number of rtnodes increased; we need more memory */
2338 BRIDGE_RT_UNLOCK(sc);
2339 kmem_free(brt_list, sizeof(*brt_list) * count);
2340 goto retry;
2341 }
2342
2343 i = 0;
2344 /*
2345 * We don't need to use a _SAFE variant here because we know
2346 * that a removed item keeps its next pointer as-is thanks to
2347 * pslist(9) and isn't freed in the loop.
2348 */
2349 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2350 bool need_break = false;
2351 if (func(sc, brt, &need_break, arg)) {
2352 bridge_rtnode_remove(sc, brt);
2353 brt_list[i++] = brt;
2354 }
2355 if (need_break)
2356 break;
2357 }
2358
2359 if (i > 0)
2360 BRIDGE_RT_PSZ_PERFORM(sc);
2361 BRIDGE_RT_UNLOCK(sc);
2362
2363 while (--i >= 0)
2364 bridge_rtnode_destroy(brt_list[i]);
2365
2366 kmem_free(brt_list, sizeof(*brt_list) * count);
2367 }
2368
2369 static bool
2370 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2371 bool *need_break, void *arg)
2372 {
2373 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2374 		/* Take the subsequent removal into account */
2375 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2376 *need_break = true;
2377 return true;
2378 } else
2379 return false;
2380 }
2381
2382 static void
2383 bridge_rttrim0(struct bridge_softc *sc)
2384 {
2385 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2386 }
2387
2388 /*
2389 * bridge_rttrim:
2390 *
2391 * Trim the routing table so that we have a number
2392 * of routing entries less than or equal to the
2393 * maximum number.
2394 */
2395 static void
2396 bridge_rttrim(struct bridge_softc *sc)
2397 {
2398
2399 /* Make sure we actually need to do this. */
2400 if (sc->sc_brtcnt <= sc->sc_brtmax)
2401 return;
2402
2403 /* Force an aging cycle; this might trim enough addresses. */
2404 bridge_rtage(sc);
2405 if (sc->sc_brtcnt <= sc->sc_brtmax)
2406 return;
2407
2408 bridge_rttrim0(sc);
2409
2410 return;
2411 }
2412
2413 /*
2414 * bridge_timer:
2415 *
2416 * Aging timer for the bridge.
2417 */
2418 static void
2419 bridge_timer(void *arg)
2420 {
2421 struct bridge_softc *sc = arg;
2422
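	/*
	 * Defer the aging work to a workqueue: bridge_rtage() allocates
	 * memory with KM_SLEEP and waits for pserialize readers, neither
	 * of which may be done from callout context.
	 */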
2423 workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2424 }
2425
2426 static void
2427 bridge_rtage_work(struct work *wk, void *arg)
2428 {
2429 struct bridge_softc *sc = arg;
2430
2431 KASSERT(wk == &sc->sc_rtage_wk);
2432
2433 bridge_rtage(sc);
2434
2435 BRIDGE_LOCK(sc);
2436 if (sc->sc_if.if_flags & IFF_RUNNING) {
2437 callout_reset(&sc->sc_brcallout,
2438 bridge_rtable_prune_period * hz, bridge_timer, sc);
2439 }
2440 BRIDGE_UNLOCK(sc);
2441 }
2442
2443 static bool
2444 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2445 bool *need_break, void *arg)
2446 {
2447 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2448 time_uptime >= brt->brt_expire)
2449 return true;
2450 else
2451 return false;
2452 }
2453
2454 /*
2455 * bridge_rtage:
2456 *
2457 * Perform an aging cycle.
2458 */
2459 static void
2460 bridge_rtage(struct bridge_softc *sc)
2461 {
2462 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2463 }
2464
2465
2466 static bool
2467 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2468 bool *need_break, void *arg)
2469 {
2470 int full = *(int*)arg;
2471
2472 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2473 return true;
2474 else
2475 return false;
2476 }
2477
2478 /*
2479 * bridge_rtflush:
2480 *
2481 * Remove all dynamic addresses from the bridge.
2482 */
2483 static void
2484 bridge_rtflush(struct bridge_softc *sc, int full)
2485 {
2486 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2487 }
2488
2489 /*
2490 * bridge_rtdaddr:
2491 *
2492 * Remove an address from the table.
2493 */
2494 static int
2495 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2496 {
2497 struct bridge_rtnode *brt;
2498
2499 BRIDGE_RT_LOCK(sc);
2500 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2501 BRIDGE_RT_UNLOCK(sc);
2502 return ENOENT;
2503 }
2504 bridge_rtnode_remove(sc, brt);
2505 BRIDGE_RT_PSZ_PERFORM(sc);
2506 BRIDGE_RT_UNLOCK(sc);
2507
2508 bridge_rtnode_destroy(brt);
2509
2510 return 0;
2511 }
2512
2513 /*
2514 * bridge_rtdelete:
2515 *
2516 * Delete routes to a specific member interface.
2517 */
2518 static void
2519 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2520 {
2521 struct bridge_rtnode *brt;
2522
2523 /* XXX pserialize_perform for each entry is slow */
2524 again:
2525 BRIDGE_RT_LOCK(sc);
2526 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2527 if (brt->brt_ifp == ifp)
2528 break;
2529 }
2530 if (brt == NULL) {
2531 BRIDGE_RT_UNLOCK(sc);
2532 return;
2533 }
2534 bridge_rtnode_remove(sc, brt);
2535 BRIDGE_RT_PSZ_PERFORM(sc);
2536 BRIDGE_RT_UNLOCK(sc);
2537
2538 bridge_rtnode_destroy(brt);
2539
2540 goto again;
2541 }
2542
2543 /*
2544 * bridge_rtable_init:
2545 *
2546 * Initialize the route table for this bridge.
2547 */
2548 static void
2549 bridge_rtable_init(struct bridge_softc *sc)
2550 {
2551 int i;
2552
2553 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2554 KM_SLEEP);
2555
2556 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2557 PSLIST_INIT(&sc->sc_rthash[i]);
2558
2559 sc->sc_rthash_key = cprng_fast32();
2560
2561 PSLIST_INIT(&sc->sc_rtlist);
2562
2563 sc->sc_rtlist_psz = pserialize_create();
2564 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2565 }
2566
2567 /*
2568 * bridge_rtable_fini:
2569 *
2570 * Deconstruct the route table for this bridge.
2571 */
2572 static void
2573 bridge_rtable_fini(struct bridge_softc *sc)
2574 {
2575
2576 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2577 mutex_obj_free(sc->sc_rtlist_lock);
2578 pserialize_destroy(sc->sc_rtlist_psz);
2579 }
2580
2581 /*
2582 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2583 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2584 */
2585 #define mix(a, b, c) \
2586 do { \
2587 a -= b; a -= c; a ^= (c >> 13); \
2588 b -= c; b -= a; b ^= (a << 8); \
2589 c -= a; c -= b; c ^= (b >> 13); \
2590 a -= b; a -= c; a ^= (c >> 12); \
2591 b -= c; b -= a; b ^= (a << 16); \
2592 c -= a; c -= b; c ^= (b >> 5); \
2593 a -= b; a -= c; a ^= (c >> 3); \
2594 b -= c; b -= a; b ^= (a << 10); \
2595 c -= a; c -= b; c ^= (b >> 15); \
2596 } while (/*CONSTCOND*/0)
2597
2598 static inline uint32_t
2599 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2600 {
2601 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2602
2603 b += addr[5] << 8;
2604 b += addr[4];
2605 a += (uint32_t)addr[3] << 24;
2606 a += addr[2] << 16;
2607 a += addr[1] << 8;
2608 a += addr[0];
2609
2610 mix(a, b, c);
2611
2612 return (c & BRIDGE_RTHASH_MASK);
2613 }
2614
2615 #undef mix
2616
2617 /*
2618 * bridge_rtnode_lookup:
2619 *
2620 * Look up a bridge route node for the specified destination.
2621 */
2622 static struct bridge_rtnode *
2623 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2624 {
2625 struct bridge_rtnode *brt;
2626 uint32_t hash;
2627 int dir;
2628
2629 hash = bridge_rthash(sc, addr);
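	/*
	 * The hash chain is kept sorted in descending address order
	 * (see bridge_rtnode_insert()), so the search can stop as soon
	 * as it passes the slot where the address would be.
	 */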
2630 BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2631 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2632 if (dir == 0)
2633 return brt;
2634 if (dir > 0)
2635 return NULL;
2636 }
2637
2638 return NULL;
2639 }
2640
2641 /*
2642 * bridge_rtnode_insert:
2643 *
2644 * Insert the specified bridge node into the route table. We
2645 * assume the entry is not already in the table.
2646 */
2647 static int
2648 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2649 {
2650 struct bridge_rtnode *lbrt, *prev = NULL;
2651 uint32_t hash;
2652
2653 KASSERT(BRIDGE_RT_LOCKED(sc));
2654
2655 hash = bridge_rthash(sc, brt->brt_addr);
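	/*
	 * Keep each hash chain sorted in descending address order so
	 * that bridge_rtnode_lookup() can terminate its search early.
	 */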
2656 BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2657 int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2658 if (dir == 0)
2659 return EEXIST;
2660 if (dir > 0)
2661 break;
2662 prev = lbrt;
2663 }
2664 if (prev == NULL)
2665 BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2666 else
2667 BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2668
2669 BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2670 sc->sc_brtcnt++;
2671
2672 return 0;
2673 }
2674
2675 /*
2676 * bridge_rtnode_remove:
2677 *
2678 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2679 */
2680 static void
2681 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2682 {
2683
2684 KASSERT(BRIDGE_RT_LOCKED(sc));
2685
2686 BRIDGE_RTHASH_WRITER_REMOVE(brt);
2687 BRIDGE_RTLIST_WRITER_REMOVE(brt);
2688 sc->sc_brtcnt--;
2689 }
2690
2691 /*
2692 * bridge_rtnode_destroy:
2693 *
2694 * Destroy a bridge rtnode.
2695 */
2696 static void
2697 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2698 {
2699
2700 PSLIST_ENTRY_DESTROY(brt, brt_list);
2701 PSLIST_ENTRY_DESTROY(brt, brt_hash);
2702 pool_put(&bridge_rtnode_pool, brt);
2703 }
2704
2705 extern pfil_head_t *inet_pfil_hook; /* XXX */
2706 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2707
2708 /*
2709 * Send bridge packets through IPF if they are one of the types IPF can deal
2710 * with, or if they are ARP or REVARP. (IPF will pass ARP and REVARP without
2711 * question.)
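 *
 * On failure the mbuf is freed and *mp is set to NULL.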
2712 */
2713 static int
2714 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2715 {
2716 int snap, error;
2717 struct ether_header *eh1, eh2;
2718 struct llc llc1;
2719 uint16_t ether_type;
2720
2721 snap = 0;
2722 	error = -1;	/* default to an error unless we succeed below */
2723 eh1 = mtod(*mp, struct ether_header *);
2724 ether_type = ntohs(eh1->ether_type);
2725
2726 	/*
2727 	 * Check for SNAP/LLC (a type below ETHERMTU is an 802.3 length field).
2728 	 */
2729 if (ether_type < ETHERMTU) {
2730 struct llc *llc2 = (struct llc *)(eh1 + 1);
2731
2732 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2733 llc2->llc_dsap == LLC_SNAP_LSAP &&
2734 llc2->llc_ssap == LLC_SNAP_LSAP &&
2735 llc2->llc_control == LLC_UI) {
2736 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2737 snap = 1;
2738 }
2739 }
2740
2741 	/* Drop VLAN traffic whose tag was stripped by hardware offloading. */
2742 if (vlan_has_tag(*mp))
2743 goto bad;
2744
2745 /*
2746 * If we're trying to filter bridge traffic, don't look at anything
2747 * other than IP and ARP traffic. If the filter doesn't understand
2748 * IPv6, don't allow IPv6 through the bridge either. This is lame
2749 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2750 * but of course we don't have an AppleTalk filter to begin with.
2751 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2752 * ARP traffic.)
2753 */
2754 switch (ether_type) {
2755 case ETHERTYPE_ARP:
2756 case ETHERTYPE_REVARP:
2757 return 0; /* Automatically pass */
2758 case ETHERTYPE_IP:
2759 # ifdef INET6
2760 case ETHERTYPE_IPV6:
2761 # endif /* INET6 */
2762 break;
2763 default:
2764 goto bad;
2765 }
2766
2767 /* Strip off the Ethernet header and keep a copy. */
2768 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2769 m_adj(*mp, ETHER_HDR_LEN);
2770
2771 /* Strip off snap header, if present */
2772 if (snap) {
2773 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2774 m_adj(*mp, sizeof(struct llc));
2775 }
2776
2777 /*
2778 * Check basic packet sanity and run IPF through pfil.
2779 */
2780 KASSERT(!cpu_intr_p());
2781 switch (ether_type)
2782 {
2783 case ETHERTYPE_IP :
2784 error = bridge_ip_checkbasic(mp);
2785 if (error == 0)
2786 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2787 break;
2788 # ifdef INET6
2789 case ETHERTYPE_IPV6 :
2790 error = bridge_ip6_checkbasic(mp);
2791 if (error == 0)
2792 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2793 break;
2794 # endif
2795 default :
2796 error = 0;
2797 break;
2798 }
2799
2800 if (*mp == NULL)
2801 return error;
2802 if (error != 0)
2803 goto bad;
2804
2805 error = -1;
2806
2807 /*
2808 * Finally, put everything back the way it was and return
2809 */
2810 if (snap) {
2811 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2812 if (*mp == NULL)
2813 return error;
2814 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2815 }
2816
2817 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2818 if (*mp == NULL)
2819 return error;
2820 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2821
2822 return 0;
2823
2824 bad:
2825 m_freem(*mp);
2826 *mp = NULL;
2827 return error;
2828 }
2829
2830 /*
2831 * Perform basic checks on header size, since IPF
2832 * assumes ip_input() has already done this work
2833 * for it.  Cut-and-pasted from ip_input.c.
2834 * Given how simple the IPv6 version is,
2835 * does the IPv4 version really need to be
2836 * this complicated?
2837 *
2838 * XXX Should we update ipstat here, or not?
2839 * XXX Right now we update ipstat but not
2840 * XXX csum_counter.
2841 */
2842 static int
2843 bridge_ip_checkbasic(struct mbuf **mp)
2844 {
2845 struct mbuf *m = *mp;
2846 struct ip *ip;
2847 int len, hlen;
2848
2849 if (*mp == NULL)
2850 return -1;
2851
2852 if (M_GET_ALIGNED_HDR(&m, struct ip, true) != 0) {
2853 /* XXXJRT new stat, please */
2854 ip_statinc(IP_STAT_TOOSMALL);
2855 goto bad;
2856 }
2857 ip = mtod(m, struct ip *);
2858 if (ip == NULL) goto bad;
2859
2860 if (ip->ip_v != IPVERSION) {
2861 ip_statinc(IP_STAT_BADVERS);
2862 goto bad;
2863 }
2864 hlen = ip->ip_hl << 2;
2865 if (hlen < sizeof(struct ip)) { /* minimum header length */
2866 ip_statinc(IP_STAT_BADHLEN);
2867 goto bad;
2868 }
2869 if (hlen > m->m_len) {
2870 		if ((m = m_pullup(m, hlen)) == NULL) {
2871 ip_statinc(IP_STAT_BADHLEN);
2872 goto bad;
2873 }
2874 ip = mtod(m, struct ip *);
2875 if (ip == NULL) goto bad;
2876 }
2877
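	/*
	 * Use the hardware-verified IPv4 header checksum if the receiving
	 * interface provides one; otherwise verify it in software.
	 */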
2878 switch (m->m_pkthdr.csum_flags &
2879 ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2880 M_CSUM_IPv4_BAD)) {
2881 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2882 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2883 goto bad;
2884
2885 case M_CSUM_IPv4:
2886 /* Checksum was okay. */
2887 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2888 break;
2889
2890 default:
2891 /* Must compute it ourselves. */
2892 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2893 if (in_cksum(m, hlen) != 0)
2894 goto bad;
2895 break;
2896 }
2897
2898 /* Retrieve the packet length. */
2899 len = ntohs(ip->ip_len);
2900
2901 /*
2902 * Check for additional length bogosity
2903 */
2904 if (len < hlen) {
2905 ip_statinc(IP_STAT_BADLEN);
2906 goto bad;
2907 }
2908
2909 /*
2910 * Check that the amount of data in the buffers
2911 * is at least as much as the IP header would have us expect.
2912 * Drop packet if shorter than we expect.
2913 */
2914 if (m->m_pkthdr.len < len) {
2915 ip_statinc(IP_STAT_TOOSHORT);
2916 goto bad;
2917 }
2918
2919 /* Checks out, proceed */
2920 *mp = m;
2921 return 0;
2922
2923 bad:
2924 *mp = m;
2925 return -1;
2926 }
2927
2928 # ifdef INET6
2929 /*
2930 * Same as above, but for IPv6.
2931 * Cut-and-pasted from ip6_input.c.
2932 * XXX Should we update ip6stat, or not?
2933 */
2934 static int
2935 bridge_ip6_checkbasic(struct mbuf **mp)
2936 {
2937 struct mbuf *m = *mp;
2938 struct ip6_hdr *ip6;
2939
2940 /*
2941 * If the IPv6 header is not aligned, slurp it up into a new
2942 * mbuf with space for link headers, in the event we forward
2943 * it. Otherwise, if it is aligned, make sure the entire base
2944 * IPv6 header is in the first mbuf of the chain.
2945 */
2946 if (M_GET_ALIGNED_HDR(&m, struct ip6_hdr, true) != 0) {
2947 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2948 /* XXXJRT new stat, please */
2949 ip6_statinc(IP6_STAT_TOOSMALL);
2950 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2951 goto bad;
2952 }
2953
2954 ip6 = mtod(m, struct ip6_hdr *);
2955
2956 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2957 ip6_statinc(IP6_STAT_BADVERS);
2958 in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2959 goto bad;
2960 }
2961
2962 /* Checks out, proceed */
2963 *mp = m;
2964 return 0;
2965
2966 bad:
2967 *mp = m;
2968 return -1;
2969 }
2970 # endif /* INET6 */
2971